python_code (string column, lengths 0 to 456k)
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team


class DeepSpeedOptimizer(object):
    pass


class ZeROOptimizer(DeepSpeedOptimizer):
    pass
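# --- Added illustrative sketch (not part of the DeepSpeed source) ---
# The two classes above are empty marker bases: engine code can dispatch on
# isinstance(optimizer, ZeROOptimizer) instead of on concrete wrapper types.
# The subclass and usage below are hypothetical and only demonstrate that pattern.
import torch


class _ToyShardedOptimizer(ZeROOptimizer):  # hypothetical subclass for illustration

    def __init__(self, base_optimizer: torch.optim.Optimizer):
        self.base_optimizer = base_optimizer

    def step(self):
        # delegate the actual parameter update to the wrapped torch optimizer
        self.base_optimizer.step()


if __name__ == '__main__':
    param = torch.nn.Parameter(torch.zeros(2))
    opt = _ToyShardedOptimizer(torch.optim.SGD([param], lr=0.1))
    print(isinstance(opt, DeepSpeedOptimizer))  # True: generic DeepSpeed optimizer paths apply
    print(isinstance(opt, ZeROOptimizer))       # True: ZeRO-specific paths apply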
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from collections import OrderedDict
import torch
import sys
import os
from deepspeed import comm as dist
from deepspeed.runtime.constants import PIPE_REPLICATED
from deepspeed.ops.op_builder import UtilsBuilder
from deepspeed.runtime import ZeROOptimizer
from packaging import version as pkg_version

from deepspeed.git_version_info import version
from deepspeed.runtime.utils import (get_global_norm_of_tensors, clip_tensors_by_global_norm, DummyOptim,
                                     align_dense_tensors, all_gather_dp_groups, bwc_tensor_model_parallel_rank,
                                     is_model_parallel_parameter, see_memory_usage)

from deepspeed.utils import link_hp_params, fragment_address
from deepspeed.checkpoint import enable_universal_checkpoint
from deepspeed.checkpoint.constants import (DS_VERSION, PARTITION_COUNT, BASE_OPTIMIZER_STATE,
                                            SINGLE_PARTITION_OF_FP32_GROUPS, CLIP_GRAD, GROUP_PADDINGS,
                                            PARAM_SLICE_MAPPINGS)

setattr(sys.modules[__name__], 'fragment_address', fragment_address)


class BF16_Optimizer(ZeROOptimizer):

    def __init__(self,
                 init_optimizer,
                 param_names,
                 mpu=None,
                 clip_grad=0.0,
                 norm_type=2,
                 allgather_bucket_size=5000000000,
                 dp_process_group=None,
                 timers=None):
        super().__init__()
        see_memory_usage('begin bf16_optimizer', force=True)
        self.timers = timers
        self.optimizer = init_optimizer
        self.param_names = param_names
        self.using_real_optimizer = not isinstance(self.optimizer, DummyOptim)

        self.clip_grad = clip_grad
        self.norm_type = norm_type
        self.mpu = mpu
        self.allgather_bucket_size = int(allgather_bucket_size)
        self.dp_process_group = dp_process_group
        self.dp_rank = dist.get_rank(group=self.dp_process_group)
        self.real_dp_process_group = [dp_process_group for i in range(len(self.optimizer.param_groups))]

        # Load pre-built or JIT compile (un)flatten ops
        util_ops = UtilsBuilder().load()
        self.flatten = util_ops.flatten
        self.unflatten = util_ops.unflatten

        # align nccl all-gather send buffers to 4-byte boundary
        self.nccl_start_alignment_factor = 2  # 4-byte alignment/sizeof(fp16) = 2

        # Build BF16/FP32 groups
        self.bf16_groups = []
        self.bf16_groups_flat = []
        self.bf16_partitioned_groups = []

        self.fp32_groups_flat_partition = []

        # Maintain different fp32 gradients views for convenience
        self.fp32_groups_gradients = []
        self.fp32_groups_gradient_dict = {}
        self.fp32_groups_gradients_flat = []
        self.fp32_groups_actual_gradients_flat = []
        self.fp32_groups_gradient_flat_partition = []
        self.fp32_groups_has_gradients = []

        self.step_count = 0
        self.group_paddings = []

        if self.using_real_optimizer:
            self._setup_for_real_optimizer()

        see_memory_usage('end bf16_optimizer', force=True)

    def _setup_for_real_optimizer(self):
        dp_world_size = dist.get_world_size(group=self.dp_process_group)
        self.partition_count = [dp_world_size for i in range(len(self.optimizer.param_groups))]

        for i, param_group in enumerate(self.optimizer.param_groups):
            see_memory_usage(f'before initializing group {i}', force=True)

            partition_id = dist.get_rank(group=self.real_dp_process_group[i])

            # grab the original list
            trainable_parameters = [param for param in param_group['params'] if param.requires_grad]
            self.bf16_groups.append(trainable_parameters)

            # create flat bf16 params
            self.bf16_groups_flat.append(
                self._flatten_dense_tensors_aligned(self.bf16_groups[i],
                                                    self.nccl_start_alignment_factor * dp_world_size))

            # Make bf16 params point to flat tensor storage
            self._update_storage_to_flattened_tensor(tensor_list=self.bf16_groups[i],
                                                     flat_tensor=self.bf16_groups_flat[i])

            # divide flat weights into equal sized partitions
            partition_size = self.bf16_groups_flat[i].numel() // dp_world_size
            bf16_dp_partitions = [
                self.bf16_groups_flat[i].narrow(0, dp_index * partition_size, partition_size)
                for dp_index in range(dp_world_size)
            ]
            self.bf16_partitioned_groups.append(bf16_dp_partitions)

            # create fp32 params partition
            self.fp32_groups_flat_partition.append(bf16_dp_partitions[partition_id].clone().float().detach())
            self.fp32_groups_flat_partition[i].requires_grad = True

            num_elem_list = [t.numel() for t in self.bf16_groups[i]]

            # create fp32 gradients
            self.fp32_groups_gradients_flat.append(torch.zeros_like(self.bf16_groups_flat[i], dtype=torch.float32))

            # track individual fp32 gradients for entire model
            fp32_gradients = self._split_flat_tensor(flat_tensor=self.fp32_groups_gradients_flat[i],
                                                     num_elem_list=num_elem_list)
            self.fp32_groups_gradients.append(fp32_gradients)
            self.fp32_groups_gradient_dict[i] = fp32_gradients

            # flat tensor corresponding to actual fp32 gradients (i.e., minus alignment padding)
            length_without_padding = sum(num_elem_list)
            self.fp32_groups_actual_gradients_flat.append(
                torch.narrow(self.fp32_groups_gradients_flat[i], 0, 0, length_without_padding))

            # flat tensor corresponding to gradient partition
            self.fp32_groups_gradient_flat_partition.append(
                torch.narrow(self.fp32_groups_gradients_flat[i], 0, partition_id * partition_size, partition_size))

            # track fp32 gradient updates
            self.fp32_groups_has_gradients.append([False] * len(self.bf16_groups[i]))

            # Record padding required for alignment
            if partition_id == dist.get_world_size(group=self.real_dp_process_group[i]) - 1:
                padding = self.bf16_groups_flat[i].numel() - length_without_padding
            else:
                padding = 0

            self.group_paddings.append(padding)

            # update optimizer param groups to reference fp32 params partition
            param_group['params'] = [self.fp32_groups_flat_partition[i]]

            see_memory_usage(f'after initializing group {i}', force=True)

        see_memory_usage('before initialize_optimizer', force=True)
        self.initialize_optimizer_states()
        see_memory_usage('end initialize_optimizer', force=True)

        # Need optimizer states initialized before linking lp to optimizer state
        self._link_all_hp_params()
        self._enable_universal_checkpoint()
        self._param_slice_mappings = self._create_param_mapping()

    def _enable_universal_checkpoint(self):
        for lp_param_group in self.bf16_groups:
            enable_universal_checkpoint(param_list=lp_param_group)

    def _create_param_mapping(self):
        param_mapping = []
        for i, _ in enumerate(self.optimizer.param_groups):
            param_mapping_per_group = OrderedDict()
            for lp in self.bf16_groups[i]:
                if lp._hp_mapping is not None:
                    lp_name = self.param_names[lp]
                    param_mapping_per_group[lp_name] = lp._hp_mapping.get_hp_fragment_address()
            param_mapping.append(param_mapping_per_group)

        return param_mapping

    def _link_all_hp_params(self):
        dp_world_size = dist.get_world_size(group=self.dp_process_group)
        for i, _ in enumerate(self.optimizer.param_groups):
            # Link bf16 and fp32 params in partition
            partition_id = dist.get_rank(group=self.real_dp_process_group[i])
            partition_size = self.bf16_groups_flat[i].numel() // dp_world_size
            flat_hp_partition = self.fp32_groups_flat_partition[i]
            link_hp_params(lp_param_list=self.bf16_groups[i],
                           flat_hp_partition=flat_hp_partition,
                           gradient_dict=self.fp32_groups_gradient_dict,
                           offload_gradient_dict=None,
                           use_offload=False,
                           param_group_index=i,
                           partition_start=partition_id * partition_size,
                           partition_size=partition_size,
                           partition_optimizer_state=self.optimizer.state[flat_hp_partition],
                           dp_group=self.real_dp_process_group[i])

    def initialize_optimizer_states(self):
        """Take an optimizer step with zero-valued gradients to allocate internal
        optimizer state.

        This helps prevent memory fragmentation by allocating optimizer state at the
        beginning of training instead of after activations have been allocated.
        """
        for param_partition, grad_partition in zip(self.fp32_groups_flat_partition,
                                                   self.fp32_groups_gradient_flat_partition):
            param_partition.grad = grad_partition

        self.optimizer.step()

        self.clear_hp_grads()

    def _split_flat_tensor(self, flat_tensor, num_elem_list):
        assert sum(num_elem_list) <= flat_tensor.numel()
        tensor_list = []
        offset = 0
        for num_elem in num_elem_list:
            dense_tensor = torch.narrow(flat_tensor, 0, offset, num_elem)
            tensor_list.append(dense_tensor)
            offset += num_elem

        return tensor_list

    def _update_storage_to_flattened_tensor(self, tensor_list, flat_tensor):
        updated_params = self.unflatten(flat_tensor, tensor_list)
        for p, q in zip(tensor_list, updated_params):
            p.data = q.data

    def _flatten_dense_tensors_aligned(self, tensor_list, alignment):
        return self.flatten(align_dense_tensors(tensor_list, alignment))

    @torch.no_grad()
    def step(self, closure=None):
        if closure is not None:
            raise NotImplementedError(f'{self.__class__} does not support closure.')

        all_groups_norm = get_global_norm_of_tensors(input_tensors=self.get_grads_for_norm(),
                                                     mpu=self.mpu,
                                                     norm_type=self.norm_type)
        self._global_grad_norm = all_groups_norm

        assert all_groups_norm > 0.
        if self.clip_grad > 0.:
            clip_tensors_by_global_norm(input_tensors=self.get_grads_for_norm(for_clipping=True),
                                        max_norm=self.clip_grad,
                                        global_norm=all_groups_norm,
                                        mpu=self.mpu)

        self.optimizer.step()

        self.update_lp_params()

        self.clear_hp_grads()
        self.step_count += 1

    def backward(self, loss, update_hp_grads=True, clear_lp_grads=False, **bwd_kwargs):
        """Perform a backward pass and copy the low-precision gradients to the
        high-precision copy.

        We copy/accumulate to the high-precision grads now to prevent accumulating in the
        bf16 grads after successive backward() calls (i.e., grad accumulation steps > 1)

        The low-precision grads are deallocated during this procedure.
        """
        self.clear_lp_grads()
        loss.backward(**bwd_kwargs)

        if update_hp_grads:
            self.update_hp_grads(clear_lp_grads=clear_lp_grads)

    @torch.no_grad()
    def update_hp_grads(self, clear_lp_grads=False):
        for i, group in enumerate(self.bf16_groups):
            for j, lp in enumerate(group):
                if lp.grad is None:
                    continue

                hp_grad = self.fp32_groups_gradients[i][j]
                assert hp_grad is not None, \
                    f'high precision param has no gradient, lp param_id = {id(lp)} group_info = [{i}][{j}]'

                hp_grad.data.add_(lp.grad.data.to(hp_grad.dtype).view(hp_grad.shape))
                lp._hp_grad = hp_grad
                self.fp32_groups_has_gradients[i][j] = True

                # clear gradients
                if clear_lp_grads:
                    lp.grad = None

    @torch.no_grad()
    def get_grads_for_reduction(self):
        return self.fp32_groups_gradients_flat

    @torch.no_grad()
    def get_grads_for_norm(self, for_clipping=False):
        grads = []
        tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu)
        for i, group in enumerate(self.bf16_groups):
            for j, lp in enumerate(group):
                if not for_clipping:
                    if hasattr(lp, PIPE_REPLICATED) and lp.ds_pipe_replicated:
                        continue

                    if not (tensor_mp_rank == 0 or is_model_parallel_parameter(lp)):
                        continue

                if not self.fp32_groups_has_gradients[i][j]:
                    continue

                grads.append(self.fp32_groups_gradients[i][j])

        return grads

    @torch.no_grad()
    def update_lp_params(self):
        for i, (bf16_partitions,
                fp32_partition) in enumerate(zip(self.bf16_partitioned_groups, self.fp32_groups_flat_partition)):
            partition_id = dist.get_rank(group=self.real_dp_process_group[i])
            bf16_partitions[partition_id].data.copy_(fp32_partition.data)
            # print_rank_0(f'update_lp_params {i=} {partition_id=}', force=True)
            # if i == 0:
            #     print_rank_0(f'{fp32_partition[:10]=}', force=True)

        all_gather_dp_groups(partitioned_param_groups=self.bf16_partitioned_groups,
                             dp_process_group=self.real_dp_process_group,
                             start_alignment_factor=self.nccl_start_alignment_factor,
                             allgather_bucket_size=self.allgather_bucket_size)

    def clear_hp_grads(self):
        for flat_gradients in self.fp32_groups_gradients_flat:
            flat_gradients.zero_()

        for i, group in enumerate(self.fp32_groups_gradients):
            self.fp32_groups_has_gradients[i] = [False] * len(group)

    def clear_lp_grads(self):
        for group in self.bf16_groups:
            for param in group:
                param.grad = None

    def state_dict(self):
        state_dict = {}
        state_dict[CLIP_GRAD] = self.clip_grad
        state_dict[BASE_OPTIMIZER_STATE] = self.optimizer.state_dict()
        state_dict[SINGLE_PARTITION_OF_FP32_GROUPS] = self.fp32_groups_flat_partition
        state_dict[GROUP_PADDINGS] = self.group_paddings
        state_dict[PARTITION_COUNT] = self.partition_count
        state_dict[DS_VERSION] = version
        state_dict[PARAM_SLICE_MAPPINGS] = self._param_slice_mappings

        return state_dict

    # Restore base optimizer fp32 weights from bfloat16 weights
    def _restore_from_bit16_weights(self):
        for i, group in enumerate(self.bf16_groups):
            partition_id = dist.get_rank(group=self.real_dp_process_group[i])
            for bf16_partitions, fp32_partition in zip(self.bf16_partitioned_groups, self.fp32_groups_flat_partition):
                fp32_partition.data.copy_(bf16_partitions[partition_id].data)

    def refresh_fp32_params(self):
        self._restore_from_bit16_weights()

    def load_state_dict(self,
                        state_dict_list,
                        checkpoint_folder,
                        load_optimizer_states=True,
                        load_from_fp32_weights=False):
        if checkpoint_folder:
            self._load_universal_checkpoint(checkpoint_folder, load_optimizer_states, load_from_fp32_weights)
        else:
            self._load_legacy_checkpoint(state_dict_list, load_optimizer_states, load_from_fp32_weights)

    def _load_legacy_checkpoint(self, state_dict_list, load_optimizer_states=True, load_from_fp32_weights=False):
        dp_rank = dist.get_rank(group=self.dp_process_group)
        current_rank_sd = state_dict_list[dp_rank]

        ckpt_version = current_rank_sd.get(DS_VERSION, False)
        assert ckpt_version, f"Empty ds_version in checkpoint, not clear how to proceed"
        ckpt_version = pkg_version.parse(ckpt_version)

        self.clip_grad = current_rank_sd.get(CLIP_GRAD, self.clip_grad)

        if load_optimizer_states:
            self.optimizer.load_state_dict(current_rank_sd[BASE_OPTIMIZER_STATE])

        if load_from_fp32_weights:
            for current, saved in zip(self.fp32_groups_flat_partition,
                                      current_rank_sd[SINGLE_PARTITION_OF_FP32_GROUPS]):
                src_tensor = _get_padded_tensor(saved, current.numel())
                current.data.copy_(src_tensor.data)

        if load_optimizer_states:
            self._link_all_hp_params()

    def _load_universal_checkpoint(self, checkpoint_folder, load_optimizer_states, load_from_fp32_weights):
        self._load_hp_checkpoint_state(checkpoint_folder)

    @property
    def param_groups(self):
        """Forward the wrapped optimizer's parameters."""
        return self.optimizer.param_groups

    def _load_hp_checkpoint_state(self, checkpoint_dir):
        checkpoint_dir = os.path.join(checkpoint_dir, "zero")
        tp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu)
        tp_world_size = self.mpu.get_slice_parallel_world_size()

        for i, _ in enumerate(self.optimizer.param_groups):
            for lp in self.bf16_groups[i]:
                if lp._hp_mapping is not None:
                    #print(f"Loading {self.param_names[lp]} {tp_rank=} {tp_world_size=}")
                    lp.load_hp_checkpoint_state(os.path.join(checkpoint_dir, self.param_names[lp]), tp_rank,
                                                tp_world_size)


def _get_padded_tensor(src_tensor, size):
    if src_tensor.numel() >= size:
        return src_tensor
    padded_tensor = torch.zeros(size, dtype=src_tensor.dtype, device=src_tensor.device)
    slice_tensor = torch.narrow(padded_tensor, 0, 0, src_tensor.numel())
    slice_tensor.data.copy_(src_tensor.data)
    return padded_tensor
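# --- Added standalone sketch (not part of the DeepSpeed source) ---
# Illustrates the pad/flatten/partition arithmetic performed per param group in
# _setup_for_real_optimizer above, using plain torch. The helper below is an
# assumed stand-in for align_dense_tensors + flatten, not the real DeepSpeed API.
import torch


def _flatten_aligned(tensors, alignment):
    # Pad the total element count up to a multiple of `alignment` so that the
    # flat buffer divides evenly across data-parallel ranks.
    total = sum(t.numel() for t in tensors)
    remainder = total % alignment
    if remainder:
        tensors = list(tensors) + [torch.zeros(alignment - remainder, dtype=tensors[0].dtype)]
    return torch.cat([t.reshape(-1) for t in tensors])


if __name__ == '__main__':
    dp_world_size = 4
    alignment = 2 * dp_world_size  # nccl_start_alignment_factor * dp_world_size
    params = [torch.randn(5).to(torch.bfloat16), torch.randn(3, 3).to(torch.bfloat16)]

    flat = _flatten_aligned(params, alignment)       # 5 + 9 = 14 elements, padded to 16
    partition_size = flat.numel() // dp_world_size   # 4 elements per rank
    partitions = [flat.narrow(0, r * partition_size, partition_size) for r in range(dp_world_size)]

    # Each rank keeps an fp32 master copy of only its own slice, as in the optimizer above.
    fp32_partition = partitions[0].clone().float().detach()
    print(partition_size, [p.numel() for p in partitions], fp32_partition.dtype)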
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch
from deepspeed.utils import log_dist
import numpy as np
import logging


class Eigenvalue(object):

    def __init__(self,
                 verbose=False,
                 max_iter=100,
                 tol=1e-2,
                 stability=0,
                 gas_boundary_resolution=1,
                 layer_name='',
                 layer_num=0):
        super().__init__()

        self.verbose = verbose
        self.max_iter = max_iter
        self.tol = tol
        self.stability = stability
        self.gas_boundary_resolution = gas_boundary_resolution
        self.layer_name = layer_name
        self.layer_num = layer_num

        assert len(self.layer_name) > 0 and layer_num > 0

        log_dist(
            f'enabled eigenvalue with verbose={verbose}, max_iter={max_iter}, tol={tol}, stability={stability}, gas_boundary_resolution={gas_boundary_resolution}, layer_name={layer_name}, layer_num={layer_num}',
            ranks=[0])

    # Replace all nan/pos-inf/neg-inf to zero
    # TODO: Pytorch new version may add this function, replace this one by then.
    def nan_to_num(self, x):
        device = x.device
        x = x.cpu().numpy()
        x = np.nan_to_num(x=x, copy=False, nan=0.0, posinf=0.0, neginf=0.0)
        return torch.from_numpy(x).to(device)

    def normalize(self, v):
        norm_squared = self.inner_product(v, v)
        norm = norm_squared**0.5 + self.stability
        normalized_vectors = [vector / norm for vector in v]
        normalized_vectors = [self.nan_to_num(vector) for vector in normalized_vectors]
        return normalized_vectors

    def inner_product(self, xs, ys):
        return sum([torch.sum(x * y) for (x, y) in zip(xs, ys)])

    def get_layers(self, module):
        scope_names = self.layer_name.split('.')
        assert len(scope_names) > 0

        m = module
        for name in scope_names:
            assert hasattr(m, name), "layer_name configuration is invalid."
            m = getattr(m, name)

        return m

    def compute_eigenvalue(self, module, device=None, scale=1.0):
        block_eigenvalue = []
        param_keys = []
        layers = self.get_layers(module)

        for block in range(self.layer_num):
            model_block = layers[block]

            # We found this randn() has obvious accuracy impact in some cases, save/recover random state here.
            rng_state = torch.random.get_rng_state()
            if device is None:
                v = [
                    torch.randn(p.size()) for p in model_block.parameters()
                    if p.grad is not None and p.grad.grad_fn is not None
                ]
            else:
                v = [
                    torch.randn(p.size(), device=device) for p in model_block.parameters()
                    if p.grad is not None and p.grad.grad_fn is not None
                ]
            torch.random.set_rng_state(rng_state)

            grads = [
                param.grad for param in model_block.parameters()
                if param.grad is not None and param.grad.grad_fn is not None
            ]
            params = [
                param for param in model_block.parameters()
                if param.grad is not None and param.grad.grad_fn is not None
            ]

            layer_keys = [id(p) for p in model_block.parameters()]
            param_keys.append(layer_keys)

            v = self.normalize(v)

            # Disable eigenvalue if the model doesn't support second order gradients computation,
            # e.g. when enabling DS transformer kernel.
            if len(grads) == 0 or len(params) == 0:
                log_dist(f'The model does NOT support eigenvalue computation.', ranks=[0], level=logging.WARNING)
                return []

            i = 0
            eigenvalue_current, eigenvalue_previous = 1., 0.

            while (i < self.max_iter) and abs(eigenvalue_current) > 0 and (abs(
                (eigenvalue_current - eigenvalue_previous) / eigenvalue_current) >= self.tol):  # test convergence criteria
                eigenvalue_previous = eigenvalue_current

                Hv = torch.autograd.grad(grads, params, grad_outputs=v, only_inputs=True, retain_graph=True)
                #Hv = [hv.float() for hv in Hv]
                Hv = [self.nan_to_num(hv).float() for hv in Hv]

                eigenvalue_current = self.inner_product(Hv, v).item()

                v = self.normalize(Hv)
                v = [x / scale for x in v]
                i += 1

            eigenvalue_current *= scale
            block_eigenvalue.append(eigenvalue_current)

            if self.verbose:
                log_dist(f'block: {block}, power iteration: {i}, eigenvalue: {eigenvalue_current}', ranks=[0])

        block_eigenvalue = self.post_process(block_eigenvalue)

        if self.verbose:
            log_dist(f'post processed block_eigenvalue: {block_eigenvalue}', ranks=[0])

        # {param_id: (eigenvalue, layer_id)}
        ev_dict = {}
        for i, (layer_keys, value) in enumerate(zip(param_keys, block_eigenvalue)):
            ev_dict.update(dict.fromkeys(layer_keys, (value, i)))

        return ev_dict

    # 1. Map all eigenvalues to [0, 1.0].
    # 2. Some layers can't generate valid eigenvalues on fp16 precision, use 1.0 instead.
    def post_process(self, value_list):
        max_value = abs(max(value_list, key=abs))
        return [abs(v) / max_value if v != 0.0 else 1.0 for v in value_list]
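# --- Added standalone sketch (not part of the DeepSpeed source) ---
# Toy power iteration showing the Hessian-vector product trick that
# compute_eigenvalue above relies on: differentiate the gradients a second time
# with grad_outputs=v. Plain torch on a tiny model; illustrative only.
import torch

if __name__ == '__main__':
    torch.manual_seed(0)
    model = torch.nn.Linear(4, 1)
    x, y = torch.randn(8, 4), torch.randn(8, 1)
    loss = torch.nn.functional.mse_loss(model(x), y)

    params = list(model.parameters())
    # create_graph=True keeps the graph so second-order gradients are available
    grads = torch.autograd.grad(loss, params, create_graph=True)

    v = [torch.randn_like(p) for p in params]
    v_norm = sum((vi * vi).sum() for vi in v).sqrt()
    v = [vi / v_norm for vi in v]

    eigenvalue = 0.0
    for _ in range(20):
        # Hessian-vector product: d<grads, v> / dparams
        Hv = torch.autograd.grad(grads, params, grad_outputs=v, retain_graph=True)
        eigenvalue = sum((hv * vi).sum() for hv, vi in zip(Hv, v)).item()  # Rayleigh quotient
        Hv_norm = sum((hv * hv).sum() for hv in Hv).sqrt() + 1e-6
        v = [hv / Hv_norm for hv in Hv]

    print(f'estimated dominant Hessian eigenvalue: {eigenvalue:.4f}')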
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import os import re import stat import torch import hashlib from collections import defaultdict, OrderedDict, deque from shutil import copyfile import gc from torch.nn.modules import Module from torch.nn.parameter import Parameter from torch.optim import Optimizer from torch.optim.lr_scheduler import _LRScheduler from typing import Callable, Dict, Union, Iterable import deepspeed from deepspeed import comm as dist from deepspeed.runtime.utils import see_memory_usage, DummyOptim from .zero.offload_config import OffloadDeviceEnum from deepspeed.runtime.zero.stage_1_and_2 import DeepSpeedZeroOptimizer from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus from deepspeed.runtime.zero.utils import is_zero_supported_optimizer, ZeRORuntimeException from deepspeed.runtime.zero.parameter_offload import DeepSpeedZeRoOffload from deepspeed.runtime.zero.config import ZERO_OPTIMIZATION from deepspeed.runtime.fp16.fused_optimizer import FP16_Optimizer from deepspeed.runtime.fp16.unfused_optimizer import FP16_UnfusedOptimizer from deepspeed.runtime.bf16_optimizer import BF16_Optimizer from deepspeed.runtime.config import DEEPSPEED_OPTIMIZERS, \ ADAGRAD_OPTIMIZER, ADAM_OPTIMIZER, ADAMW_OPTIMIZER, LAMB_OPTIMIZER, ONEBIT_ADAM_OPTIMIZER, ONEBIT_LAMB_OPTIMIZER, \ TORCH_ADAM_PARAM, ADAM_W_MODE, ADAM_W_MODE_DEFAULT, ZERO_ONE_ADAM_OPTIMIZER from deepspeed.runtime.dataloader import DeepSpeedDataLoader from deepspeed.runtime.constants import \ ROUTE_TRAIN, ROUTE_PREDICT, ROUTE_EVAL, \ PLD_THETA, PLD_GAMMA, BFLOAT16, FP16, AMP, GRADIENT_ACCUMULATION_STEPS, \ DATA_PARALLEL_GROUP, GLOBAL_RANK from deepspeed.runtime.zero.config import ZeroStageEnum from deepspeed.compression import compression_scheduler from deepspeed.compression.constants import \ WEIGHT_QUANTIZE_IN_FORWARD_ENABLED, \ WEIGHT_QUANTIZATION, SHARED_PARAMETERS, \ WEIGHT_QUANTIZE_ENABLED, \ WEIGHT_QUANTIZE_GROUPS, \ WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE, \ WEIGHT_QUANTIZE_CHANGE_RATIO, \ WEIGHT_QUANTIZE_TYPE, \ WEIGHT_QUANTIZE_ROUNDING, \ WEIGHT_QUANTIZE_VERBOSE, \ WEIGHT_QUANTIZE_KERNEL from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT, FROZEN_PARAM_FRAGMENTS from deepspeed.runtime.sparse_tensor import SparseTensor from deepspeed.runtime import lr_schedules from deepspeed.utils import groups from deepspeed.utils import logger, log_dist, instrument_w_nvtx from deepspeed.utils.timer import ThroughputTimer, SynchronizedWallClockTimer from deepspeed.utils.debug import debug_extract_module_and_param_names from deepspeed.monitor.monitor import MonitorMaster from deepspeed.runtime.progressive_layer_drop import ProgressiveLayerDrop from deepspeed.runtime.utils import clip_grad_norm_ from deepspeed.runtime.eigenvalue import Eigenvalue from deepspeed.runtime.data_pipeline.constants import DATA_SAMPLING, \ DATA_ROUTING, DATA_SAMPLING_ENABLED, CURRICULUM_LEARNING, \ CURRICULUM_LEARNING_ENABLED, DATA_SAMPLING_NUM_WORKERS, RANDOM_LTD, \ RANDOM_LTD_ENABLED, RANDOM_LTD_LAYER_ID, RANDOM_LTD_LAYER_NUM, \ RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE, RANDOM_LTD_LAYER_TOKEN_LR_ENABLED, \ RANDOM_LTD_GLOBAL_BATCH_SIZE, RANDOM_LTD_MICRO_BATCH_SIZE, DATA_EFFICIENCY from deepspeed.runtime.data_pipeline.curriculum_scheduler import CurriculumScheduler from deepspeed.runtime.data_pipeline.data_routing.scheduler import RandomLTDScheduler from deepspeed.runtime.data_pipeline.data_routing.helper import remove_random_ltd_state_dict from 
deepspeed.runtime.data_pipeline.data_routing.basic_layer import RandomLayerTokenDrop from deepspeed.runtime.checkpoint_engine.torch_checkpoint_engine import TorchCheckpointEngine from .pipe.module import PipelineModule from .utils import get_ma_status from ..ops.adam import FusedAdam from ..moe.sharded_moe import TopKGate, MOELayer from ..moe.layer import MoE from ..moe.utils import is_moe_param from ..git_version_info import version from deepspeed.profiling.flops_profiler.profiler import FlopsProfiler from deepspeed.utils.logging import print_json_dist, print_configuration from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import UtilsBuilder from deepspeed.runtime.config import DtypeEnum MEMORY_OPT_ALLREDUCE_SIZE = 500000000 DeepSpeedOptimizerCallable = \ Callable[[Union[Iterable[Parameter], Dict[str, Iterable]]], Optimizer] DeepSpeedSchedulerCallable = Callable[[Optimizer], _LRScheduler] try: import apex from apex import amp APEX_INSTALLED = True except ImportError: # Fail silently so we don't spam logs unnecessarily if user isn't using amp APEX_INSTALLED = False def split_half_float_double_sparse(tensors): device_type = get_accelerator().device_name() supported_types = [ "torch.{}.HalfTensor".format(device_type), "torch.{}.FloatTensor".format(device_type), "torch.{}.DoubleTensor".format(device_type), "torch.{}.BFloat16Tensor".format(device_type), SparseTensor.type() ] for t in tensors: assert t.type() in supported_types, f"attempting to reduce an unsupported grad type: {t.type()}" buckets = [] for i, dtype in enumerate(supported_types): bucket = [t for t in tensors if t.type() == dtype] if bucket: buckets.append((dtype, bucket)) return buckets FORWARD_MICRO_TIMER = 'forward_microstep' FORWARD_GLOBAL_TIMER = 'forward' BACKWARD_MICRO_TIMER = 'backward_microstep' BACKWARD_GLOBAL_TIMER = 'backward' BACKWARD_INNER_MICRO_TIMER = 'backward_inner_microstep' BACKWARD_INNER_GLOBAL_TIMER = 'backward_inner' BACKWARD_REDUCE_MICRO_TIMER = 'backward_allreduce_microstep' BACKWARD_REDUCE_GLOBAL_TIMER = 'backward_allreduce' STEP_MICRO_TIMER = 'step_microstep' STEP_GLOBAL_TIMER = 'step' class EngineTimers(object): r"""Wallclock timers for DeepSpeedEngine""" def __init__(self, enable_micro_timers, enable_global_timers): self.forward_timers = [] self.backward_timers = [] self.backward_inner_timers = [] self.backward_reduce_timers = [] self.step_timers = [] self.global_timers = [] self.micro_timers = [] if enable_micro_timers: self.forward_timers += [FORWARD_MICRO_TIMER] self.backward_timers += [BACKWARD_MICRO_TIMER] self.backward_inner_timers += [BACKWARD_INNER_MICRO_TIMER] self.backward_reduce_timers += [BACKWARD_REDUCE_MICRO_TIMER] self.step_timers += [STEP_MICRO_TIMER] self.micro_timers += [ FORWARD_MICRO_TIMER, BACKWARD_MICRO_TIMER, BACKWARD_INNER_MICRO_TIMER, BACKWARD_REDUCE_MICRO_TIMER, STEP_MICRO_TIMER ] if enable_global_timers: self.forward_timers += [FORWARD_GLOBAL_TIMER] self.backward_timers += [BACKWARD_GLOBAL_TIMER] self.backward_inner_timers += [BACKWARD_INNER_GLOBAL_TIMER] self.backward_reduce_timers += [BACKWARD_REDUCE_GLOBAL_TIMER] self.step_timers += [STEP_GLOBAL_TIMER] self.global_timers += [ FORWARD_GLOBAL_TIMER, BACKWARD_GLOBAL_TIMER, BACKWARD_INNER_GLOBAL_TIMER, BACKWARD_REDUCE_GLOBAL_TIMER, STEP_GLOBAL_TIMER ] class DeepSpeedEngine(Module): r"""DeepSpeed engine for training.""" def __init__( self, args, model, optimizer=None, model_parameters=None, training_data=None, lr_scheduler=None, mpu=None, dist_init_required=None, collate_fn=None, config=None, 
config_class=None, dont_change_device=False, ): super(DeepSpeedEngine, self).__init__() self.dont_change_device = dont_change_device self.client_optimizer = optimizer self.client_lr_scheduler = lr_scheduler self.training_data = training_data self.collate_fn = collate_fn self.mpu = mpu self.data_parallel_group = None self.global_steps = 0 self.global_samples = 0 self.micro_steps = 0 self.skipped_steps = 0 self.gradient_average = True self.warn_unscaled_loss = True self.config = config self._config = config_class self.loaded_checkpoint_mp_world_size = None self.loaded_checkpoint_dp_world_size = None self.enable_backward_allreduce = True self.progressive_layer_drop = None self.eigenvalue = None self.block_eigenvalue = None self.gas_boundary_ctr = 0 self.dist_backend = get_accelerator().communication_backend_name() self.has_moe_layers = False self.num_experts = [] self.gate_modules = [] self.moe_layers = [] self._step_applied = False self._global_grad_norm = None self.use_ds_comm = False # False --> Use torch.dist, True --> Use ds.comm backend. self.checkpoint_engine = None self._is_gradient_accumulation_boundary = None self.scale_wrt_gas = None self.losses = [] # for debug purposes - can then debug print: debug_get_module_name(module) debug_extract_module_and_param_names(model) # needed for zero_to_fp32 weights reconstruction to remap nameless data to state_dict self.param_names = {param: name for name, param in model.named_parameters()} self._do_args_sanity_check(args) self._configure_with_arguments(args, mpu) self._do_sanity_check() see_memory_usage(f"DeepSpeed Engine: After args sanity test", force=self.memory_breakdown()) if mpu is not None: if self.elasticity_enabled(): if not self.is_elastic_model_parallel_supported(): assert not self.elasticity_enabled(), ("Elasticity is not currently supported" " with model parallelism.") self._set_distributed_vars(args) dist.configure(self._config) self.monitor = MonitorMaster(self._config.monitor_config) see_memory_usage( f"DeepSpeed Engine: Before configure distributed model", force=self.memory_breakdown(), ) self.pipeline_parallelism = isinstance(model, PipelineModule) # Configure distributed model self._configure_distributed_model(model) self._get_model_parameters() see_memory_usage(f"DeepSpeed Engine: After configure distributed model") # Configure wall clock timers self.timers = SynchronizedWallClockTimer() # Throughput timer self.tput_timer = ThroughputTimer( batch_size=self.train_batch_size(), steps_per_output=self.steps_per_print(), monitor_memory=False, ) log_dist(f"DeepSpeed Flops Profiler Enabled: {self.flops_profiler_enabled()}", ranks=[0]) if self.flops_profiler_enabled(): self.flops_profiler = FlopsProfiler(self.module, self) if training_data: self.training_dataloader = self.deepspeed_io(training_data) else: self.training_dataloader = None # Configure optimizer and scheduler self.optimizer = None self.basic_optimizer = None self.lr_scheduler = None has_optimizer = False if optimizer or self.optimizer_name(): has_optimizer = True # If no parameters given by init default to module parameters if model_parameters is None: model_parameters = self.module.parameters() # Convert model parameters from generator to list if not isinstance(model_parameters, list): model_parameters = list(model_parameters) if has_optimizer: self._configure_optimizer(optimizer, model_parameters) self._configure_lr_scheduler(lr_scheduler) self._report_progress(0) elif self.zero_optimization(): # no optim selected but zero is enabled self.optimizer = 
self._configure_zero_optimizer(optimizer=None) elif self.bfloat16_enabled(): self.optimizer = self._configure_bf16_optimizer(optimizer=None) # Hook optimizer for snip_momentum pruning if hasattr(model, 'pruners'): from ..compression.helper import rewrite_optimizer_step self.optimizer.pruners = model.pruners rewrite_optimizer_step(self.optimizer) # Bookkeeping for sparse support self.sparse_tensor_module_names = set() # if self.sparse_gradients_enabled(): for name, module in self.module.named_modules(): if isinstance(module, (torch.nn.Embedding, torch.nn.EmbeddingBag)) and self.sparse_gradients_enabled(): self.sparse_tensor_module_names.add(name + ".weight") logger.info("Will convert {} to sparse tensor during training".format(name)) self.save_non_zero_checkpoint = False self.save_zero_checkpoint = False if not isinstance(self.optimizer, DeepSpeedZeRoOffload): self._configure_checkpointing(dist_init_required) if self.eigenvalue_enabled(): self.eigenvalue = self._configure_eigenvalue() if self.pld_enabled(): self.progressive_layer_drop = self._configure_progressive_layer_drop() if self.curriculum_enabled_legacy(): self.curriculum_scheduler_legacy = self._configure_curriculum_scheduler_legacy() if self.random_ltd_enabled(): random_ltd_config = self.random_ltd_config() random_ltd_config[RANDOM_LTD_GLOBAL_BATCH_SIZE] = self.train_batch_size() random_ltd_config[RANDOM_LTD_MICRO_BATCH_SIZE] = self.train_micro_batch_size_per_gpu() self.random_ltd_scheduler = self._configure_random_ltd_scheduler(random_ltd_config) # Engine timers self.engine_timers = EngineTimers(enable_micro_timers=self.wall_clock_breakdown(), enable_global_timers=self.wall_clock_breakdown() or self.flops_profiler_enabled()) if self.global_rank == 0: self._config.print("DeepSpeedEngine configuration") if self.dump_state(): print_configuration(self, "DeepSpeedEngine") # Load pre-installed or JIT compile (un)flatten ops util_ops = UtilsBuilder().load() self.flatten = util_ops.flatten self.unflatten = util_ops.unflatten def destroy(self): if self.optimizer is not None and hasattr(self.optimizer, 'destroy'): self.optimizer.destroy() def _get_model_parameters(self): if self.autotuning_profile_model_info(): self.autotuning_model_info = {} num_params = 0 trainable_num_params = 0 for p in self.module.parameters(): # since user code might call deepspeed.zero.Init() before deepspeed.initialize(), need to check the attribute to check if the parameter is partitioned in zero 3 already or not n = 0 if hasattr(p, "ds_tensor"): # if the parameter is partitioned in zero 3 n += p.ds_numel else: # if the parameter is not partitioned in zero 3 yet n += p.numel() num_params += n if p.requires_grad: trainable_num_params += n if self.global_rank == 0: self.autotuning_model_info["num_params"] = num_params * self.mp_world_size self.autotuning_model_info["trainable_num_params"] = trainable_num_params * self.mp_world_size logger.info(f"model parameter = {num_params}") def get_batch_info(self): """Get all training batch related settings. Returns: train_batch_size (int): The effective training batch size. This is the amount of data samples that leads to one step of model update. train_micro_batch_size_per_gpu (int): Batch size to be processed by one GPU in one step (without gradient accumulation). gradient_accumulation_steps (int): Number of training steps to accumulate gradients before averaging and applying them. 
""" return ( self.train_batch_size, self.train_micro_batch_size_per_gpu, self.gradient_accumulation_steps, ) def set_train_batch_size(self, train_batch_size): """Adjust the global batch size by increasing or decreasing the number of micro-batches (i.e., gradient accumulation steps). The size of each micro-batch (i.e., ``train_micro_batch_size_per_gpu``) is not changed. Args: train_batch_size (int): The new global batch size for training. Raises: ValueError: if ``train_batch_size`` is not divisible by the configured micro-batch size and data parallelism. """ if train_batch_size % (self.train_micro_batch_size_per_gpu() * self.dp_world_size) != 0: #print(f'{train_batch_size=} {self.train_micro_batch_size_per_gpu()=} {self.dp_world_size=}') raise ValueError(f'Train batch size must be divisible by micro-batch data parallelism') new_gas = train_batch_size // (self.train_micro_batch_size_per_gpu() * self.dp_world_size) # overwrite config self._config.train_batch_size = train_batch_size self._config.gradient_accumulation_steps = new_gas def set_data_post_process_func(self, post_process_func): if self.training_dataloader is not None: self.training_dataloader.post_process_func = post_process_func def set_custom_curriculum_learning_schedule(self, schedule_func_dict): if self.training_dataloader is not None and self.curriculum_learning_enabled(): self.training_dataloader.data_sampler.set_custom_curriculum_learning_schedule(schedule_func_dict) def get_global_grad_norm(self) -> float: """Return the 2-norm of all gradients. If there is model parallelism, the norm will be global. The computed norm will be cached and reused until the next step() pass. .. note:: In the presence of model parallelism, this is a collective call and acts as a barrier among ``mpu.get_model_parallel_group()``. Returns: float: norm """ return self._global_grad_norm def __getattr__(self, name): """ Pass through attributes defined in the model if they are not overridden by ds-engine. 
""" _module = {} if "module" in self.__dict__: _module = self.__dict__['module'] if name in dir(self): return getattr(self, name) elif name in dir(_module): return getattr(_module, name) else: raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'") def checkpoint_tag_validation_enabled(self): return self._config.checkpoint_tag_validation_enabled def checkpoint_tag_validation_fail(self): return self._config.checkpoint_tag_validation_fail def elasticity_enabled(self): return self._config.elasticity_enabled def is_elastic_model_parallel_supported(self): if self.elasticity_enabled(): # Add code for finding number of GPUs per node automatically if self._config.num_gpus_per_node % self._config.elastic_model_parallel_size == 0: return True else: return False def pld_enabled(self): return self._config.pld_enabled def pld_params(self): return self._config.pld_params def pld_theta(self): return self.pld_params()[PLD_THETA] def pld_gamma(self): return self.pld_params()[PLD_GAMMA] def eigenvalue_enabled(self): return self._config.eigenvalue_enabled def eigenvalue_verbose(self): return self._config.eigenvalue_verbose def eigenvalue_max_iter(self): return self._config.eigenvalue_max_iter def eigenvalue_tol(self): return self._config.eigenvalue_tol def eigenvalue_stability(self): return self._config.eigenvalue_stability def eigenvalue_gas_boundary_resolution(self): return self._config.eigenvalue_gas_boundary_resolution def eigenvalue_layer_name(self): return self._config.eigenvalue_layer_name def eigenvalue_layer_num(self): return self._config.eigenvalue_layer_num def curriculum_enabled_legacy(self): return self._config.curriculum_enabled_legacy def curriculum_params_legacy(self): return self._config.curriculum_params_legacy def data_efficiency_enabled(self): return self._config.data_efficiency_enabled def data_efficiency_config(self): return self._config.data_efficiency_config def data_sampling_enabled(self): return self._config.data_efficiency_config[DATA_SAMPLING][DATA_SAMPLING_ENABLED] def data_sampling_config(self): return self._config.data_efficiency_config[DATA_SAMPLING] def curriculum_learning_enabled(self): return self._config.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_ENABLED] def curriculum_learning_config(self): return self._config.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING] def random_ltd_enabled(self): return self._config.data_efficiency_config[DATA_ROUTING][RANDOM_LTD][RANDOM_LTD_ENABLED] def random_ltd_config(self): return self._config.data_efficiency_config[DATA_ROUTING][RANDOM_LTD] def random_ltd_initialize(self): assert self.random_ltd_enabled() random_ltd_config = self.random_ltd_config() random_ltd_queue = deque([x for x in sorted(random_ltd_config[RANDOM_LTD_LAYER_ID])]) count = 0 for name, layer in self.module.named_modules(): if isinstance(layer, RandomLayerTokenDrop): if len(random_ltd_queue) != 0 and str(random_ltd_queue[0]) in name: ###[1,2,3] layer.init_config(random_ltd_config, self.random_ltd_scheduler, count) random_ltd_queue.popleft() count += 1 if random_ltd_config[RANDOM_LTD_LAYER_NUM] != count: raise ValueError(f'random_ltd_layer_num {random_ltd_config[RANDOM_LTD_LAYER_NUM]} must be \ equivalent to the len of random_ltd_layer_id {count}') if random_ltd_config[RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE][RANDOM_LTD_LAYER_TOKEN_LR_ENABLED]: assert self.client_lr_scheduler is None raise ValueError(f'not yet support') #self.lr_scheduler = lr_schedules.WarmupLayerTokenDecayLR(self.optimizer, 
self.random_ltd_scheduler) def wall_clock_breakdown(self): return self._config.wall_clock_breakdown def flops_profiler_enabled(self): return self._config.flops_profiler_config.enabled or self.autotuning_enabled() def flops_profiler_profile_step(self): step = self._config.flops_profiler_config.profile_step if self._config.autotuning_config.enabled: step = self.autotuning_start_profile_step() return step def flops_profiler_module_depth(self): return self._config.flops_profiler_config.module_depth def flops_profiler_top_modules(self): return self._config.flops_profiler_config.top_modules def flops_profiler_detailed(self): if self._config.autotuning_config.enabled: return False return self._config.flops_profiler_config.detailed def flops_profiler_output_file(self): return self._config.flops_profiler_config.output_file def memory_breakdown(self): return self._config.memory_breakdown def autotuning_enabled(self): return self._config.autotuning_config.enabled def autotuning_start_profile_step(self): return self._config.autotuning_config.start_profile_step def autotuning_end_profile_step(self): return self._config.autotuning_config.end_profile_step def autotuning_metric_path(self): path = self._config.autotuning_config.metric_path if not path: path = os.path.join(os.getcwd(), "autotuning_metric.json") return path def autotuning_model_info_path(self): path = self._config.autotuning_config.model_info_path if not path: path = os.path.join(os.getcwd(), "autotuning_model_info.json") return path def autotuning_metric(self): return self._config.autotuning_config.metric def autotuning_profile_model_info(self): return self.autotuning_enabled( ) and self._config.autotuning_config.model_info and self._config.autotuning_config.model_info.get( "profile", False) def sparse_gradients_enabled(self): return self._config.sparse_gradients_enabled def train_batch_size(self): return self._config.train_batch_size def train_micro_batch_size_per_gpu(self): return self._config.train_micro_batch_size_per_gpu def optimizer_name(self): return (self.client_optimizer.__class__.__name__ if self.client_optimizer else self._config.optimizer_name) def optimizer_params(self): return self._config.optimizer_params def optimizer_legacy_fusion(self): return self._config.optimizer_legacy_fusion def scheduler_name(self): return self._config.scheduler_name def scheduler_params(self): return self._config.scheduler_params def quantize_training(self): return ( self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS] [WEIGHT_QUANTIZE_IN_FORWARD_ENABLED], self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_ENABLED], self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_GROUPS], self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS] [WEIGHT_QUANTIZE_FP16_MIXED_QUANTIZE], self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_CHANGE_RATIO], self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_TYPE], self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_ROUNDING], self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_VERBOSE], self._config.compression_config[WEIGHT_QUANTIZATION][SHARED_PARAMETERS][WEIGHT_QUANTIZE_KERNEL], ) def zero_optimization(self): return self._config.zero_enabled def zero_allow_untested_optimizer(self): return self._config.zero_allow_untested_optimizer def zero_force_ds_cpu_optimizer(self): 
return self._config.zero_force_ds_cpu_optimizer def zero_reduce_scatter(self): return self._config.zero_config.reduce_scatter def zero_overlap_comm(self): return self._config.zero_config.overlap_comm def zero_offload_optimizer(self): return self._config.zero_config.offload_optimizer def zero_offload_param(self): return self._config.zero_config.offload_param def zero_use_cpu_optimizer(self): if self._config.zero_config.offload_optimizer is not None: return self._config.zero_config.offload_optimizer.device in [OffloadDeviceEnum.cpu, OffloadDeviceEnum.nvme] return False def zero_cpu_offload(self): if self._config.zero_config.offload_optimizer is not None: return self._config.zero_config.offload_optimizer.device == OffloadDeviceEnum.cpu return False def zero_sub_group_size(self): return self._config.zero_config.sub_group_size def zero_optimization_stage(self): return self._config.zero_optimization_stage def mics_shard_size(self): return self._config.mics_shard_size def zero_reduce_bucket_size(self): return self._config.zero_config.reduce_bucket_size def zero_allgather_bucket_size(self): return self._config.zero_config.allgather_bucket_size def zero_optimization_partition_gradients(self): return self.zero_optimization_stage() >= ZeroStageEnum.gradients def zero_optimization_partition_weights(self): return self.zero_optimization_stage() >= ZeroStageEnum.weights def zero_contiguous_gradients(self): return self._config.zero_config.contiguous_gradients def zero_load_from_fp32_weights(self): return self._config.zero_config.load_from_fp32_weights def zero_elastic_checkpoint(self): return self._config.zero_config.elastic_checkpoint def zero_max_live_parameters(self): return self._config.zero_config.max_live_parameters def zero_max_reuse_distance(self): return self._config.zero_config.max_reuse_distance def zero_prefetch_bucket_size(self): return self._config.zero_config.prefetch_bucket_size def zero_param_persistence_threshold(self): return self._config.zero_config.param_persistence_threshold def zero_model_persistence_threshold(self): return self._config.zero_config.model_persistence_threshold def zero_gather_16bit_weights_on_model_save(self): return self._config.zero_config.gather_16bit_weights_on_model_save def zero_grad_hooks(self): return self._config.zero_config.grad_hooks def zero_legacy_stage1(self): return self._config.zero_config.legacy_stage1 def zero_ignore_unused_parameters(self): return self._config.zero_config.ignore_unused_parameters def fp16_enabled(self): return self._config.fp16_enabled def bfloat16_enabled(self): return self._config.bfloat16_enabled def fp16_master_weights_and_gradients(self): return self._config.fp16_master_weights_and_gradients def amp_enabled(self): return self._config.amp_enabled def amp_params(self): return self._config.amp_params def fp16_auto_cast(self): return self._config.fp16_auto_cast def loss_scale(self): return self._config.loss_scale def gradient_accumulation_steps(self): return self._config.gradient_accumulation_steps def use_node_local_storage(self): return self._config.use_node_local_storage def load_universal_checkpoint(self): return self._config.load_universal_checkpoint @property def communication_data_type(self): res = self._config.communication_data_type if res is not None: return res if self.fp16_enabled(): return torch.float16 if self.bfloat16_enabled(): return torch.bfloat16 return torch.float32 def postscale_gradients(self): return not self._config.prescale_gradients def gradient_predivide_factor(self): return 
self._config.gradient_predivide_factor def steps_per_print(self): return self._config.steps_per_print def zero_allgather_partitions(self): return self._config.zero_config.allgather_partitions def zero_round_robin_gradients(self): return self._config.zero_config.round_robin_gradients def dump_state(self): return self._config.dump_state def gradient_clipping(self): return self._config.gradient_clipping def dynamic_loss_scale(self): return self._config.loss_scale == 0 def initial_dynamic_scale(self): return self._config.initial_dynamic_scale def dynamic_loss_scale_args(self): return self._config.dynamic_loss_scale_args def swap_tensor_config(self): return self._config.swap_tensor_config def aio_config(self): return self._config.aio_config def get_data_types(self): model_dtype = torch.float32 if self.fp16_enabled(): model_dtype = torch.float16 elif self.bfloat16_enabled(): model_dtype = torch.bfloat16 if self._config.grad_accum_dtype == None: if model_dtype == torch.bfloat16 and not self.zero_optimization(): grad_accum_dtype = torch.float32 else: grad_accum_dtype = model_dtype else: grad_accum_dtype = DtypeEnum(self._config.grad_accum_dtype).value return (model_dtype, grad_accum_dtype) def _configure_lr_scheduler(self, client_lr_scheduler): # First check for scheduler in json configuration lr_scheduler = self._scheduler_from_config(self.optimizer) if lr_scheduler: log_dist(f"DeepSpeed using configured LR scheduler = {self.scheduler_name()}", ranks=[0]) self.lr_scheduler = lr_scheduler else: if isinstance(client_lr_scheduler, Callable): log_dist('DeepSpeed using client callable to create LR scheduler', ranks=[0]) self.lr_scheduler = client_lr_scheduler(self.basic_optimizer) else: log_dist('DeepSpeed using client LR scheduler', ranks=[0]) self.lr_scheduler = client_lr_scheduler log_dist(f'DeepSpeed LR Scheduler = {self.lr_scheduler}', ranks=[0]) def _configure_checkpointing(self, dist_init_required): self.checkpoint_engine = TorchCheckpointEngine() if self._config is not None and self._config.nebula_config.enabled: try: from deepspeed.runtime.checkpoint_engine.nebula_checkpoint_engine import \ NebulaCheckpointEngine self.checkpoint_engine = NebulaCheckpointEngine(config_params=self._config.nebula_config) except ImportError as err: logger.error(f"No torch_nebula was found! Will fall back to torch.save. 
Details: {err}") self.checkpoint_engine = TorchCheckpointEngine() dp_rank = self.global_rank if self.mpu: dp_rank = self.mpu.get_data_parallel_rank() rank = self.local_rank if self.use_node_local_storage() else dp_rank # only the first data parallel process needs to store the model checkpoint # if you want to use node local storage this must be done by rank 0 on each # node self.save_non_zero_checkpoint = (rank == 0) or self.zero_optimization_partition_weights() if self.zero_optimization() or self.bfloat16_enabled(): param_rank = dist.get_rank(group=self.optimizer.dp_process_group) # Only the first parameter parallel process needs to store the # optimizer state checkpoints for zero self.save_zero_checkpoint = param_rank == dp_rank def _scheduler_from_config(self, optimizer): scheduler_name = self.scheduler_name() if scheduler_name is not None: if hasattr(lr_schedules, scheduler_name): scheduler = getattr(lr_schedules, scheduler_name) else: assert hasattr(torch.optim.lr_scheduler, scheduler_name), f"DeepSpeed does not recognize LR scheduler {scheduler_name}" scheduler = getattr(torch.optim.lr_scheduler, scheduler_name) scheduler_params = self.scheduler_params() instantiated_scheduler = scheduler(optimizer, **scheduler_params) return instantiated_scheduler else: return None def _set_distributed_vars(self, args): device_rank = args.device_rank if args is not None and hasattr(args, 'device_rank') else self.local_rank if device_rank >= 0: get_accelerator().set_device(device_rank) self.device = torch.device(get_accelerator().device_name(), device_rank) self.world_size = dist.get_world_size() self.global_rank = dist.get_rank() else: self.world_size = 1 self.global_rank = 0 self.device = torch.device(get_accelerator().device_name()) # Configure based on command line arguments def _configure_with_arguments(self, args, mpu): # After the distributed backend is initialized we are guaranteed the LOCAL_RANK # environment variable is set. We must align args.local_rank to this value for # backwards compatibility with scripts relying on [args|self].local_rank containing # the correct local rank info. _do_args_sanity_check will ensure this is the case. if "OMPI_COMM_WORLD_LOCAL_RANK" in os.environ: ompi_local_rank = os.environ.get("OMPI_COMM_WORLD_LOCAL_RANK") local_rank = os.environ.get('LOCAL_RANK', ompi_local_rank) assert ompi_local_rank == local_rank, f"LOCAL_RANK ({local_rank}) != OMPI_COMM_WORLD_LOCAL_RANK ({ompi_local_rank}), " \ "not sure how to proceed as we're seeing conflicting local rank info." os.environ['LOCAL_RANK'] = local_rank self.local_rank = int(os.environ['LOCAL_RANK']) if hasattr(args, 'local_rank'): args.local_rank = self.local_rank # Validate command line arguments def _do_args_sanity_check(self, args): assert "LOCAL_RANK" in os.environ or "OMPI_COMM_WORLD_LOCAL_RANK" in os.environ, "DeepSpeed requires the LOCAL_RANK environment " \ "variable, it is set by the deepspeed launcher, deepspeed.init_distributed, or the torch's launcher. If using a " \ "different launcher please ensure LOCAL_RANK is set prior to initializing deepspeed." if hasattr(args, 'local_rank') and args.local_rank != None: assert isinstance(args.local_rank, int), f"args.local_rank of {args.local_rank} is an unknown type {type(args.local_rank)}" if args.local_rank >= 0: env_local_rank = int(os.environ.get("LOCAL_RANK")) assert ( env_local_rank == args.local_rank ), f"Mismatch in local rank setting, args.local_rank={args.local_rank} but env['LOCAL_RANK']={env_local_rank}." 
def _is_supported_optimizer(self, optimizer_name): return (optimizer_name in DEEPSPEED_OPTIMIZERS or getattr(torch.optim, optimizer_name, None) is not None) def _supported_optims(self): FairseqOptimizer = None try: from fairseq.optim.fairseq_optimizer import FairseqOptimizer except ImportError: pass expected_optim_types = [Optimizer] if FairseqOptimizer: # fairseq optims are not torch.optim objects expected_optim_types.append(FairseqOptimizer) return expected_optim_types # Validate configuration based on command line arguments def _do_sanity_check(self): expected_optim_types = self._supported_optims() expected_optim_types += [type(None), Callable] assert isinstance(self.client_optimizer, tuple(expected_optim_types)), \ f'Client Optimizer is of unexpected type {type(self.client_optimizer)}' if not self.client_optimizer: if self.optimizer_name() is not None: assert self._is_supported_optimizer( self.optimizer_name()), "{} is not a supported DeepSpeed Optimizer".format(self.optimizer_name()) if (self.optimizer_name() == LAMB_OPTIMIZER or self.optimizer_name() == ONEBIT_LAMB_OPTIMIZER): assert (self.dynamic_loss_scale()), "DeepSpeed {} optimizer requires dynamic loss scaling".format( self.optimizer_name()) # Detect invalid combinations of client optimizer and client scheduler if isinstance(self.client_lr_scheduler, _LRScheduler): assert isinstance(self.client_optimizer, Optimizer), \ f'Client Optimizer (type = {type(self.client_optimizer)} is not instantiated but Client LR Scheduler is instantiated' def _broadcast_model(self): def is_replicated(p): if hasattr(p, "ds_status") and p.ds_status is not ZeroParamStatus.AVAILABLE: return False return True for p in self.module.parameters(): # Broadcast the model for different parameters if is_moe_param(p): if torch.is_tensor(p) and is_replicated(p): dist.broadcast(p, groups._get_expert_broadcast_src_rank(p.group_name), group=self.expert_data_parallel_group[p.group_name]) else: if torch.is_tensor(p) and is_replicated(p): dist.broadcast(p, groups._get_broadcast_src_rank(), group=self.data_parallel_group) @staticmethod def __check_params(model: Module, dtype: torch.dtype) -> None: return if not all(param.dtype == dtype for param in model.parameters()) and dist.get_rank() == 0: raise ValueError(f"{dtype} is enabled but the following parameters have dtype that is " f"not {dtype}: " f"{[(n, p.dtype) for n, p in model.named_parameters() if p.dtype != dtype]}") def _set_client_model(self, model): # register client model in _modules so that nn.module methods work correctly modules = self.__dict__.get('_modules') modules['module'] = model # register module attribute in engine but avoid getattr self.__dict__['module'] = model def _configure_distributed_model(self, model): self._set_client_model(model) if self.fp16_enabled(): if self.zero_optimization_partition_weights() and any( [hasattr(param, "ds_id") for param in self.module.parameters()]): self.__check_params(self.module, torch.half) self.module.half() elif self.bfloat16_enabled(): if self.zero_optimization_partition_weights() and any( hasattr(param, 'ds_id') for param in self.module.parameters()): self.__check_params(self.module, torch.bfloat16) self.module.bfloat16() else: self.__check_params(self.module, torch.float) if not self.dont_change_device: self.module.to(self.device) # MoE related initialization for _, module in self.module.named_modules(): if isinstance(module, MoE): self.has_moe_layers = True self.num_experts.append(module.num_experts) if self.has_moe_layers: for _, module in 
self.module.named_modules(): if isinstance(module, TopKGate): self.gate_modules.append(module) if self.wall_clock_breakdown(): module.wall_clock_breakdown = True if isinstance(module, MOELayer): self.moe_layers.append(module) if self.wall_clock_breakdown(): module.wall_clock_breakdown = True # Pass the mpu from here to groups. For subsequent use, just query groups if self.mpu is not None: groups.mpu = self.mpu # Set deepspeed parallelism spec. for the model including expert parallelism for _, module in self.module.named_modules(): if hasattr(module, 'set_deepspeed_parallelism'): module.set_deepspeed_parallelism() # Query the groups module to get information about various parallel groups self.data_parallel_group = groups._get_data_parallel_group() self.dp_world_size = groups._get_data_parallel_world_size() self.mp_world_size = groups._get_model_parallel_world_size() self.expert_parallel_group = groups._get_expert_parallel_group_dict() self.expert_data_parallel_group = groups._get_expert_data_parallel_group_dict() if not self.amp_enabled(): self._broadcast_model() # check if parameters are duplicated in optimizer param_groups def _check_for_duplicates(self, optimizer): for name, param in self.module.named_parameters(): param_id = id(param) def ids_list(group): return [id(param) for param in group] occurrence = sum([ ids_list(group['params']).count(param_id) if param_id in ids_list(group['params']) else 0 for group in optimizer.param_groups ]) assert occurrence <= 1, f"Parameter with name: {name} occurs multiple times in optimizer.param_groups. Make sure it only appears once to prevent undefined behavior." def _do_optimizer_sanity_check(self, basic_optimizer): model_dtype, grad_accum_dtype = self.get_data_types() zero_enabled = self.zero_optimization() amp_enabled = self.amp_enabled() # config based assertions assert ( not (amp_enabled and zero_enabled) ), "Amp and ZeRO are not currently compatible, please use (legacy) fp16 mode which performs similar to amp opt_mode=O2" if zero_enabled: if not is_zero_supported_optimizer(basic_optimizer): assert ( self.zero_allow_untested_optimizer() ), 'You are using an untested ZeRO Optimizer. Please add <"zero_allow_untested_optimizer": true> in the configuration file to use it.' 
if self.global_rank == 0: logger.warning("**** You are using ZeRO with an untested optimizer, proceed with caution *****") if model_dtype == torch.bfloat16 and grad_accum_dtype == torch.float32 and self.zero_optimization_stage( ) == 1: return BFLOAT16 if model_dtype != grad_accum_dtype: raise NotImplementedError( "Model data type and gradient accumulation data type must be equal to use ZeRO") return ZERO_OPTIMIZATION elif amp_enabled: if model_dtype != grad_accum_dtype: raise NotImplementedError( "Model data type and gradient accumulation data type must be equal to use Amp") if model_dtype == torch.bfloat16 or model_dtype == torch.float16: raise NotImplementedError("Cannot enable both amp with (legacy) fp16 or bfloat16 mode") try: logger.info("Initializing Apex amp from: {}".format(amp.__path__)) except NameError: # If apex/amp is available it will be imported above raise RuntimeError("Unable to import apex/amp, please make sure it is installed") return AMP # data type checks elif model_dtype == grad_accum_dtype: if model_dtype == torch.bfloat16: raise NotImplementedError( "Bfloat16 wrapper must use a gradient accumulation type of fp32, enable ZeRO to use Bfloat16 gradient accumulation" ) if model_dtype == torch.float16: return FP16 # else optimizer_wrapper = None elif model_dtype == torch.bfloat16 and grad_accum_dtype == torch.float32: return BFLOAT16 else: raise NotImplementedError("unsupported mix of model dtype and gradient accumulation type") return None # Configure optimizer def _configure_optimizer(self, client_optimizer, model_parameters): if client_optimizer is not None: if isinstance(client_optimizer, tuple(self._supported_optims())): client_optimizer.param_groups[:] = [ pg for pg in client_optimizer.param_groups if len(pg["params"]) != 0 ] log_dist("Removing param_group that has no 'params' in the client Optimizer", ranks=[0]) basic_optimizer = client_optimizer log_dist('Using client Optimizer as basic optimizer', ranks=[0]) else: basic_optimizer = client_optimizer(model_parameters) log_dist('Using client callable to create basic optimizer', ranks=[0]) if self.zero_use_cpu_optimizer() and not isinstance(basic_optimizer, deepspeed.ops.adam.DeepSpeedCPUAdam): if self.zero_force_ds_cpu_optimizer(): msg = f'You are using ZeRO-Offload with a client provided optimizer ({type(basic_optimizer)}) which in most cases will yield poor performance. Please either use deepspeed.ops.adam.DeepSpeedCPUAdam or set an optimizer in your ds-config (https://www.deepspeed.ai/docs/config-json/#optimizer-parameters). If you really want to use a custom optimizer w. ZeRO-Offload and understand the performance impacts you can also set <"zero_force_ds_cpu_optimizer": false> in your configuration file.' 
raise ZeRORuntimeException(msg) else: basic_optimizer = self._configure_basic_optimizer(model_parameters) log_dist(f"Using DeepSpeed Optimizer param name {self.optimizer_name()} as basic optimizer", ranks=[0]) self._check_for_duplicates(basic_optimizer) self.basic_optimizer = basic_optimizer log_dist("DeepSpeed Basic Optimizer = {}".format(basic_optimizer.__class__.__name__), ranks=[0]) optimizer_wrapper = self._do_optimizer_sanity_check(basic_optimizer) if optimizer_wrapper == ZERO_OPTIMIZATION: self.optimizer = self._configure_zero_optimizer(basic_optimizer) elif optimizer_wrapper == AMP: amp_params = self.amp_params() log_dist(f"Initializing AMP with these params: {amp_params}", ranks=[0]) model, self.optimizer = amp.initialize(self.module, basic_optimizer, **amp_params) self._set_client_model(model) self._broadcast_model() # TODO: maybe need to broadcast experts differently? elif optimizer_wrapper == FP16: self.optimizer = self._configure_fp16_optimizer(basic_optimizer) elif optimizer_wrapper == BFLOAT16: self.optimizer = self._configure_bf16_optimizer(basic_optimizer) else: self.optimizer = basic_optimizer log_dist("DeepSpeed Final Optimizer = {}".format(self.optimizer_name()), ranks=[0]) self.compression_scheduler = self._configure_compression_scheduler() self.quantizer = self._configure_quantization() def _configure_basic_optimizer(self, model_parameters): optimizer_parameters = self.optimizer_params() if optimizer_parameters is None: optimizer_parameters = {} # print(optimizer_parameters.keys()) if "max_grad_norm" in optimizer_parameters.keys(): raise ValueError( "'max_grad_norm' is not supported as an optimizer parameter, please switch to using the deepspeed parameter 'gradient_clipping' see: https://www.deepspeed.ai/docs/config-json/#gradient-clipping for more details" ) if self.optimizer_name() in [ADAM_OPTIMIZER, ADAMW_OPTIMIZER]: torch_adam = optimizer_parameters.pop(TORCH_ADAM_PARAM, False) adam_w_mode = optimizer_parameters.pop(ADAM_W_MODE, ADAM_W_MODE_DEFAULT) # Optimizer name of Adam forces AdamW logic unless adam_w_mode is explicitly set effective_adam_w_mode = self.optimizer_name() == ADAMW_OPTIMIZER or adam_w_mode if torch_adam: if not effective_adam_w_mode: optimizer = torch.optim.Adam(model_parameters, **optimizer_parameters) else: optimizer = torch.optim.AdamW(model_parameters, **optimizer_parameters) else: if self.zero_use_cpu_optimizer(): from deepspeed.ops.adam import DeepSpeedCPUAdam optimizer = DeepSpeedCPUAdam(model_parameters, **optimizer_parameters, adamw_mode=effective_adam_w_mode) else: from deepspeed.ops.adam import FusedAdam optimizer = FusedAdam( model_parameters, **optimizer_parameters, adam_w_mode=effective_adam_w_mode, ) elif self.optimizer_name() == ADAGRAD_OPTIMIZER: if self.zero_use_cpu_optimizer(): from deepspeed.ops.adagrad import DeepSpeedCPUAdagrad optimizer = DeepSpeedCPUAdagrad(model_parameters, **optimizer_parameters) else: optimizer = torch.optim.Adagrad(model_parameters, **optimizer_parameters) elif self.optimizer_name() == LAMB_OPTIMIZER: from deepspeed.ops.lamb import FusedLamb optimizer = FusedLamb(model_parameters, **optimizer_parameters) elif self.optimizer_name() == ONEBIT_ADAM_OPTIMIZER: assert not self.zero_optimization(), "1bit-Adam is not compatible with ZeRO" from deepspeed.runtime.fp16.onebit.adam import OnebitAdam optimizer = OnebitAdam(model_parameters, self, **optimizer_parameters) if not self.fp16_enabled(): logger.warning(f"Currently the convergence of 1-bit Adam is only verified under FP16") elif self.optimizer_name() == 
ZERO_ONE_ADAM_OPTIMIZER: assert not self.zero_optimization(), "0/1 Adam is not compatible with ZeRO" from deepspeed.runtime.fp16.onebit.zoadam import ZeroOneAdam optimizer = ZeroOneAdam(model_parameters, self, **optimizer_parameters) if not self.fp16_enabled(): logger.warning(f'Currently the convergence of 0/1 Adam is only verified under FP16') elif self.optimizer_name() == ONEBIT_LAMB_OPTIMIZER: assert not self.zero_optimization(), "1bit-Lamb is not compatible with ZeRO" from deepspeed.runtime.fp16.onebit.lamb import OnebitLamb optimizer = OnebitLamb(model_parameters, self, **optimizer_parameters) if not self.fp16_enabled(): logger.warning(f"Currently the convergence of 1-bit Lamb is only verified under FP16") else: torch_optimizer = getattr(torch.optim, self.optimizer_name()) optimizer = torch_optimizer(model_parameters, **optimizer_parameters) return optimizer def _configure_compression_scheduler(self): return compression_scheduler(self.module, self._config.compression_config) def _configure_random_ltd_scheduler(self, configs): return RandomLTDScheduler(configs) def _configure_quantization(self): ( quantize_weight_in_forward, quantize_enabled, q_groups, q_mixed_fp16, q_change_ratio, q_type, q_rounding, q_verbose, use_quantizer_kernel, ) = self.quantize_training() if quantize_enabled and not quantize_weight_in_forward: assert self.fp16_enabled( ), "MoQ (quantize in optimization step) weight quantization is only supported for FP16" quantizer = None if quantize_enabled and not quantize_weight_in_forward: from deepspeed.runtime.quantize import Quantizer quantizer = Quantizer( q_groups, q_mixed_fp16, q_change_ratio, q_type, q_rounding, q_verbose, self.eigenvalue_enabled(), use_quantizer_kernel, self.eigenvalue_layer_num() if self.eigenvalue_enabled() else 0, ) return quantizer def _configure_fp16_optimizer(self, optimizer): initial_dynamic_scale = self.initial_dynamic_scale() dynamic_loss_args = self.dynamic_loss_scale_args() clip_grad = self.gradient_clipping() if APEX_INSTALLED: fused_opts = (apex.optimizers.FusedAdam, FusedAdam) else: fused_opts = FusedAdam if isinstance(optimizer, fused_opts) \ or self.optimizer_name() in [ONEBIT_ADAM_OPTIMIZER, ZERO_ONE_ADAM_OPTIMIZER]: if self.dynamic_loss_scale(): log_dist(f'Creating fp16 optimizer with dynamic loss scale', ranks=[0]) timers = self.timers if self.wall_clock_breakdown() else None optimizer = FP16_Optimizer( optimizer, deepspeed=self, dynamic_loss_scale=True, initial_dynamic_scale=initial_dynamic_scale, dynamic_loss_args=dynamic_loss_args, mpu=self.mpu, clip_grad=clip_grad, fused_adam_legacy=self.optimizer_legacy_fusion(), timers=timers, has_moe_layers=self.has_moe_layers, ) else: log_dist(f'Creating fp16 optimizer with static loss scale: {self.loss_scale()}', ranks=[0]) optimizer = FP16_Optimizer( optimizer, deepspeed=self, static_loss_scale=self.loss_scale(), mpu=self.mpu, clip_grad=clip_grad, fused_adam_legacy=self.optimizer_legacy_fusion(), has_moe_layers=self.has_moe_layers, ) else: log_dist(f'Creating fp16 unfused optimizer with dynamic loss scale', ranks=[0]) optimizer = FP16_UnfusedOptimizer( optimizer, deepspeed=self, static_loss_scale=self.loss_scale(), dynamic_loss_scale=self.dynamic_loss_scale(), dynamic_loss_args=dynamic_loss_args, mpu=self.mpu, clip_grad=clip_grad, fused_lamb_legacy=self.optimizer_name() == LAMB_OPTIMIZER, ) return optimizer def _configure_bf16_optimizer(self, optimizer): clip_grad = self.gradient_clipping() if optimizer is None: optimizer = DummyOptim(list(self.module.parameters())) log_dist('Creating 
BF16 optimizer', ranks=[0]) timers = self.timers if self.wall_clock_breakdown() else None optimizer = BF16_Optimizer(optimizer, self.param_names, mpu=self.mpu, clip_grad=clip_grad, allgather_bucket_size=self.zero_allgather_bucket_size(), dp_process_group=self.data_parallel_group, timers=timers) return optimizer def _configure_zero_optimizer(self, optimizer): zero_stage = self.zero_optimization_stage() mics_shard_size = self.mics_shard_size() model_dtype, grad_accum_dtype = self.get_data_types() timers = self.timers if self.wall_clock_breakdown() else None if optimizer is None: optimizer = DummyOptim(list(self.module.parameters())) if self.zero_legacy_stage1(): raise Exception( "The deprecated version of ZeRO Stage 1 is not supported in deepspeed >= 0.5.9. Please downgrade to a version less than 0.5.9 if you need to use this deprecated version of ZeRO." ) if zero_stage <= ZeroStageEnum.gradients: overlap_comm = self.zero_overlap_comm() contiguous_gradients = self.zero_contiguous_gradients() round_robin_gradients = self.zero_round_robin_gradients() assert not isinstance(optimizer, DummyOptim), "zero stage {} requires an optimizer".format(zero_stage) log_dist(f'Creating {model_dtype} ZeRO stage {zero_stage} optimizer', ranks=[0]) # Overlap and contiguous grads are meaningless in stage 1 and are ignored if zero_stage == ZeroStageEnum.optimizer_states: overlap_comm = False round_robin_gradients = False # Non-MoE requires contiguous grads to be disabled w. stage 1 if not self.has_moe_layers: contiguous_gradients = False if isinstance(self.module, PipelineModule): if overlap_comm: logger.warning("Pipeline parallelism does not support overlapped communication, will be disabled.") overlap_comm = False optimizer = DeepSpeedZeroOptimizer( optimizer, self.param_names, timers=timers, static_loss_scale=self.loss_scale(), dynamic_loss_scale=self.dynamic_loss_scale(), dynamic_loss_args=self.dynamic_loss_scale_args(), clip_grad=self.gradient_clipping(), contiguous_gradients=contiguous_gradients, reduce_bucket_size=self.zero_reduce_bucket_size(), allgather_bucket_size=self.zero_allgather_bucket_size(), dp_process_group=self.data_parallel_group, expert_parallel_group=self.expert_parallel_group if self.has_moe_layers else None, expert_data_parallel_group=self.expert_data_parallel_group if self.has_moe_layers else None, reduce_scatter=self.zero_reduce_scatter(), overlap_comm=overlap_comm, cpu_offload=self.zero_cpu_offload(), mpu=self.mpu, postscale_gradients=self.postscale_gradients(), gradient_predivide_factor=self.gradient_predivide_factor(), gradient_accumulation_steps=self.gradient_accumulation_steps(), ignore_unused_parameters=self.zero_ignore_unused_parameters(), partition_grads=zero_stage == ZeroStageEnum.gradients, round_robin_gradients=round_robin_gradients, has_moe_layers=self.has_moe_layers, fp16_master_weights_and_gradients=self.fp16_master_weights_and_gradients(), communication_data_type=self.communication_data_type, elastic_checkpoint=self.zero_elastic_checkpoint()) elif zero_stage == ZeroStageEnum.weights: assert not self.has_moe_layers, "MoE not supported with Stage 3" if isinstance(optimizer, DummyOptim): log_dist("Creating ZeRO Offload", ranks=[0]) optimizer = DeepSpeedZeRoOffload(self.module, timers=timers, ds_config=self.config, overlap_comm=self.zero_overlap_comm(), prefetch_bucket_size=self.zero_prefetch_bucket_size(), max_reuse_distance=self.zero_max_reuse_distance(), max_live_parameters=self.zero_max_live_parameters(), param_persistence_threshold=self.zero_param_persistence_threshold(), 
model_persistence_threshold=self.zero_model_persistence_threshold(), offload_param_config=self.zero_offload_param(), mpu=self.mpu) else: log_dist( f'Creating fp16 ZeRO stage {zero_stage} optimizer,' f' MiCS is enabled {mics_shard_size>0},' f' Hierarchical params gather {self._config.mics_hierarchial_params_gather}', ranks=[0]) if mics_shard_size > 0: return self._return_mics_optimizer(optimizer, timers) log_dist(f'Creating {model_dtype} ZeRO stage {zero_stage} optimizer', ranks=[0]) from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3 optimizer = DeepSpeedZeroOptimizer_Stage3( self.module, optimizer, timers=timers, ds_config=self.config, static_loss_scale=self.loss_scale(), dynamic_loss_scale=self.dynamic_loss_scale(), dynamic_loss_args=self.dynamic_loss_scale_args(), clip_grad=self.gradient_clipping(), contiguous_gradients=self.zero_contiguous_gradients(), reduce_bucket_size=self.zero_reduce_bucket_size(), prefetch_bucket_size=self.zero_prefetch_bucket_size(), max_reuse_distance=self.zero_max_reuse_distance(), max_live_parameters=self.zero_max_live_parameters(), param_persistence_threshold=self.zero_param_persistence_threshold(), model_persistence_threshold=self.zero_model_persistence_threshold(), dp_process_group=self.data_parallel_group, reduce_scatter=self.zero_reduce_scatter(), overlap_comm=self.zero_overlap_comm(), offload_optimizer_config=self.zero_offload_optimizer(), offload_param_config=self.zero_offload_param(), sub_group_size=self.zero_sub_group_size(), mpu=self.mpu, postscale_gradients=self.postscale_gradients(), gradient_predivide_factor=self.gradient_predivide_factor(), gradient_accumulation_steps=self.gradient_accumulation_steps(), aio_config=self.aio_config(), communication_data_type=self.communication_data_type) else: raise NotImplementedError("ZeRO stage {} not implemented".format(zero_stage)) return optimizer def _return_mics_optimizer(self, basic_optimizer, timers): from deepspeed.runtime.zero.mics import MiCS_Optimizer optimizer = MiCS_Optimizer(self.module, basic_optimizer, timers=timers, ds_config=self.config, static_loss_scale=self.loss_scale(), dynamic_loss_scale=self.dynamic_loss_scale(), dynamic_loss_args=self.dynamic_loss_scale_args(), clip_grad=self.gradient_clipping(), contiguous_gradients=self.zero_contiguous_gradients(), reduce_bucket_size=self.zero_reduce_bucket_size(), prefetch_bucket_size=self.zero_prefetch_bucket_size(), max_reuse_distance=self.zero_max_reuse_distance(), max_live_parameters=self.zero_max_live_parameters(), param_persistence_threshold=self.zero_param_persistence_threshold(), model_persistence_threshold=self.zero_model_persistence_threshold(), dp_process_group=self.data_parallel_group, reduce_scatter=self.zero_reduce_scatter(), overlap_comm=self.zero_overlap_comm(), offload_optimizer_config=self.zero_offload_optimizer(), offload_param_config=self.zero_offload_param(), sub_group_size=self.zero_sub_group_size(), mpu=self.mpu, postscale_gradients=self.postscale_gradients(), gradient_predivide_factor=self.gradient_predivide_factor(), gradient_accumulation_steps=self.gradient_accumulation_steps(), aio_config=self.aio_config(), communication_data_type=self.communication_data_type) return optimizer def _configure_eigenvalue(self): eigenvalue = Eigenvalue( verbose=self.eigenvalue_verbose(), max_iter=self.eigenvalue_max_iter(), tol=self.eigenvalue_tol(), stability=self.eigenvalue_stability(), gas_boundary_resolution=self.eigenvalue_gas_boundary_resolution(), layer_name=self.eigenvalue_layer_name(), 
layer_num=self.eigenvalue_layer_num(), ) return eigenvalue def _configure_progressive_layer_drop(self): pld = ProgressiveLayerDrop(theta=self.pld_theta(), gamma=self.pld_gamma()) return pld def _configure_curriculum_scheduler_legacy(self): scheduler = CurriculumScheduler(self.curriculum_params_legacy()) return scheduler @staticmethod def is_map_style_dataset(obj): return hasattr(obj, "__getitem__") and hasattr(obj, "__len__") @staticmethod def is_iterable_style_dataset(obj): return isinstance(obj, torch.utils.data.IterableDataset) # hasattr(obj, "__iter__") should work as well def dataloader_drop_last(self): return self._config.dataloader_drop_last def was_step_applied(self) -> bool: """Returns True if the latest ``step()`` produced in parameter updates. Note that a ``False`` return is not an error condition. Steps are frequently no-ops, such as between gradient accumulation boundaries or when overflows occur. Returns: bool: Whether the latest ``step()`` modified model parameters. """ return self._step_applied def deepspeed_io(self, dataset, batch_size=None, route=ROUTE_TRAIN, pin_memory=True, data_sampler=None, collate_fn=None, num_local_io_workers=None): if not (self.is_map_style_dataset(dataset) or self.is_iterable_style_dataset(dataset)): raise ValueError("Training data must be a torch Dataset") if batch_size is None: batch_size = self.train_micro_batch_size_per_gpu() if collate_fn is None: collate_fn = self.collate_fn # Currently we only use timer in train route deepspeed_io_timer = None if route == ROUTE_TRAIN: deepspeed_io_timer = self.tput_timer # If mpu is provided, forward world size and parallel rank to sampler. data_parallel_world_size = self.dp_world_size data_parallel_rank = self.global_rank if self.mpu is not None: data_parallel_world_size = self.mpu.get_data_parallel_world_size() data_parallel_rank = self.mpu.get_data_parallel_rank() if data_sampler is None and (route == ROUTE_PREDICT or route == ROUTE_EVAL): data_sampler = torch.utils.data.DistributedSampler( dataset, num_replicas=data_parallel_world_size, rank=data_parallel_rank, shuffle=False, ) deepspeed_dataloader_config = {} if self.curriculum_learning_enabled(): deepspeed_dataloader_config = { CURRICULUM_LEARNING: self.curriculum_learning_enabled(), DATA_EFFICIENCY: self.data_efficiency_config(), DATA_PARALLEL_GROUP: self.data_parallel_group, GRADIENT_ACCUMULATION_STEPS: self.gradient_accumulation_steps(), GLOBAL_RANK: self.global_rank, DATA_SAMPLING_NUM_WORKERS: self.data_sampling_config()[DATA_SAMPLING_NUM_WORKERS] } return DeepSpeedDataLoader(dataset=dataset, batch_size=batch_size, pin_memory=pin_memory, collate_fn=collate_fn, local_rank=self.local_rank, tput_timer=deepspeed_io_timer, num_local_io_workers=num_local_io_workers, data_sampler=data_sampler, data_parallel_world_size=data_parallel_world_size, data_parallel_rank=data_parallel_rank, dataloader_drop_last=self.dataloader_drop_last(), deepspeed_dataloader_config=deepspeed_dataloader_config) def train(self, mode=True): r"""""" self.warn_unscaled_loss = True self.module.train(mode) def eval(self): r"""""" self.warn_unscaled_loss = True self.module.train(False) def _scale_loss_by_gas(self, prescaled_loss): if isinstance(prescaled_loss, torch.Tensor): scaled_loss = prescaled_loss / self.gradient_accumulation_steps() elif isinstance(prescaled_loss, tuple) or isinstance(prescaled_loss, list): scaled_loss = [] for l in prescaled_loss: if isinstance(l, torch.Tensor): scaled_loss.append(l / self.gradient_accumulation_steps()) else: scaled_loss.append(l) else: 
scaled_loss = prescaled_loss if self.warn_unscaled_loss: logger.warning(f"DeepSpeed unable to scale loss because of type: {type(prescaled_loss)}") self.warn_unscaled_loss = False return scaled_loss @instrument_w_nvtx def forward(self, *inputs, **kwargs): r"""Execute forward propagation Arguments: *inputs: Variable length input list **kwargs: variable length keyword arguments """ if self.autotuning_profile_model_info(): ma = get_ma_status() else: see_memory_usage("Engine before forward", force=self.memory_breakdown()) flops_profiler_active = (self.flops_profiler_enabled() and self.global_steps == self.flops_profiler_profile_step() and self.global_rank == 0) # used to check quantization happens at step 0! if self.global_steps == 0 and hasattr(self, "compression_scheduler"): self.compression_scheduler.step(step_zero_check=True) if self.quantizer: tensor_to_quantize = self.optimizer.bit16_groups if self.zero_optimization_stage( ) == 2 else self.optimizer.fp16_groups if self.compression_scheduler.weight_quantization_enabled: self.quantizer.quantize( tensor_to_quantize, (self.optimizer.overflow if self.fp16_enabled() else False), self.eigenvalue_enabled(), None, ) if flops_profiler_active: self.flops_profiler.start_profile(ignore_list=None) if self.module.training: if self.progressive_layer_drop: kwargs.update(self.progressive_layer_drop.get_state()) if self.__class__.__name__ != "PipelineEngine": # TODO: The above if condition is a HACK since for PipelineEngine # it's difficult to inject argument in forward pass. if self.module.training and self.curriculum_enabled_legacy(): self.curriculum_scheduler_legacy.update_difficulty(self.global_steps + 1) if self.curriculum_params_legacy()["curriculum_type"] == "seqlen": kwargs.update({"curriculum_seqlen": self.curriculum_scheduler_legacy.get_current_difficulty()}) if self.module.training and self.random_ltd_enabled(): self.random_ltd_scheduler.update_seq(self.global_steps) if self.zero_optimization_partition_weights(): # Enable automated discovery of external parameters by indicating that # we are in a forward pass. 
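# (Added explanatory note, not in the original code.) "External" parameters here are
# parameters that a module uses in its forward() but that were created in a different
# module, e.g. a tied output head that reuses an embedding weight. A minimal sketch of
# such a module, assuming standard torch APIs only:
#
#   class TiedHead(torch.nn.Module):
#       def __init__(self, embedding: torch.nn.Embedding):
#           super().__init__()
#           self.embedding = embedding  # weight was created elsewhere
#
#       def forward(self, hidden):
#           # Under ZeRO-3 this direct use of embedding.weight must be discovered so
#           # the partitioned tensor can be gathered before it is used.
#           return torch.nn.functional.linear(hidden, self.embedding.weight)
#
# Setting ``_in_forward`` below lets the ZeRO-3 parameter hooks register such accesses
# automatically during the forward pass.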
for module in self.module.modules(): module._parameters._in_forward = True self._start_timers(self.engine_timers.forward_timers) if self.training_dataloader is None: self.tput_timer.start() if self.fp16_auto_cast(): inputs = self._cast_inputs_half(inputs) loss = self.module(*inputs, **kwargs) if self.zero_optimization_partition_weights(): # Disable automated discovery of external parameters for module in self.module.modules(): module._parameters._in_forward = False self._stop_timers(self.engine_timers.forward_timers) if flops_profiler_active: self.flops_profiler.stop_profile() if self.autotuning_profile_model_info(): activation_mem = get_ma_status() - ma self.autotuning_model_info["activation_mem_per_gpu"] = activation_mem print_json_dist(self.autotuning_model_info, [0], path=self.autotuning_model_info_path()) exit() else: see_memory_usage("Engine after forward", force=self.memory_breakdown()) return loss def _cast_inputs_half(self, inputs): if isinstance(inputs, (list, tuple)): new_inputs = [] for v in inputs: new_inputs.append(self._cast_inputs_half(v)) return inputs.__class__(new_inputs) elif isinstance(inputs, dict): new_inputs = {} for k, v in inputs.items(): new_inputs[k] = self._cast_inputs_half(v) return new_inputs elif hasattr(inputs, 'half'): return inputs.half() else: return inputs def print_forward_breakdown(self, fwd_time): gate_time = 0.0 moe_time = 0.0 falltoall = 0.0 salltoall = 0.0 for gate in self.gate_modules: #logger.info(f"Individual TopK gate time: {gate.gate_time:.2f} ms") gate_time += gate.gate_time for l in self.moe_layers: #logger.info(f"MoE layer; total: {l.time_moe:.2f} ms, first alltoall: {l.time_falltoall:.2f}, second alltoall: {l.time_salltoall:.2f}") moe_time += l.time_moe falltoall += l.time_falltoall salltoall += l.time_salltoall # TODO: Allreduce/average them across ranks for more accurate timing. 
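# (Illustrative sketch for the TODO above; not part of the original code.) One possible way
# to average these per-rank timings before logging, using the comm wrapper already imported
# as ``dist`` in this module:
#
#   timings = torch.tensor([fwd_time, gate_time, moe_time, falltoall, salltoall],
#                          device=self.device)
#   dist.all_reduce(timings)          # defaults to a SUM reduction
#   timings /= dist.get_world_size()
#   fwd_time, gate_time, moe_time, falltoall, salltoall = timings.tolist()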
# if deepspeed.comm.get_rank() == 0: log_dist( f"rank={dist.get_rank()} time (ms) | forward: {fwd_time:.2f} (forward_moe: {moe_time:.2f}, 1st alltoall: {falltoall:.2f}, 2nd alltoall: {salltoall:.2f}, top-k: {gate_time:.2f})", ranks=[0]) @instrument_w_nvtx def allreduce_gradients(self, bucket_size=MEMORY_OPT_ALLREDUCE_SIZE): assert not (self.bfloat16_enabled() and self.pipeline_parallelism), \ f'allreduce_gradients() is not valid when bfloat+pipeline_parallelism is enabled' # Pass (PP) gas boundary flag to optimizer (required for zero) self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary() # ZeRO stage >= 2 communicates during non gradient accumulation boundaries as well if self.zero_optimization_partition_gradients(): self.optimizer.overlapping_partition_gradients_reduce_epilogue() # Communicate only at gradient accumulation boundaries elif self.is_gradient_accumulation_boundary(): if self.zero_optimization_stage() == ZeroStageEnum.optimizer_states and hasattr( self.optimizer, 'reduce_gradients'): self.optimizer.reduce_gradients(pipeline_parallel=self.pipeline_parallelism) else: self.buffered_allreduce_fallback(elements_per_buffer=bucket_size) @instrument_w_nvtx def backward(self, loss, allreduce_gradients=True, release_loss=False, retain_graph=False, scale_wrt_gas=True): r"""Execute backward pass on the loss Arguments: loss: Torch tensor on which to execute backward propagation allreduce_gradients: is deprecated, ignored, and will soon be removed' retain_graph: bool, default: false forward on user defined choice of retain_graph """ see_memory_usage("Engine before backward", force=self.memory_breakdown()) if self.scale_wrt_gas is not None: scale_wrt_gas = self.scale_wrt_gas if not allreduce_gradients: logger.warning(f"Argument `allreduce_gradients` is deprecated, ignored, and will soon be removed") # scale loss w.r.t. 
gradient accumulation if needed if self.gradient_accumulation_steps() > 1 and scale_wrt_gas: loss = self._scale_loss_by_gas(loss.float()) # Log training Loss if self.monitor.enabled: if self.is_gradient_accumulation_boundary(): if self.global_rank == 0: self.summary_events = [( f"Train/Samples/train_loss", sum(self.losses) / self.gradient_accumulation_steps(), self.global_samples, )] self.monitor.write_events(self.summary_events) if self.is_gradient_accumulation_boundary(): self.losses = [] else: self.losses.append(loss.mean().item()) self._start_timers(self.engine_timers.backward_timers) assert self.optimizer is not None and not isinstance(self.optimizer, DummyOptim), \ "must provide optimizer during init in order to use backward" self._start_timers(self.engine_timers.backward_inner_timers) if self.zero_optimization(): self.optimizer.is_gradient_accumulation_boundary = self.is_gradient_accumulation_boundary() self.optimizer.backward(loss, retain_graph=retain_graph) elif self.amp_enabled(): # AMP requires delaying unscale when inside gradient accumulation boundaries # https://nvidia.github.io/apex/advanced.html#gradient-accumulation-across-iterations delay_unscale = not self.is_gradient_accumulation_boundary() with amp.scale_loss(loss, self.optimizer, delay_unscale=delay_unscale) as scaled_loss: scaled_loss.backward(retain_graph=retain_graph) elif self.fp16_enabled(): if self.eigenvalue_enabled(): self.optimizer.backward(loss, create_graph=True, retain_graph=True) else: self.optimizer.backward(loss, retain_graph=retain_graph) elif self.bfloat16_enabled(): self.optimizer.backward(loss) else: if self.eigenvalue_enabled(): loss.backward(create_graph=True, retain_graph=True) else: loss.backward(retain_graph=retain_graph) self._stop_timers(self.engine_timers.backward_inner_timers) self._start_timers(self.engine_timers.backward_reduce_timers) if allreduce_gradients and self.enable_backward_allreduce: # Traditional code path that allreduces the module parameter grads self.allreduce_gradients() self._stop_timers(self.engine_timers.backward_reduce_timers) self._stop_timers(self.engine_timers.backward_timers) if release_loss: # loss.data = None pass see_memory_usage("Engine after backward", force=self.memory_breakdown()) return loss def is_gradient_accumulation_boundary(self): """ Query whether the current micro-batch is at the boundary of gradient accumulation, and thus will trigger gradient reductions and an optimizer step. Returns: bool: if the current step is a gradient accumulation boundary. """ if self._is_gradient_accumulation_boundary is None: return (self.micro_steps + 1) % \ self.gradient_accumulation_steps() == 0 else: return self._is_gradient_accumulation_boundary def set_gradient_accumulation_boundary(self, is_boundary): """ Manually overrides the DeepSpeed engine's gradient accumulation boundary state, this is an optional feature and should be used with care. The state should be set before to the intended value before each forward/backward. The final fordward/backward should have the boundary state set to True. This style allows client code to only call engine.step() once after all the gradient accumulation passes are complete. See example below: .. 
code-block:: python engine.set_gradient_accumulation_boundary(False) for _ in range(gradient_accumulation_steps - 1): micro_batch = next(data_loader) loss = engine(micro_batch) engine.backward(loss) engine.set_gradient_accumulation_boundary(True) micro_batch = next(data_loader) loss = engine(micro_batch) engine.backward(loss) engine.step() Arguments: is_boundary (bool): are we at a gradient accumulation boundary or not? """ self._is_gradient_accumulation_boundary = is_boundary self.optimizer.is_gradient_accumulation_boundary = is_boundary def zero_grad(self): """ Zero parameter grads. """ for param_name, param in self.module.named_parameters(): param.grad = None def clip_fp32_gradients(self): clip_grad_norm_(parameters=self.module.parameters(), max_norm=self.gradient_clipping(), mpu=self.mpu) def _take_model_step(self, lr_kwargs, block_eigenvalue={}): if self.gradient_clipping() > 0.0: if not (self.fp16_enabled() or self.bfloat16_enabled() or self.amp_enabled() or self.zero_optimization()): self.clip_fp32_gradients() elif self.amp_enabled(): # AMP's recommended way of doing clipping # https://nvidia.github.io/apex/advanced.html#gradient-clipping master_params = amp.master_params(self.optimizer) clip_grad_norm_(parameters=master_params, max_norm=self.gradient_clipping(), mpu=self.mpu) self.optimizer.step() if hasattr(self.optimizer, '_global_grad_norm'): self._global_grad_norm = self.optimizer._global_grad_norm # Quantize the updated parameter if there is no overflow if self.quantizer: tensor_to_quantize = self.optimizer.bit16_groups if self.zero_optimization_stage( ) == 2 else self.optimizer.fp16_groups if self.compression_scheduler.weight_quantization_enabled: self.quantizer.quantize( tensor_to_quantize, (self.optimizer.overflow if self.fp16_enabled() else False), self.eigenvalue_enabled(), block_eigenvalue, ) # zero grad in basic optimizer could be unreliable and may not exhibit # the behavior that we want if self.bfloat16_enabled(): # TODO: Temporary until bf16_optimizer and zero_optimizer are integrated if self.zero_optimization() and hasattr(self.optimizer, "zero_grad"): self.optimizer.zero_grad() else: pass elif self.zero_optimization() or self.fp16_enabled() or self.amp_enabled(): self.optimizer.zero_grad() else: self.zero_grad() report_progress = self.global_rank == 0 if self.global_rank else True # Check overflow here since in DS fp16 optimizer, the overflow is updated in above step() function. overflow = False if hasattr(self.optimizer, "overflow"): overflow = self.optimizer.overflow self._step_applied = not overflow if overflow: self.skipped_steps += 1 else: self.compression_scheduler.step() if self.lr_scheduler is not None: try: self.lr_scheduler.step(**(lr_kwargs or {})) except TypeError: # XXX Hack to work with Megatron 2.0 and DeepSpeed pipelines. # We don't currently have a way to specify lr_kwargs from # pipe_engine.train_batch() self.lr_scheduler.step(increment=self.train_batch_size()) if report_progress and (self.global_steps + 1) % self.steps_per_print() == 0: self._report_progress(self.global_steps + 1) self.global_steps += 1 self.global_samples += self.train_batch_size() def step(self, lr_kwargs=None): r"""Execute the weight update step after forward and backward propagation on effective_train_batch. """ see_memory_usage("Engine before step", force=self.memory_breakdown()) # Check early because self.global_steps is incremented at some point here. # TODO: Delay self.global_steps increment until very end of this function. 
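# (Illustrative, not from the original source.) The profiler checks below are driven by the
# ``flops_profiler`` section of the DeepSpeed config; the key names are inferred from the
# accessor methods used in this engine and the exact defaults may differ between releases:
#
#   "flops_profiler": {
#     "enabled": true,
#     "profile_step": 1,
#     "module_depth": -1,
#     "top_modules": 1,
#     "detailed": true,
#     "output_file": null
#   }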
flops_profiler_active = self.flops_profiler_enabled( ) and self.global_steps == self.flops_profiler_profile_step() and self.global_rank == 0 self._start_timers(self.engine_timers.step_timers) assert self.optimizer is not None and not isinstance(self.optimizer, DummyOptim), \ "must provide optimizer during init in order to use step" report_progress = False self._step_applied = False # assume False, will flip to True # Update the model when we reach gradient accumulation boundaries if self.is_gradient_accumulation_boundary(): self.gas_boundary_ctr += 1 if (self.eigenvalue_enabled() and (self.gas_boundary_ctr % self.eigenvalue_gas_boundary_resolution() == 0) and self.quantizer.any_precision_switch()): log_dist(f"computing eigenvalue...", ranks=[0]) self.block_eigenvalue = self.eigenvalue.compute_eigenvalue(self.module, self.device, self.optimizer.cur_scale) if self.progressive_layer_drop: self.progressive_layer_drop.update_state(self.global_steps) if (self.eigenvalue_enabled() and not self.gas_boundary_ctr % self.eigenvalue_gas_boundary_resolution() and self.quantizer.any_precision_switch()): self._take_model_step(lr_kwargs, self.block_eigenvalue) else: self._take_model_step(lr_kwargs) report_progress = self.global_rank == 0 if self.global_rank else True self.tput_timer.stop(global_step=self.is_gradient_accumulation_boundary(), report_speed=report_progress) self._stop_timers(self.engine_timers.step_timers) # Log learning rate if self.monitor.enabled: if self.is_gradient_accumulation_boundary(): if self.global_rank == 0: self.summary_events = [(f"Train/Samples/lr", self.get_lr()[0], self.global_samples)] if self.fp16_enabled() and hasattr(self.optimizer, "cur_scale"): self.summary_events.append(( f"Train/Samples/loss_scale", self.optimizer.cur_scale, self.global_samples, )) if (self.eigenvalue_enabled() and not self.gas_boundary_ctr % self.eigenvalue_gas_boundary_resolution()): ev_values = self.block_eigenvalue.values() for i in range(len(ev_values)): self.summary_events.append(( f"Train/Eigenvalues/ModelBlockParam_{i}", self.ev_values[i][0], self.global_samples, )) self.monitor.write_events(self.summary_events) # Check flops profiling if flops_profiler_active: if self.autotuning_enabled(): self.flops = self.flops_profiler.get_total_flops() * 3 else: self.flops_profiler.print_model_profile( profile_step=self.global_steps, module_depth=self.flops_profiler_module_depth(), top_modules=self.flops_profiler_top_modules(), detailed=self.flops_profiler_detailed(), output_file=self.flops_profiler_output_file(), ) self.flops_profiler.end_profile() if self.autotuning_enabled() and self.global_steps == (self.autotuning_end_profile_step() + 1): self._autotuning_exit() if self.wall_clock_breakdown(): # Log micro timing and reset self.timers.log(names=self.engine_timers.micro_timers, memory_breakdown=self.memory_breakdown()) if self.wall_clock_breakdown() or self.flops_profiler_enabled(): # Log global timing and reset if self.is_gradient_accumulation_boundary(): if self.monitor.enabled: self._write_monitor() if self.has_moe_layers: fwd_time = self.timers(FORWARD_GLOBAL_TIMER).elapsed(reset=False) self.print_forward_breakdown(fwd_time=fwd_time) self.timers.log(self.engine_timers.global_timers) self.micro_steps += 1 see_memory_usage("Engine after step", force=self.memory_breakdown()) def _start_timers(self, timer_names): for name in timer_names: self.timers(name).start() def _stop_timers(self, timer_names): record = self.is_gradient_accumulation_boundary() and \ self.flops_profiler_enabled() and \ 
(self.global_steps >= self.flops_profiler_profile_step()) for name in timer_names: self.timers(name).stop(record=record) def _autotuning_exit(self): if self.global_rank == 0: msg = self.timers.get_mean([ FORWARD_GLOBAL_TIMER, BACKWARD_GLOBAL_TIMER, STEP_GLOBAL_TIMER, ], reset=False) titer = msg[FORWARD_GLOBAL_TIMER] + msg[BACKWARD_GLOBAL_TIMER] + msg[STEP_GLOBAL_TIMER] msg["latency"] = titer msg["FLOPS_per_gpu"] = self.flops * 1_000_000 * self.gradient_accumulation_steps() / titer msg["throughput"] = self.train_batch_size() * 1_000_000 / \ msg["latency"] print_json_dist(msg, [0], path=self.autotuning_metric_path()) log_dist( f"Wrote metrics to {self.autotuning_metric_path()}, {os.path.abspath(self.autotuning_metric_path())}", ranks=[0]) import atexit atexit.register(print, "Autotuning: done with running current ds config.") exit() def _write_monitor(self): if self.global_rank == 0: self.summary_events = [ ( f"Train/Samples/elapsed_time_ms_forward", self.timers(FORWARD_GLOBAL_TIMER).elapsed(reset=False), self.global_samples, ), ( f"Train/Samples/elapsed_time_ms_backward", self.timers(BACKWARD_GLOBAL_TIMER).elapsed(reset=False), self.global_samples, ), ( f"Train/Samples/elapsed_time_ms_backward_inner", self.timers(BACKWARD_INNER_GLOBAL_TIMER).elapsed(reset=False), self.global_samples, ), ( f"Train/Samples/elapsed_time_ms_backward_allreduce", self.timers(BACKWARD_REDUCE_GLOBAL_TIMER).elapsed(reset=False), self.global_samples, ), ( f"Train/Samples/elapsed_time_ms_step", self.timers(STEP_GLOBAL_TIMER).elapsed(reset=False), self.global_samples, ), ] self.monitor.write_events(self.summary_events) def _get_optimizer_param(self, param_name): result = [] if not self.optimizer: return result for group in self.optimizer.param_groups: if param_name in group: result.append(group[param_name]) else: result.append(0.0) return result def get_lr(self): return self._get_optimizer_param("lr") def get_type(self): return self._get_optimizer_param("type") def get_mom(self): if self.optimizer_name() in ["SGD", "RMSprop"]: return self._get_optimizer_param("momentum") else: return self._get_optimizer_param("betas") def get_pld_theta(self): if self.progressive_layer_drop: return self.progressive_layer_drop.get_theta() else: return None def _report_progress(self, step): lr = self.get_lr() mom = self.get_mom() log_dist(f"step={step}, skipped={self.skipped_steps}, lr={lr}, mom={mom}", ranks=[0]) def allreduce_bucket(self, bucket, dp_group): tensor = self.flatten(bucket) tensor_to_allreduce = tensor if self.communication_data_type != tensor.dtype: tensor_to_allreduce = tensor.to(self.communication_data_type) if self.postscale_gradients(): if self.gradient_predivide_factor() != 1.0: tensor_to_allreduce.mul_(1.0 / self.gradient_predivide_factor()) dist.all_reduce(tensor_to_allreduce, group=dp_group) if self.gradient_average: if self.gradient_predivide_factor() != dist.get_world_size(group=dp_group): tensor_to_allreduce.mul_(self.gradient_predivide_factor() / dist.get_world_size(group=dp_group)) else: tensor_to_allreduce.mul_(1. 
/ dist.get_world_size(group=dp_group)) dist.all_reduce(tensor_to_allreduce, group=dp_group) if self.communication_data_type != tensor.dtype and tensor is not tensor_to_allreduce: tensor.copy_(tensor_to_allreduce) return tensor def allreduce_and_copy(self, small_bucket, dp_group): allreduced = self.allreduce_bucket(small_bucket, dp_group) for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)): buf.copy_(synced) def allreduce_no_retain(self, bucket, dp_group, numel_per_bucket=500000000): small_bucket = [] numel = 0 for tensor in bucket: small_bucket.append(tensor) numel = numel + tensor.numel() if numel > numel_per_bucket: self.allreduce_and_copy(small_bucket, dp_group) small_bucket = [] numel = 0 if len(small_bucket) > 0: self.allreduce_and_copy(small_bucket, dp_group) def _get_gradients_for_reduction(self): non_expert_grads = [] expert_grads = {} if self.has_moe_layers: for key in self.expert_data_parallel_group.keys(): expert_grads[key] = [] for param_name, param in self.module.named_parameters(): if param.grad is None: # In cases where there is an imbalance of empty grads across # ranks we must create empty grads, this will ensure that every # rank is reducing the same size. In some cases it may make # sense in the future to support the ability to average not # w.r.t. world size but with a different value. param.grad = torch.zeros(param.size(), dtype=param.dtype, device=param.device) grad_data = param.grad.data if param_name in self.sparse_tensor_module_names or grad_data.is_sparse: # Call param.grad without data to avoid problem with setting of updated grads grad_data = SparseTensor(param.grad) if is_moe_param(param): expert_grads[param.group_name].append(grad_data) else: non_expert_grads.append(grad_data) return non_expert_grads, expert_grads def _reduce_non_expert_gradients(self, grads, elements_per_buffer): split_buckets = split_half_float_double_sparse(grads) for _, bucket_tuple in enumerate(split_buckets): bucket_type, bucket = bucket_tuple if self.pipeline_parallelism: dp_group = self.mpu.get_data_parallel_group() else: dp_group = groups._get_data_parallel_group() if bucket_type == SparseTensor.type(): self.sparse_allreduce_no_retain(bucket, dp_group=dp_group) else: self.allreduce_no_retain(bucket, dp_group=dp_group, numel_per_bucket=elements_per_buffer) def _reduce_expert_gradients(self, expert_grads, elements_per_buffer): for ep_name, expert_grads_group in expert_grads.items(): expert_split_buckets = split_half_float_double_sparse(expert_grads_group) for i, bucket_tuple in enumerate(expert_split_buckets): bucket_type, bucket = bucket_tuple if bucket_type == SparseTensor.type(): self.sparse_allreduce_no_retain(bucket, groups._get_expert_data_parallel_group(ep_name)) else: # Separate between diff groups self.allreduce_no_retain(bucket, dp_group=groups._get_expert_data_parallel_group(ep_name), numel_per_bucket=elements_per_buffer) def buffered_allreduce_fallback(self, grads=None, elements_per_buffer=500000000): if grads is None: non_expert_grads, expert_grads = self._get_gradients_for_reduction() else: assert not self.has_moe_layers, "attempting to reduce grads in unsupported way w.r.t. 
MoE" non_expert_grads = grads self._reduce_non_expert_gradients(non_expert_grads, elements_per_buffer) if self.has_moe_layers: self._reduce_expert_gradients(expert_grads, elements_per_buffer) def sparse_allreduce_no_retain(self, bucket, dp_group): allreduced_sparses = self.sparse_allreduce_bucket(bucket, dp_group) # Densify sparse tensor and copy back to original location for tensor in allreduced_sparses: if tensor.is_sparse: tensor.orig_dense_tensor.data = tensor.to_coo_tensor() else: tensor.orig_dense_tensor.copy_(tensor.to_dense()) def sparse_allreduce_bucket(self, bucket, dp_group): sparse_list = [] for sparse in bucket: sparse_list.append(self.sparse_allreduce(sparse, dp_group)) return sparse_list def sparse_allreduce(self, sparse, dp_group): original_data_type = sparse.values.dtype if self.communication_data_type != sparse.values.dtype: if self.communication_data_type in (torch.float16, torch.bfloat16): indices = sparse.indices.to(torch.int32) else: indices = sparse.indices values = sparse.values.to(self.communication_data_type) else: indices = sparse.indices values = sparse.values if self.postscale_gradients(): if self.gradient_average: values.mul_(self.gradient_predivide_factor() / dist.get_world_size(group=dp_group)) else: values.mul_(1. / dist.get_world_size(group=dp_group)) indices_device_list = self.sparse_all_gather(indices, dp_group) values_device_list = self.sparse_all_gather(values, dp_group) sparse.indices = torch.cat(indices_device_list).to(torch.long) sparse.values = torch.cat(values_device_list).to(original_data_type) return sparse def sparse_all_gather(self, value, dp_group): my_size = torch.LongTensor([value.size()[0]]).to(self.device) all_sizes = self.all_gather_scalar(my_size, dp_group) max_size = torch.cat(all_sizes).max() fill_size = max_size - my_size assert value.dim() in [1, 2] if value.dim() == 1: if fill_size > 0: value = torch.cat([value, value.new_empty(fill_size)]) tensor_list = [value.new_empty(max_size) for _ in range(dist.get_world_size(group=dp_group))] else: if fill_size > 0: value = torch.cat([value, value.new_empty(fill_size, value.size()[1])]) tensor_list = [ value.new_empty(max_size, value.size()[1]) for _ in range(dist.get_world_size(group=dp_group)) ] dist.all_gather(tensor_list, value, group=dp_group) tensors = [] for dev_idx, t in enumerate(tensor_list): size = all_sizes[dev_idx][0] tensors.append(t.index_select(0, torch.arange(size, dtype=torch.long, device=self.device))) return tensors def all_gather_scalar(self, value, dp_group): tensor_list = [value.new_zeros(value.size()) for _ in range(dist.get_world_size(group=dp_group))] dist.all_gather(tensor_list, value, group=dp_group) return tensor_list def module_state_dict(self, destination=None, prefix="", keep_vars=False): sd = self.module.state_dict(destination, prefix, keep_vars) if self.random_ltd_enabled(): sd = remove_random_ltd_state_dict(sd) return sd @staticmethod def load_moe_state_dict(checkpoint_path, tag, state_dict, old_moe_load, model=None, mpu=None, num_experts=1, checkpoint_engine=TorchCheckpointEngine()): if old_moe_load: expp_rank = groups._get_expert_data_parallel_rank(groups._get_max_expert_size_name()) num_local_experts = max(num_experts) // groups._get_expert_parallel_world_size( groups._get_max_expert_size_name()) for local_expert_id in range(num_local_experts): global_expert_id = expp_rank * num_local_experts + local_expert_id expert_state_dict = checkpoint_engine.load( DeepSpeedEngine._get_expert_ckpt_name( checkpoint_path, -1, # -1 means ignore layer_id 
global_expert_id, tag, mpu), map_location=torch.device('cpu')) # Updating global -> local expert ids moe_str_prefix = '.deepspeed_moe.experts.deepspeed_experts.' for key in list(expert_state_dict.keys()): local_key = key.replace(f'{moe_str_prefix}{global_expert_id}', f'{moe_str_prefix}{local_expert_id}') expert_state_dict[local_key] = expert_state_dict.pop(key) state_dict.update(expert_state_dict) else: moe_layer_id = 0 for n_module, module in model.named_modules(): if isinstance(module, MoE): # and deepspeed.comm.get_rank() == 0: group_name = module.expert_group_name num_local_experts = module.num_local_experts expp_rank = groups._get_expert_parallel_rank(group_name) # loop all local_experts for local_expert_id in range(num_local_experts): global_expert_id = expp_rank * num_local_experts + local_expert_id expert_state_dict = checkpoint_engine.load(DeepSpeedEngine._get_expert_ckpt_name( checkpoint_path, moe_layer_id, global_expert_id, tag, mpu), map_location=torch.device('cpu')) # print(expert_state_dict.keys()) # Updating global -> local expert ids moe_str_prefix = '.deepspeed_moe.experts.deepspeed_experts.' for key in list(expert_state_dict.keys()): local_key = key.replace(f'{moe_str_prefix}{global_expert_id}', f'{moe_str_prefix}{local_expert_id}') expert_state_dict[local_key] = expert_state_dict.pop(key) state_dict.update(expert_state_dict) moe_layer_id += 1 def load_module_state_dict(self, checkpoint, strict=True, custom_load_fn=None): module_state_dict = checkpoint['module'] if custom_load_fn: custom_load_fn(src=module_state_dict, dst=self.module) else: self.module.load_state_dict( module_state_dict, # TODO strict=strict) if checkpoint.get(FROZEN_PARAM_FRAGMENTS, None) is not None: saved_frozen_params = checkpoint[FROZEN_PARAM_FRAGMENTS] for param in self.module.parameters(): if param.requires_grad: continue if param not in self.param_names: raise ValueError(f"failed to find frozen {param} in named params") name = self.param_names[param] if hasattr(param, 'ds_id'): param.ds_tensor.data.copy_(saved_frozen_params[name].data) else: param.data.copy_(saved_frozen_params[name].data) def _get_zero_ckpt_prefix(self, dp_rank, bf16_mode): return f'{"bf16_" if bf16_mode else ""}zero_pp_rank_{dp_rank}' def _get_rank_zero_ckpt_name(self, checkpoints_path, tag, mp_rank, dp_rank, bf16_mode): file_prefix = self._get_zero_ckpt_prefix(dp_rank, bf16_mode=bf16_mode) zero_ckpt_name = os.path.join( checkpoints_path, str(tag), f"{file_prefix}_mp_rank_{mp_rank:02d}_optim_states.pt", ) return zero_ckpt_name def _get_zero_ckpt_name(self, checkpoints_path, tag): mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank() pp_rank = dist.get_rank(group=self.optimizer.dp_process_group) bf16_mode = self.bfloat16_enabled() return self._get_rank_zero_ckpt_name(checkpoints_path, tag, mp_rank, pp_rank, bf16_mode) def _get_ckpt_name(self, checkpoints_path, tag, mp_placeholder=None): if mp_placeholder is not None: mp_rank_str = mp_placeholder else: mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank() mp_rank_str = f"{mp_rank:02d}" if self.zero_optimization_partition_weights(): filename = "zero_pp_rank_{}".format(dist.get_rank(group=self.optimizer.dp_process_group)) ckpt_name = os.path.join( checkpoints_path, str(tag), f"{filename}_mp_rank_{mp_rank_str}_model_states.pt", ) else: ckpt_name = os.path.join( checkpoints_path, str(tag), "mp_rank_" + mp_rank_str + "_model_states.pt", ) return ckpt_name def _get_optimizer_ckpt_name(self, checkpoints_path, tag, expp_rank): mp_rank = 0 if self.mpu is 
None else self.mpu.get_model_parallel_rank() ckpt_name = os.path.join(checkpoints_path, str(tag), f'expp_rank_{expp_rank}_mp_rank_{mp_rank:02d}_optim_states.pt') return ckpt_name @staticmethod def _get_expert_ckpt_name(checkpoints_path, layer_id, expert_id, tag, mpu=None): mp_rank = 0 if mpu is None else mpu.get_model_parallel_rank() if layer_id <= -1: # Used to support old checkpoint loading ckpt_name = os.path.join(checkpoints_path, '' if tag is None else str(tag), f'expert_{expert_id}_mp_rank_{mp_rank:02d}_model_states.pt') else: # Used to support new checkpoint loading ckpt_name = os.path.join(checkpoints_path, '' if tag is None else str(tag), f'layer_{layer_id}_expert_{expert_id}_mp_rank_{mp_rank:02d}_model_states.pt') return ckpt_name def _get_all_ckpt_names(self, checkpoints_path, tag): # It is required that (checkpoints_path, tag) are consistent among all ranks. ckpt_file_pattern = self._get_ckpt_name(checkpoints_path, tag, mp_placeholder="*") import glob ckpt_files = glob.glob(ckpt_file_pattern) ckpt_files.sort() return ckpt_files def load_checkpoint(self, load_dir, tag=None, load_module_strict=True, load_optimizer_states=True, load_lr_scheduler_states=True, load_module_only=False, custom_load_fn=None): """ Load training checkpoint Arguments: load_dir: Required. Directory to load the checkpoint from tag: Checkpoint tag used as a unique identifier for checkpoint, if not provided will attempt to load tag in 'latest' file load_module_strict: Optional. Boolean to strictly enforce that the keys in state_dict of module and checkpoint match. load_optimizer_states: Optional. Boolean to load the training optimizer states from Checkpoint. Ex. ADAM's momentum and variance load_lr_scheduler_states: Optional. Boolean to add the learning rate scheduler states from Checkpoint. load_module_only: Optional. Boolean to load only the model weights from the checkpoint. Ex. warmstarting. custom_load_fn: Optional. Custom model load function. Returns: A tuple of ``load_path`` and ``client_state``. *``load_path``: Path of the loaded checkpoint. ``None`` if loading the checkpoint failed. *``client_state``: State dictionary used for loading required training states in the client code. Important: under ZeRO3, one cannot load checkpoint with ``engine.load_checkpoint()`` right after ``engine.save_checkpoint()``. It is because ``engine.module`` is partitioned, and ``load_checkpoint()`` wants a pristine model. If insisting to do so, please reinitialize engine before ``load_checkpoint()``. """ if tag is None: latest_tag = "latest_universal" if self.load_universal_checkpoint() else "latest" latest_path = os.path.join(load_dir, latest_tag) if os.path.isfile(latest_path): with open(latest_path, "r") as fd: tag = fd.read().strip() else: if self.load_universal_checkpoint(): raise ValueError(f'Invalid for universal checkpoint: {latest_path} does not exist') else: logger.warning( f"Unable to find latest file at {latest_path}, if trying to load latest " "checkpoint please ensure this file exists or pass an explicit checkpoint tag when loading a checkpoint." 
) return None, None if self.zero_optimization_partition_weights(): # Prepare for checkpoint load by ensuring all parameters are partitioned self.optimizer.checkpoint_event_prologue() load_path, client_states = self._load_checkpoint(load_dir, tag, load_module_strict=load_module_strict, load_optimizer_states=load_optimizer_states, load_lr_scheduler_states=load_lr_scheduler_states, load_module_only=load_module_only, custom_load_fn=custom_load_fn) load_zero_checkpoint = self.zero_optimization() or self.bfloat16_enabled() if load_zero_checkpoint and load_path is not None: success = self._load_zero_checkpoint(load_dir, tag, load_optimizer_states=load_optimizer_states) if not success: self.optimizer._restore_from_bit16_weights() if self.zero_optimization_partition_weights(): self.optimizer.checkpoint_event_epilogue() return load_path, client_states def _load_checkpoint(self, load_dir, tag, load_module_strict=True, load_optimizer_states=True, load_lr_scheduler_states=True, load_module_only=False, custom_load_fn=None): from deepspeed.runtime.state_dict_factory import SDLoaderFactory ckpt_list = self._get_all_ckpt_names(load_dir, tag) sd_loader = SDLoaderFactory.get_sd_loader(ckpt_list, checkpoint_engine=self.checkpoint_engine) is_pipe_parallel = isinstance(self.module, PipelineModule) mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank() load_path, checkpoint, _ = sd_loader.load(self.mp_world_size, mp_rank, is_pipe_parallel=is_pipe_parallel) if checkpoint is None: return None, None if is_pipe_parallel: # Pipeline parallelism uses this to load its own checkpoint files. self._curr_ckpt_path = os.path.join(load_dir, tag) if self.has_moe_layers: # print(checkpoint.keys()) old_moe_load = False if not isinstance(checkpoint['num_experts'], list): old_moe_load = True DeepSpeedEngine.load_moe_state_dict(load_dir, tag, state_dict=checkpoint['module'], old_moe_load=old_moe_load, model=self.module, mpu=self.mpu, num_experts=self.num_experts, checkpoint_engine=self.checkpoint_engine) if not self.load_universal_checkpoint(): self.load_module_state_dict(checkpoint=checkpoint, strict=load_module_strict, custom_load_fn=custom_load_fn) self.loaded_checkpoint_dp_world_size = checkpoint['dp_world_size'] if load_module_only: deepspeed_states = ['module'] if self.optimizer is not None and self.fp16_enabled(): self.optimizer.refresh_fp32_params() else: if self.has_moe_layers: largest_group_name = groups._get_max_expert_size_name() expp_rank = groups._get_expert_parallel_rank(largest_group_name) optim_load_path = self._get_optimizer_ckpt_name(load_dir, tag, expp_rank) optim_checkpoint = self.checkpoint_engine.load(optim_load_path, map_location=torch.device('cpu')) else: optim_checkpoint = checkpoint has_zero_optimizer_state = self.zero_optimization() or self.bfloat16_enabled() if load_optimizer_states and self.optimizer is not None and not has_zero_optimizer_state: if self.fp16_enabled(): self.optimizer.load_state_dict(optim_checkpoint['optimizer'], load_optimizer_states=load_optimizer_states) else: self.optimizer.load_state_dict(optim_checkpoint['optimizer']) if load_lr_scheduler_states and self.lr_scheduler is not None: self.lr_scheduler.load_state_dict(checkpoint['lr_scheduler']) if self.random_ltd_enabled() and self.random_ltd_scheduler is not None and 'random_ltd' in checkpoint: self.random_ltd_scheduler.load_state_dict(checkpoint['random_ltd']) if self.training_dataloader is not None and self.curriculum_learning_enabled( ) and 'data_sampler' in checkpoint: 
self.training_dataloader.data_sampler.load_state_dict(checkpoint['data_sampler']) def get_sparse_tensor_module_names(original_set, loaded_set, original_parameters, loaded_parameters): result = set() for name in original_set: if name in loaded_parameters and name not in loaded_set: continue # parameter existed in previous model and was not sparse result.add(name) for name in loaded_set: if name in original_parameters: result.add(name) # parameter exists in both configs and it was sparse return result if 'sparse_tensor_module_names' in checkpoint: sparse_tensor_module_names = checkpoint['sparse_tensor_module_names'] elif 'csr_tensor_module_names' in checkpoint: sparse_tensor_module_names = checkpoint['csr_tensor_module_names'] else: sparse_tensor_module_names = None if sparse_tensor_module_names is not None: if load_module_strict: self.sparse_tensor_module_names = sparse_tensor_module_names else: self.sparse_tensor_module_names = get_sparse_tensor_module_names( self.sparse_tensor_module_names, sparse_tensor_module_names, dict(self.module.named_parameters()), checkpoint["module"]) self.global_steps = checkpoint['global_steps'] self.global_samples = checkpoint.get('global_samples', self.global_steps * self.train_batch_size()) self.skipped_steps = checkpoint['skipped_steps'] self.loaded_checkpoint_mp_world_size = checkpoint['mp_world_size'] deepspeed_states = [ 'module', 'sparse_tensor_module_names', 'skipped_steps', 'global_steps', 'dp_world_size', 'mp_world_size', 'data_sampler', 'random_ltd' ] client_state = {} if load_lr_scheduler_states: deepspeed_states.append('lr_scheduler') if load_optimizer_states: deepspeed_states.append('optimizer') client_state = {key: value for key, value in checkpoint.items() if not key in deepspeed_states} if not load_optimizer_states and not load_module_only: client_state['optimizer'] = optim_checkpoint['optimizer'] return load_path, client_state def _load_zero_checkpoint(self, load_dir, tag, load_optimizer_states=True): if self.load_universal_checkpoint(): zero_sd_list = None checkpoint_folder = f'{os.path.join(load_dir, tag)}' else: if load_optimizer_states and self.dp_world_size != self.loaded_checkpoint_dp_world_size: raise ZeRORuntimeException("The checkpoint being loaded used a DP " \ f"world size of {self.loaded_checkpoint_dp_world_size} but the " \ f"current world size is {self.dp_world_size}. 
Automatic adjustment " \ "of ZeRO's optimizer state partitioning with a new world size is not " \ "currently supported.") checkpoint_folder = None zero_sd_list = self._get_all_zero_checkpoints(load_dir, tag) if zero_sd_list is None: return False self.optimizer.load_state_dict(state_dict_list=zero_sd_list, load_optimizer_states=load_optimizer_states, load_from_fp32_weights=self.zero_load_from_fp32_weights(), checkpoint_folder=checkpoint_folder) if self.load_universal_checkpoint(): logger.info(f'loaded universal zero checkpoints from {checkpoint_folder} for rank {self.global_rank}') else: logger.info(f"loading {len(zero_sd_list)} zero partition checkpoints for rank {self.global_rank}") return True def _get_mp_rank_zero_checkpoint_names(self, load_dir, tag, mp_rank, dp_world_size, bf16_mode): zero_ckpt_names = [] for dp_rank in range(dp_world_size): ckpt_name = self._get_rank_zero_ckpt_name(checkpoints_path=load_dir, tag=tag, mp_rank=mp_rank, dp_rank=dp_rank, bf16_mode=bf16_mode) zero_ckpt_names.append(ckpt_name) return zero_ckpt_names def _get_all_zero_checkpoint_names(self, load_dir, tag, bf16_mode): mp_rank = 0 if self.mpu is None else self.mpu.get_model_parallel_rank() zero_ckpt_names = self._get_mp_rank_zero_checkpoint_names(load_dir=load_dir, tag=tag, mp_rank=mp_rank, dp_world_size=self.loaded_checkpoint_dp_world_size, bf16_mode=bf16_mode) for i, ckpt_name in enumerate(zero_ckpt_names): if not os.path.exists(ckpt_name): # transparently handle the old file pattern for optim_states if "optim_states.pt" in ckpt_name: ckpt_name_try = ckpt_name.replace("_optim_states.pt", "optim_states.pt") if os.path.exists(ckpt_name_try): zero_ckpt_names[i] = ckpt_name_try continue return zero_ckpt_names def _get_all_zero_checkpoint_state_dicts(self, zero_ckpt_names): zero_sd_list = [] for i, ckpt_name in enumerate(zero_ckpt_names): _state = None if ckpt_name is None: _state = {OPTIMIZER_STATE_DICT: None} # Fully load state for current rank elif self.zero_elastic_checkpoint() or dist.get_rank(group=self.optimizer.dp_process_group) == i: _state = self.checkpoint_engine.load( ckpt_name, map_location='cpu', ) else: _state = {OPTIMIZER_STATE_DICT: None} zero_sd_list.append(_state) zero_optimizer_sd = [sd[OPTIMIZER_STATE_DICT] for sd in zero_sd_list] logger.info(f"successfully read {len(zero_optimizer_sd)} ZeRO state_dicts for rank {self.global_rank}") return zero_optimizer_sd def _get_all_zero_checkpoints(self, load_dir, tag): for bf16_mode in [self.bfloat16_enabled(), not self.bfloat16_enabled()]: zero_ckpt_names = self._get_all_zero_checkpoint_names(load_dir, tag, bf16_mode) if zero_ckpt_names is not None: # Warn if loading checkpoint of different bit16 type if bf16_mode is not self.bfloat16_enabled(): checkpoint_bit16 = BFLOAT16 if bf16_mode else FP16 engine_bit16 = BFLOAT16 if self.bfloat16_enabled() else FP16 logger.warn(f'Loading {checkpoint_bit16} zero checkpoints into {engine_bit16} training engine') return self._get_all_zero_checkpoint_state_dicts(zero_ckpt_names) return None def _checkpoint_tag_validation(self, tag): if self.checkpoint_tag_validation_enabled(): s_hash = hashlib.sha1(tag.encode()) bhash = torch.ByteTensor([s_hash.digest()]).flatten().to(self.device) max_bhash = bhash.clone() min_bhash = bhash.clone() dist.all_reduce(max_bhash, op=dist.ReduceOp.MAX) dist.all_reduce(min_bhash, op=dist.ReduceOp.MIN) valid = all(min_bhash == bhash) and all(max_bhash == bhash) msg = (f"[rank={dist.get_rank()}] The checkpoint tag name '{tag}' is not consistent across " "all ranks. 
Including rank unique information in checkpoint tag could cause issues when " "restoring with different world sizes.") if self.checkpoint_tag_validation_fail(): assert valid, msg elif not valid: logger.warning(msg) def save_checkpoint(self, save_dir, tag=None, client_state={}, save_latest=True): """Save training checkpoint Arguments: save_dir: Required. Directory for saving the checkpoint tag: Optional. Checkpoint tag used as a unique identifier for the checkpoint, global step is used if not provided. Tag name must be the same across all ranks. client_state: Optional. State dictionary used for saving required training states in the client code. save_latest: Optional. Save a file 'latest' pointing to the latest saved checkpoint. Important: all processes must call this method and not just the process with rank 0. It is because each process needs to save its master weights and scheduler+optimizer states. This method will hang waiting to synchronize with other processes if it's called just for the process with rank 0. """ if self.zero_optimization_partition_weights(): # Prepare for checkpoint save by ensuring all parameters are partitioned self.optimizer.checkpoint_event_prologue() rank = self.local_rank if self.use_node_local_storage() else self.global_rank # This is to make sure the checkpoint names are created without collision # There seems to be issue creating them in parallel # Ensure save_dir directory exists self.checkpoint_engine.makedirs(save_dir, exist_ok=True) dist.barrier() if tag is None: tag = f"global_step{self.global_steps}" # Ensure tag is a string tag = str(tag) self.checkpoint_engine.create(tag) # Ensure checkpoint tag is consistent across ranks self._checkpoint_tag_validation(tag) if self.has_moe_layers: self.save_non_zero_checkpoint = False self._create_checkpoint_file(save_dir, tag, False) self._save_moe_checkpoint(save_dir, tag, client_state=client_state) # We distribute the task of saving layer checkpoint files among # data parallel instances, so all procs should call _save_checkpoint. # All procs then call module_state_dict(), but only procs of data # parallel rank 0 save the general model params. if not self.has_moe_layers: self._create_checkpoint_file(save_dir, tag, False) self._save_checkpoint(save_dir, tag, client_state=client_state) if self.save_zero_checkpoint: self._create_zero_checkpoint_files(save_dir, tag) self._save_zero_checkpoint(save_dir, tag) if self.zero_optimization_partition_weights(): self.optimizer.checkpoint_event_epilogue() # Save latest checkpoint tag self.checkpoint_engine.commit(tag) if save_latest and rank == 0: with open(os.path.join(save_dir, 'latest'), 'w') as fd: fd.write(tag) dist.barrier() return True def _get_non_moe_state_dict(self, full_state_dict): """ Get the state dict of the non-moe layers """ for key in list(full_state_dict.keys()): if 'expert' in key and 'moe.gate.wg.weight' not in key: full_state_dict.pop(key) return full_state_dict def _save_moe_checkpoint(self, save_dir, tag, client_state={}): save_path = self._get_ckpt_name(save_dir, tag) # A hack to save the checkpointing directory. Pipeline parallelism overrides # module_state_dict() and uses this path to save the model. module_state_dict() # then instead just returns None. 
# Using layer_#_export_# to save the model's expert state_dict moe_layer_id = 0 for n_module, module in self.module.named_modules(): if isinstance(module, MoE): # and deepspeed.comm.get_rank() == 0: group_name = module.expert_group_name num_local_experts = module.num_local_experts expp_rank = groups._get_expert_parallel_rank(group_name) exp_dp_rank = groups._get_expert_data_parallel_rank(group_name) # print(expp_rank, exp_dp_rank) if exp_dp_rank != 0: moe_layer_id += 1 continue # get all moe parameters moe_state_dict = {} for n, p in module.state_dict().items(): if 'expert' in n and 'moe.gate.wg.weight' not in n: moe_state_dict[n_module + '.' + n] = p moe_str_prefix = '.deepspeed_moe.experts.deepspeed_experts.' # print(moe_state_dict.keys()) # until now, everything is fine. So the bug happens at next few lines # Reorder the moe name rank, so that each checkpoint only has one expert experts_state_dict = defaultdict(dict) for key in list(moe_state_dict.keys()): m = re.match(f".*{moe_str_prefix}([0-9]+).*", key) local_expert_id = None if not m: logger.warn(f'No expert found in key {key}.') else: local_expert_id = m.group(1) global_expert_id = expp_rank * \ num_local_experts + int(local_expert_id) expert_key = key.replace(f'{moe_str_prefix}{local_expert_id}', f'{moe_str_prefix}{global_expert_id}') # truncating extra tensor (shared) storage truncated = moe_state_dict.pop(key).clone().detach() experts_state_dict[str(global_expert_id)][expert_key] = truncated # let save the moe parameters for global_expert_id, expert_state_dict in experts_state_dict.items(): # save the moe parameters moe_save_path = self._get_expert_ckpt_name(save_dir, moe_layer_id, global_expert_id, tag, self.mpu) if self.random_ltd_enabled(): expert_state_dict = remove_random_ltd_state_dict(expert_state_dict) self.checkpoint_engine.save(expert_state_dict, moe_save_path) moe_layer_id += 1 self._curr_ckpt_path = os.path.join(save_dir, tag) largest_group_name = groups._get_max_expert_size_name() expp_rank = groups._get_expert_parallel_rank(largest_group_name) exp_dp_rank = groups._get_expert_data_parallel_rank(largest_group_name) # In the case of E + D parallelism, only the # first expert parallel group should save the expert weights # since each expert parallel group is a copy of the model's experts if exp_dp_rank != 0: return # Save optimizer states. They are different across each exp parallel rank. optimizer_state = { 'optimizer': self.optimizer.state_dict() if self.optimizer and not self.zero_optimization() else None } # TODO: why use BufferedWriter not the path file_path = self._get_optimizer_ckpt_name(save_dir, tag, expp_rank) self.checkpoint_engine.save(optimizer_state, file_path) # get non-moe parameters model_state_dict = self._get_non_moe_state_dict(self.module_state_dict()) if expp_rank == 0: # TODO: update num experts info,.. 
in checkpoint state = { 'module': model_state_dict, 'lr_scheduler': self.lr_scheduler.state_dict() if self.lr_scheduler is not None else None, 'data_sampler': self.training_dataloader.data_sampler.state_dict() if (self.training_dataloader is not None and self.curriculum_learning_enabled()) else None, 'random_ltd': self.random_ltd_scheduler.state_dict() if self.random_ltd_enabled() else None, 'sparse_tensor_module_names': self.sparse_tensor_module_names, 'skipped_steps': self.skipped_steps, 'global_steps': self.global_steps, 'global_samples': self.global_samples, 'dp_world_size': self.dp_world_size, 'mp_world_size': self.mp_world_size, 'num_experts': self.num_experts } state.update(client_state) logger.info(f'Saving model checkpoint: {save_path}') self.checkpoint_engine.save(state, save_path) self._curr_save_path = None def _create_checkpoint_file(self, save_dir, tag, zero_checkpoint): name_function = (self._get_zero_ckpt_name if zero_checkpoint else self._get_ckpt_name) try: checkpoint_name = name_function(save_dir, tag) path = os.path.dirname(checkpoint_name) self.checkpoint_engine.makedirs(path, exist_ok=True) except: logger.error(f"Failed saving model checkpoint to {save_dir} with tag {tag}") return False return True def _create_zero_checkpoint_files(self, save_dir, tag): success = True # zero checkpoint files are created sequentially for rank in range(self.world_size): if rank == self.global_rank: success = self._create_checkpoint_file(save_dir, tag, True) dist.barrier() return success def _save_checkpoint(self, save_dir, tag, client_state={}): save_path = self._get_ckpt_name(save_dir, tag) zero_optimizer_state = self.zero_optimization() or self.bfloat16_enabled() save_frozen_param = self.zero_optimization_partition_gradients() # A hack to save the checkpointing directory. Pipeline parallelism overrides # module_state_dict() and uses this path to save the model. module_state_dict() # then instead just returns None. The module_state_dict() implementation in # PipelineEngine expects the save path to be set in self._curr_ckpt_path. 
self._curr_ckpt_path = os.path.join(save_dir, tag) module = self.module_state_dict() self._curr_ckpt_path = None state = dict(module=module, buffer_names=self._get_buffer_names(), optimizer=self.optimizer.state_dict() if self.optimizer and not zero_optimizer_state else None, param_shapes=self._get_zero_param_shapes() if self.optimizer and zero_optimizer_state else None, frozen_param_shapes=self._get_zero_frozen_param_attributes(self._get_param_shape_func) if save_frozen_param else None, shared_params=self._get_shared_params() if self.optimizer and zero_optimizer_state else None, frozen_param_fragments=self._get_zero_frozen_param_attributes(self._get_param_fragment_func) if save_frozen_param else None, lr_scheduler=self.lr_scheduler.state_dict() if self.lr_scheduler is not None else None, data_sampler=self.training_dataloader.data_sampler.state_dict() if (self.training_dataloader is not None and self.curriculum_learning_enabled()) else None, random_ltd=self.random_ltd_scheduler.state_dict() if self.random_ltd_enabled() else None, sparse_tensor_module_names=self.sparse_tensor_module_names, skipped_steps=self.skipped_steps, global_steps=self.global_steps, global_samples=self.global_samples, dp_world_size=self.dp_world_size, mp_world_size=self.mp_world_size, ds_config=self.config, ds_version=version) state.update(client_state) if self.save_non_zero_checkpoint: log_dist(message=f'Saving model checkpoint: {save_path}', ranks=[0, 1]) self.checkpoint_engine.save(state, save_path) def _get_buffer_names(self): buffer_names = [] # we save buffer names so that we could extract later the real buffers from the saved # state_dict["module"] in the non-zero checkpoint - the buffers are already there but they # are intermixed with param placeholders # have to traverse the tree to be able to skip non-persistent buffers def get_layer_named_buffers(module, prefix=""): for name, buf in module.named_buffers(recurse=False): if buf is not None and name not in module._non_persistent_buffers_set: buffer_names.append(prefix + name) for name, child in module.named_children(): if child is not None: get_layer_named_buffers(child, prefix + name + ".") get_layer_named_buffers(self.module, prefix="") return buffer_names def _get_param_shape_func(self, param): return param.ds_shape if hasattr(param, 'ds_id') else param.shape def _get_param_fragment_func(self, param): return param.ds_tensor.detach().cpu() if hasattr(param, 'ds_id') else param.detach().cpu() def _get_zero_frozen_param_attributes(self, attr_func): frozen_param_fragments = OrderedDict() for param in self.module.parameters(): if param.requires_grad: continue if param not in self.param_names: raise ValueError(f"failed to find frozen {param} in named params") name = self.param_names[param] frozen_param_fragments[name] = attr_func(param) return frozen_param_fragments def _get_zero_param_shapes(self): """Returns a dict of name to shape mapping, only for the flattened fp32 weights saved by the optimizer. the names are exactly as in state_dict. The order is absolutely important, since the saved data is just flattened data with no identifiers and requires reconstruction in the same order it was saved. We can't rely on self.module.named_parameters() to get the saved tensors, as some params will be missing and others unsaved and then it'd be impossible to reconstruct state_dict from the flattened weights. optimizer.bit16_groups seems to be the easiest to use as it's in all zeroX versions. 
""" param_group_shapes = [] cnt = 0 numel = 0 # zero2 started using a round_robin_bit16_groups which is a shuffled version of bit16_groups - # if we don't use it, we get parameters ordered incorrectly if hasattr(self.optimizer, "round_robin_bit16_groups"): bit16_groups = self.optimizer.round_robin_bit16_groups elif self.bfloat16_enabled() and not self.zero_optimization(): bit16_groups = self.optimizer.bf16_groups else: bit16_groups = self.optimizer.bit16_groups if self.zero_optimization_stage( ) == 2 else self.optimizer.fp16_groups for bit16_group in bit16_groups: param_shapes = OrderedDict() for param in bit16_group: cnt += 1 numel += param.ds_numel if hasattr(param, "ds_numel") else param.numel() shape = param.ds_shape if hasattr(param, "ds_shape") else param.shape if param not in self.param_names: raise ValueError(f"failed to find optimizer param in named params") name = self.param_names[param] param_shapes[name] = shape # uncomment to debug zero_to_fp32.py problems # if self.global_rank == 0: print(f"saving param {name} {shape} (numel={shape.numel()})") param_group_shapes.append(param_shapes) # if self.global_rank == 0: print(f"Total saved {numel} numels in {cnt} params") return param_group_shapes def _get_shared_params(self): """ Returns a dict of shared params, which can later be used to reconstruct the original state dict, e.g. in `zero_to_fp32`. Each dict entry is a pair of param names, where the key is the name of the variable that isn't stored and the value is the actual param holding data. """ shared_ds_ids = {} shared_params_by_full_name = {} def get_layer_state_dict(module, prefix=""): # handle params for name, param in module.named_parameters(recurse=False): if param is None or not hasattr(param, "ds_id"): continue key = prefix + name # can't rely on param.data_ptr() as it will be reused as weights gets # gathered and reduced, but param.ds_id is unique across all zero weights # (and shared params will have the same param.ds_id) if param.ds_id in shared_ds_ids: # shared weights #print(f"`{key}` is shared with `{shared_ds_ids[param.ds_id]}`") shared_params_by_full_name[key] = shared_ds_ids[param.ds_id] else: shared_ds_ids[param.ds_id] = key for name, child in module.named_children(): if child is not None: get_layer_state_dict(child, prefix + name + ".") if dist.get_rank() == 0: get_layer_state_dict(self.module, prefix="") return shared_params_by_full_name def _copy_recovery_script(self, save_path): base_dir = os.path.dirname(os.path.dirname(__file__)) script = "zero_to_fp32.py" src = os.path.join(base_dir, "utils", script) dst = os.path.join(save_path, script) #logger.info(f"creating recovery script {dst}") copyfile(src, dst) # make executable os.chmod(dst, os.stat(dst).st_mode | stat.S_IEXEC) def _save_zero_checkpoint(self, save_path, tag): zero_checkpoint_name = self._get_zero_ckpt_name(save_path, tag) zero_sd = dict(optimizer_state_dict=self.optimizer.state_dict(), ds_config=self.config, ds_version=version) self.checkpoint_engine.save(zero_sd, zero_checkpoint_name) if self.global_rank == 0: self._copy_recovery_script(save_path) ckpt_type = 'zero' if self.zero_optimization() else 'bf16_zero' logger.info(f'{ckpt_type} checkpoint saved {zero_checkpoint_name}') def _zero3_consolidated_16bit_state_dict(self): """ Get a full non-partitioned state_dict with fp16 weights on cpu. Important: this function must be called on all ranks and not just rank 0. This is similar to nn.Module.state_dict (modelled after _save_to_state_dict), but: 1. 
consolidates the weights from different partitions on gpu0 2. works on one layer at a time to require as little gpu0 memory as possible, by moving the already consolidated weights to cpu 3. takes care to keep the shared params shared when gradually copying the params to cpu Returns: a consolidated fp16 ``state_dict`` on cpu on rank 0, ``None`` on other ranks """ if not self.zero_optimization_partition_weights(): raise ValueError("this function requires ZeRO-3 mode") state_dict = OrderedDict() if dist.get_rank() == 0 else None shared_params = {} def get_layer_state_dict(module, prefix=""): # gather one layer at a time to be memory-efficient # must use modifier_rank=0 to release GPU memory after each layer gathered #see_memory_usage("before GatheredParameters", force=True) with deepspeed.zero.GatheredParameters(list(module.parameters(recurse=False)), modifier_rank=0): if dist.get_rank() == 0: # handle params for name, param in module.named_parameters(recurse=False): if param is None: continue key = prefix + name # can't rely on param.data_ptr() as it will be reused as weights gets # gathered and reduced, but param.ds_id is unique across all zero weights # (and shared params will have the same param.ds_id) if param.ds_id in shared_params: # shared weights #print(f"`{key}` is shared with `{shared_params[param.ds_id]}`") state_dict[key] = state_dict[shared_params[param.ds_id]] else: state_dict[key] = param.detach().cpu() shared_params[param.ds_id] = key #print(f"param {param.ds_id} {param.shape} {key} ") # now buffers - not sure if need to take care of potentially shared weights here for name, buf in module.named_buffers(recurse=False): if (buf is not None and name not in module._non_persistent_buffers_set): state_dict[prefix + name] = buf.detach().cpu() #see_memory_usage("after GatheredParameters", force=True) for name, child in module.named_children(): if child is not None: get_layer_state_dict(child, prefix + name + ".") # Prepare for checkpoint save by ensuring all parameters are partitioned self.optimizer.checkpoint_event_prologue() see_memory_usage("before get_layer_state_dict", force=False) get_layer_state_dict(self.module, prefix="") see_memory_usage("after get_layer_state_dict", force=False) self.optimizer.checkpoint_event_epilogue() return state_dict def save_fp16_model(self, save_dir, save_filename="pytorch_model.bin"): """has been renamed to save_16bit_model, keeping this around for backwards compatibility""" return self.save_16bit_model(save_dir, save_filename) def save_16bit_model(self, save_dir, save_filename="pytorch_model.bin"): """ Save 16bit model weights This method saves the 16bit model weights at the desired destination. Arguments: save_dir: Required. Directory for saving the model save_filename: Optional. Filename to save to. Defaults to ``pytorch_model.bin`` Returns: ``True`` when a model has been saved, ``False`` otherwise. It will not be saved if stage3_gather_16bit_weights_on_model_save is ``False``. Important: all processes must call this method and not just the process with rank 0. It is because the processes need to work in sync to gather the weights. This method will hang waiting to synchronize with other processes if it's called just for the process with rank 0. 
""" path = os.path.join(save_dir, save_filename) if self.zero_optimization_partition_weights(): if self.zero_gather_16bit_weights_on_model_save(): # consolidation is expensive in time and memory and therefore isn't a default state_dict = self._zero3_consolidated_16bit_state_dict() else: # the model will be bogus if not consolidated so don't confuse the user by saving it logger.info( f"Did not save the model {path} because `stage3_gather_16bit_weights_on_model_save` is False") return False else: state_dict = self.module.state_dict() tag = f"global_step{self.global_steps}" tag = str(tag) self.checkpoint_engine.create(tag) if dist.get_rank() == 0: self.checkpoint_engine.makedirs(save_dir, exist_ok=True) logger.info(f"Saving model weights to {path}, tag: {tag}") self.checkpoint_engine.save(state_dict, path) self.checkpoint_engine.commit(tag) return True def empty_partition_cache(self): """ Release GPU memory consumed by offloaded model parameters. """ if hasattr(self.optimizer, 'empty_partition_cache'): self.optimizer.empty_partition_cache() gc.collect() get_accelerator().empty_cache()
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team """ Copyright NVIDIA/Megatron Helper functions and classes from multiple sources. """ from collections.abc import Iterable from deepspeed.moe.utils import is_moe_param import os import psutil import gc from math import sqrt from math import floor from bisect import bisect_left import torch from deepspeed import comm as dist try: from torch._six import inf except ModuleNotFoundError: from torch import inf from deepspeed.utils import groups, logger from deepspeed.runtime.constants import PIPE_REPLICATED from numpy import prod from deepspeed.accelerator import get_accelerator from deepspeed.module_inject.policy import transpose from torch.nn import functional as F torch_memory_reserved = get_accelerator().memory_reserved torch_max_memory_reserved = get_accelerator().max_memory_reserved class DummyOptim(): """ Dummy optimizer presents model parameters as a param group, this is primarily used to allow ZeRO-3 without an optimizer """ def __init__(self, params): self.param_groups = [] self.param_groups.append({'params': params}) def noop_decorator(func): return func def ensure_directory_exists(filename): """Create the directory path to ``filename`` if it does not already exist. Args: filename (str): A file path. """ dirname = os.path.dirname(filename) os.makedirs(dirname, exist_ok=True) def set_random_seed(seed): """Set the random seed for common PRNGs used during training: random, numpy, and torch. Args: seed (int): the seed to use """ import numpy import random random.seed(seed) numpy.random.seed(seed) torch.manual_seed(seed) def is_model_parallel_parameter(p) -> bool: if hasattr(p, 'model_parallel') and p.model_parallel: return True if hasattr(p, 'tensor_model_parallel') and p.tensor_model_parallel: return True return False def bwc_tensor_model_parallel_rank(mpu=None): """Backwards-compatible way of querying the tensor model parallel rank from an ``mpu`` object. *Tensor* model parallelism means that tensors are physically split across processes. This contrasts with *pipeline* model parallelism, in which the layers are partitioned but tensors left intact. The API for tensor model parallelism has changed across versions and this helper provides a best-effort implementation across versions of ``mpu`` objects. The preferred mechanism is ``mpu.get_tensor_model_parallel_rank()``. This should "just work" with both Megatron-LM and DeepSpeed's pipeline parallelism. Args: mpu (model parallel unit, optional): The tensor model parallel rank. If ``mpu=None``, returns 0. Defaults to ``None``. Returns: int: the rank """ if mpu is None: # No model parallelism in easy :) return 0 if hasattr(mpu, 'get_tensor_model_parallel_rank'): # New Megatron and DeepSpeed convention (post pipeline-parallelism release) return mpu.get_tensor_model_parallel_rank() elif hasattr(mpu, 'get_slice_parallel_rank'): # Some DeepSpeed + pipeline parallelism versions return mpu.get_slice_parallel_rank() else: # Deprecated Megatron and DeepSpeed convention return mpu.get_model_parallel_rank() def copy_to_device(item, device, criterion_func): """ Return a copy of tensor on specified device. Works on individual tensors, and tensors contained/nested in lists, tuples, and dicts. Parameters: item: tensor to copy or (possibly nested) container of tensors to copy. 
device: target device criterion_func: Function to restrict copy operation to items meet criterion Returns: None """ if criterion_func(item): return item.to(device) elif isinstance(item, list): return [copy_to_device(v, device, criterion_func) for v in item] elif isinstance(item, tuple): return tuple([copy_to_device(v, device, criterion_func) for v in item]) elif isinstance(item, dict): return {k: copy_to_device(v, device, criterion_func) for k, v in item.items()} else: return item def move_to_device(item, device, criterion_func): """ Move tensor on to specified device by changing the storage. Works on individual tensors, and tensors contained/nested in lists, tuples, and dicts. Parameters: item: tensor to move or (possibly nested) container of tensors to move. device: target device criterion_func: Function to restrict move operation to items meet criterion Returns: None """ if criterion_func(item): device_copy = item.to(device) item.data = device_copy.data return item elif isinstance(item, list): return [move_to_device(v, device, criterion_func) for v in item] elif isinstance(item, tuple): return tuple([move_to_device(v, device, criterion_func) for v in item]) elif isinstance(item, dict): return {k: move_to_device(v, device, criterion_func) for k, v in item.items()} else: return item class CheckOverflow(object): '''Checks for overflow in gradient across parallel process''' def __init__(self, param_groups=None, mpu=None, zero_reduce_scatter=False, deepspeed=None): self.mpu = mpu self.params = [] if param_groups else None self.zero_reduce_scatter = zero_reduce_scatter self.deepspeed = deepspeed self.has_moe_params = False if param_groups: for group in param_groups: for param in group: self.params.append(param) if is_moe_param(param): self.has_moe_params = True def check_using_norm(self, norm_group, reduce_overflow=True): # TODO: I don't think reduce_overflow is needed if mpu is None overflow = -1 in norm_group overflow_gpu = get_accelerator().FloatTensor([overflow]) if self.has_moe_params: # In this case, we need to do an all_reduce across # the expert_parallel_group, so that if there was # an overflow due to expert weights, we detect it # Only need to check groups.get_largest_expert_parallel_group() dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=groups._get_max_expert_parallel_group()) if self.mpu is not None: dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=self.mpu.get_model_parallel_group()) elif reduce_overflow: dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX) dist.barrier() overflow = overflow_gpu[0].item() return bool(overflow) def check(self, param_groups=None): params = [] has_moe_params = False if param_groups is None: params = self.params has_moe_params = self.has_moe_params else: assert param_groups is not None, \ "self.params and param_groups both cannot be none" for group in param_groups: for param in group: params.append(param) if is_moe_param(param): has_moe_params = True return self.has_overflow(params, has_moe_params=has_moe_params) # `params` is a list / generator of torch.Variable def has_overflow_serial(self, params): for i, p in enumerate(params): if p.grad is not None and self._has_inf_or_nan(p.grad.data, i): return True return False def has_overflow(self, params, has_moe_params=None): if has_moe_params is None: has_moe_params = self.has_moe_params overflow = self.has_overflow_serial(params) # Since each model parallel GPU carries only part of the model, # make sure overflow flag is synced across all the model parallel GPUs overflow_gpu = 
get_accelerator().ByteTensor([overflow]) # deepspeed.comm.all_reduce(overflow_gpu, # op=deepspeed.comm.ReduceOp.MAX, # group=mpu.get_model_parallel_group()) if has_moe_params: # All reduce this across expert_parallel_group, so that if an expert # overflows, we detect it here dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=groups._get_max_expert_parallel_group()) if self.zero_reduce_scatter: dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=dist.get_world_group()) elif self.mpu is not None: if self.deepspeed is not None: using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce') if (using_pipeline and self.deepspeed.pipeline_enable_backward_allreduce is False) or ( not using_pipeline and self.deepspeed.enable_backward_allreduce is False): dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=self.mpu.get_data_parallel_group()) dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=self.mpu.get_model_parallel_group()) elif self.deepspeed is not None and self.deepspeed.enable_backward_allreduce is False: dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=dist.get_world_group()) overflow = overflow_gpu[0].item() return bool(overflow) # `x` is a torch.Tensor @staticmethod def _has_inf_or_nan(x, i): try: # if x is half, the .float() incurs an additional deep copy, but it's necessary if # Pytorch's .sum() creates a one-element tensor of the same type as x # (which is true for some recent version of pytorch). cpu_sum = float(x.float().sum()) # More efficient version that can be used if .sum() returns a Python scalar # cpu_sum = float(x.sum()) except RuntimeError as instance: # We want to check if inst is actually an overflow exception. # RuntimeError could come from a different error. # If so, we still want the exception to propagate. if "value cannot be converted" not in instance.args[0]: raise return True else: if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum: return True return False def _handle_overflow(cpu_sum, x, i): import math rank = dist.get_rank() if rank == 0: t_i = -1 for v_i, v in enumerate(x.data.contiguous().view(-1)): if not math.isfinite(float(v)): t_i = v_i break logger.info(f"rank {rank} detected overflow {cpu_sum} in tensor {i}:{t_i} shape {x.shape}") def get_global_norm(norm_list): """ Compute total from a list of norms """ total_norm = 0.0 for norm in norm_list: total_norm += norm**2.0 # logger.info(f'norm_list = {norm_list} global = {sqrt(total_norm)}') return sqrt(total_norm) def clip_grad_norm_(parameters, max_norm, norm_type=2, mpu=None): """Clips gradient norm of an iterable of parameters. This has been adapted from Nvidia megatron. We add norm averaging to consider MoE params when calculating norm as they will result in different norms across different ranks. This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and added functionality to handle model parallel parameters. Note that the gradients are modified in place. Arguments: parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a single Tensor that will have gradients normalized max_norm (float or int): max norm of the gradients norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm. Returns: Total norm of the parameters (viewed as a single vector). 
""" if isinstance(parameters, torch.Tensor): parameters = [parameters] parameters = list(filter(lambda p: p.grad is not None, parameters)) max_norm = float(max_norm) norm_type = float(norm_type) if norm_type == inf: total_norm = max(p.grad.data.abs().max() for p in parameters) total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) # Take max across all GPUs. if mpu is not None: dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=mpu.get_model_parallel_group()) total_norm = total_norm_cuda[0].item() else: total_norm = 0 for p in parameters: if mpu is not None: if (mpu.get_model_parallel_rank() == 0) or is_model_parallel_parameter(p): param_norm = p.grad.data.norm(norm_type) total_norm += param_norm.item()**norm_type else: param_norm = p.grad.data.float().norm(norm_type) total_norm += param_norm.item()**norm_type # Sum across all model parallel GPUs. total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) if mpu is not None: dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group()) total_norm = total_norm_cuda[0].item()**(1. / norm_type) # Need to average total_norm across different GPUs due to the presence of moe params pg = groups._get_data_parallel_group() scaled_norm = total_norm * 1.0 / float(dist.get_world_size(group=pg)) scaled_norm_tensor = get_accelerator().FloatTensor([float(scaled_norm)]) dist.all_reduce(scaled_norm_tensor, group=pg) total_norm = scaled_norm_tensor.item() clip_coef = max_norm / (total_norm + 1e-6) if clip_coef < 1: for p in parameters: p.grad.data.mul_(clip_coef) return total_norm def get_grad_norm(parameters, norm_type=2, mpu=None): """Get grad norm of an iterable of parameters. This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and added functionality to handle model parallel parameters. Note that the gradients are modified in place. Taken from Nvidia Megatron. Arguments: parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a single Tensor that will have gradients normalized max_norm (float or int): max norm of the gradients norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm. Returns: Total norm of the parameters (viewed as a single vector). """ if isinstance(parameters, torch.Tensor): parameters = [parameters] parameters = list(filter(lambda p: p.grad is not None, parameters)) norm_type = float(norm_type) if norm_type == inf: total_norm = max(p.grad.data.abs().max() for p in parameters) total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) # Take max across all GPUs. if mpu is not None: dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=mpu.get_model_parallel_group()) total_norm = total_norm_cuda[0].item() else: total_norm = 0. tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=mpu) for p in parameters: # Pipeline parallelism may replicate parameters. Avoid multi-counting. if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated: continue # Filter to avoid over-counting replicated tensors from tensor # model parallelism if (tensor_mp_rank > 0) and not is_model_parallel_parameter(p): continue param_norm = p.grad.data.float().norm(norm_type) total_norm += param_norm.item()**norm_type # Sum across all model parallel GPUs. total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) if mpu is not None: dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group()) total_norm = total_norm_cuda[0].item()**(1. 
/ norm_type) if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm: total_norm = -1 return total_norm def get_grad_zeros(parameters, mpu=None): """Compute the number of grads with zero values. This is adapted from get_grad_norm Arguments: parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a single Tensor that will have gradients normalized Returns: Total number of params with zero values (viewed as a single vector). """ if isinstance(parameters, torch.Tensor): parameters = [parameters] parameters = list(filter(lambda p: p.grad is not None, parameters)) total_zeros = 0. tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=mpu) for p in parameters: # Pipeline parallelism may replicate parameters. Avoid multi-counting. if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated: continue # Filter to avoid over-counting replicated tensors from tensor # model parallelism if (tensor_mp_rank > 0) and not is_model_parallel_parameter(p): continue count_zeros = p.grad.numel() - torch.count_nonzero(p.grad) total_zeros += count_zeros.item() # Sum across all model parallel GPUs. total_zeros_cuda = get_accelerator().FloatTensor([float(total_zeros)]) if mpu is not None: dist.all_reduce(total_zeros_cuda, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group()) total_zeros = total_zeros_cuda[0].item() return total_zeros def get_weight_norm(parameters, norm_type=2, mpu=None): """Get norm of an iterable of parameters. This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and added functionality to handle model parallel parameters. Note that the gradients are modified in place. Taken from Nvidia Megatron. Arguments: parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a single Tensor that will have gradients normalized max_norm (float or int): max norm of the gradients norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm. Returns: Total norm of the parameters (viewed as a single vector). """ if isinstance(parameters, torch.Tensor): parameters = [parameters] norm_type = float(norm_type) if norm_type == inf: total_norm = max(p.data.abs().max() for p in parameters) total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) # Take max across all GPUs. if mpu is not None: dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=mpu.get_model_parallel_group()) total_norm = total_norm_cuda[0].item() else: total_norm = 0. tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=mpu) for p in parameters: # Pipeline parallelism may replicate parameters. Avoid multi-counting. if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated: continue # Filter to avoid over-counting replicated tensors from tensor # model parallelism if (tensor_mp_rank > 0) and not is_model_parallel_parameter(p): continue param_norm = p.data.float().norm(norm_type) total_norm += param_norm**norm_type # Sum across all model parallel GPUs. total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) if mpu is not None: dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group()) total_norm = total_norm_cuda[0].item()**(1. / norm_type) if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm: total_norm = -1 return total_norm def prefix_sum_inc(weights): """ Compute an inclusive prefix sum. 
Example: >>> prefix_sum_inc([3,4,5]) [3, 7, 12] """ weights_ = [w for w in weights] for x in range(1, len(weights_)): weights_[x] += weights_[x - 1] return weights_ def partition_uniform(num_items, num_parts): parts = [0] * (num_parts + 1) # First check for the trivial edge case if num_items <= num_parts: for p in range(num_parts + 1): parts[p] = min(p, num_items) return parts chunksize = floor(num_items / num_parts) for p in range(num_parts): parts[p] = min(chunksize * p, num_items) parts[num_parts] = num_items return parts def _lprobe(weights, num_parts, bottleneck): num_items = len(weights) total_weight = weights[-1] # initialize partitioning parts = [0] * (num_parts + 1) for p in range(1, num_parts + 1): parts[p] = num_items bsum = bottleneck # running sum of target weight for pth partition chunksize = num_items // num_parts step = chunksize for p in range(1, num_parts): # Jump to the next bucket while (step < num_items) and (weights[step] < bsum): step += chunksize # Find the end index of partition p parts[p] = bisect_left(weights, bsum, lo=step - chunksize, hi=min(step, num_items)) # Nothing more to partition, return early if parts[p] == num_items: # See if the current partition is overweight. part_size = weights[-1] - weights[parts[p - 1]] return parts, part_size < bottleneck # Next partition target bsum = weights[parts[p] - 1] + bottleneck return parts, bsum >= total_weight def _rb_partition_balanced(weights, num_parts, eps): total_weight = weights[-1] lower = total_weight / num_parts # best case heaviest partition upper = total_weight # worst case heaviest partition # Do a binary search for the best partitioning while upper > lower + eps: mid = lower + ((upper - lower) / 2) parts, success = _lprobe(weights, num_parts, mid) if success: upper = mid else: lower = mid + eps return upper def partition_balanced(weights, num_parts, eps=1e-3): num_items = len(weights) # First check for the trivial edge case if num_items <= num_parts: return partition_uniform(num_items, num_parts) weights_ = prefix_sum_inc(weights) # Find the smallest bottleneck (weight of heaviest partition) bottleneck = _rb_partition_balanced(weights_, num_parts, eps=eps) # Now compute that partitioning parts, success = _lprobe(weights_, num_parts, bottleneck) assert success return parts class PartitionedTensor: def __init__(self, tensor, group, partition_meta=None): super().__init__() self.group = group self.num_parts = dist.get_world_size(group=self.group) self.rank = dist.get_rank(group=self.group) self.orig_size = list(tensor.size()) self.orig_device = tensor.device self.local_data, self.partition = self._partition_tensor(tensor) @classmethod def from_meta(cls, meta, local_part, group, device=get_accelerator().device_name()): assert meta.dtype == torch.long dummy = torch.ones(dist.get_world_size(group=group)) part_obj = cls(tensor=dummy, group=group) meta = meta.tolist() # [N, list0, ..., listN-1] part_obj.orig_size = meta[1:(1 + meta[0])] meta = meta[1 + meta[0]:] part_obj.orig_device = device part_obj.local_data = local_part.detach() part_obj.group = group # Partition is encoded like the rowptr of a CSR matrix: # [num_parts, rank, 0, part_1, ..., part_num_parts] # TODO: support shuffle between different partition granularities assert part_obj.num_parts == meta[0] assert part_obj.rank == meta[1] part_obj.partition = meta[2:] # length num_parts+1 return part_obj def _partition_tensor(self, tensor): partition = partition_uniform(num_items=tensor.numel(), num_parts=self.num_parts) start = partition[self.rank] length = 
partition[self.rank + 1] - start tensor_part = tensor.detach().contiguous().view(-1).narrow(0, start=start, length=length).clone() return tensor_part, partition def full(self, device=None): if device is None: device = self.orig_device # Allocate the full tensor as a flat buffer. full_numel = prod(self.full_size()) flat_tensor = torch.zeros([full_numel], dtype=self.local_data.dtype, device=device) # Prepare all-gather buffer partition_tensors = [] for part_id in range(self.num_parts): part_size = self.partition[part_id + 1] - self.partition[part_id] buf = flat_tensor.narrow(0, start=self.partition[part_id], length=part_size) if part_id == self.rank: buf.copy_(self.local_data) partition_tensors.append(buf) # Collect the full tensor dist.all_gather(partition_tensors, partition_tensors[self.rank], group=self.group) for i in range(len(partition_tensors)): partition_tensors[i].data = torch.zeros(1) partition_tensors[i] = None return flat_tensor.view(self.full_size()).clone().detach() def to_meta(self): """Returns a torch.LongTensor that encodes partitioning information. Can be used along with ``data()`` to serialize a ``PartitionedTensor`` for communication. Returns: torch.LongTensor: a tensor encoding the meta-information for the partitioning """ meta = [] meta.append(len(self.orig_size)) meta += list(self.orig_size) meta.append(self.num_parts) meta.append(self.rank) meta += self.partition return torch.LongTensor(data=meta).to(self.orig_device) def data(self): return self.local_data def local_size(self): return self.local_data.size() def full_size(self): return self.orig_size mem_alloced = 0 mem_cached = 0 def memory_status(msg, print_rank=-1, reset_max=False): global mem_alloced, mem_cached rank = dist.get_rank() if print_rank != -1 and rank != print_rank: return get_accelerator().synchronize() if reset_max: get_accelerator().reset_max_memory_cached() get_accelerator().reset_max_memory_allocated() new_alloced = get_accelerator().memory_allocated() new_cached = get_accelerator().memory_cached() delta_alloced = new_alloced - mem_alloced delta_cached = new_cached - mem_cached mem_cached = new_cached mem_alloced = new_alloced max_alloced = get_accelerator().max_memory_allocated() max_cached = get_accelerator().max_memory_cached() # convert to GB for printing new_alloced /= 1024**3 new_cached /= 1024**3 delta_alloced /= 1024**3 delta_cached /= 1024**3 max_alloced /= 1024**3 max_cached /= 1024**3 print( f'RANK={rank} MEMSTATS', msg, f'device={get_accelerator().current_device_name()} ' f'current alloc={new_alloced:0.4f}GB (delta={delta_alloced:0.4f}GB max={max_alloced:0.4f}GB) ' f'current cache={new_cached:0.4f}GB (delta={delta_cached:0.4f}GB max={max_cached:0.4f}GB)') def get_ma_status(): if dist.is_initialized() and not dist.get_rank() == 0: return 0 return get_accelerator().memory_allocated() def empty_cache(): get_accelerator().empty_cache() get_accelerator().reset_peak_memory_stats() def see_memory_usage(message, force=False): if not force: return if dist.is_initialized() and not dist.get_rank() == 0: return # python doesn't do real-time garbage collection so do it explicitly to get the correct RAM reports gc.collect() # Print message except when distributed but not rank 0 logger.info(message) logger.info(f"MA {round(get_accelerator().memory_allocated() / (1024 * 1024 * 1024),2 )} GB \ Max_MA {round(get_accelerator().max_memory_allocated() / (1024 * 1024 * 1024),2)} GB \ CA {round(torch_memory_reserved() / (1024 * 1024 * 1024),2)} GB \ Max_CA {round(torch_max_memory_reserved() / (1024 * 1024 * 
1024))} GB ") vm_stats = psutil.virtual_memory() used_GB = round(((vm_stats.total - vm_stats.available) / (1024**3)), 2) logger.info(f'CPU Virtual Memory: used = {used_GB} GB, percent = {vm_stats.percent}%') # get the peak memory to report correct data, so reset the counter for the next call get_accelerator().reset_peak_memory_stats() def call_to_str(base, *args, **kwargs): """Construct a string representation of a call. Args: base (str): name of the call args (tuple, optional): args to ``base`` kwargs (dict, optional): kwargs supplied to ``base`` Returns: str: A string representation of base(*args, **kwargs) """ name = f'{base}(' if args: name += ', '.join(repr(arg) for arg in args) if kwargs: name += ', ' if kwargs: name += ', '.join(f'{key}={repr(arg)}' for key, arg in kwargs.items()) name += ')' return name def get_only_unique_item(items): item_set = set(items) if len(item_set) != 1: raise RuntimeError(f"expected there to be only one unique element in {items}") unique_item, = item_set return unique_item def clip_gradients(parameters, max_norm=1.0, global_grad_norm=None, mpu=None, eps=1e-6): """Clip the gradient of a list of parameters. Args: parameters: List of parameters whose .grad will be clipped. global_grad_norm (float, optional): Precomputed gradient norm. Defaults to None. mpu (optional): model parallelism unit. Defaults to None. eps (float, optional): epsilon value added to grad norm. Defaults to 1e-6 Returns: float: the global gradient norm """ if global_grad_norm is None: global_grad_norm = get_grad_norm(parameters, mpu=mpu) clip_coef = max_norm / (global_grad_norm + eps) if clip_coef < 1: for p in parameters: p.grad.detach().mul_(clip_coef) return global_grad_norm def get_global_norm_of_tensors(input_tensors, norm_type=2, mpu=None): """Get norm of an iterable of tensors. This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and added functionality to handle model parallel parameters. Taken from Nvidia Megatron. Arguments: input_tensors (Iterable[Tensor]): an iterable of Tensors will have norm computed norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm. Returns: Total norm of the tensors (viewed as a single vector). """ assert isinstance(input_tensors, Iterable), f'expected Iterable type not {type(input_tensors)}' assert all([torch.is_tensor(t) for t in input_tensors]), f'expected list of only tensors' norm_type = float(norm_type) if norm_type == inf: total_norm = max(t.data.abs().max() for t in input_tensors) total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) if mpu is not None: dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=mpu.get_model_parallel_group()) total_norm = total_norm_cuda[0].item() else: total_norm = sum([t.data.float().norm(norm_type).item()**norm_type for t in input_tensors]) total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) if mpu is not None: dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group()) total_norm = total_norm_cuda[0].item()**(1. / norm_type) if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm: total_norm = -1 return total_norm def clip_tensors_by_global_norm(input_tensors, max_norm=1.0, global_norm=None, mpu=None, eps=1e-6): """Clip list of tensors by global norm. Args: input_tensors: List of tensors to be clipped global_norm (float, optional): Precomputed norm. Defaults to None. mpu (optional): model parallelism unit. Defaults to None. 
eps (float, optional): epsilon value added to grad norm. Defaults to 1e-6 Returns: float: the global norm """ if global_norm is None: global_norm = get_global_norm_of_tensors(input_tensors, mpu=mpu) clip_coef = max_norm / (global_norm + eps) if clip_coef < 1: for t in input_tensors: t.detach().mul_(clip_coef) return global_norm def align_dense_tensors(tensor_list, alignment): num_elements = sum(t.numel() for t in tensor_list) remaining = num_elements % alignment if remaining: elements_to_add = alignment - remaining pad_tensor = torch.zeros(elements_to_add, device=tensor_list[0].device, dtype=tensor_list[0].dtype) padded_tensor_list = tensor_list + [pad_tensor] else: padded_tensor_list = tensor_list return padded_tensor_list def all_gather_dp_groups(partitioned_param_groups, dp_process_group, start_alignment_factor, allgather_bucket_size): for group_id, partitioned_params in enumerate(partitioned_param_groups): # Sequential AllGather Best of both worlds partition_id = dist.get_rank(group=dp_process_group[group_id]) dp_world_size = dist.get_world_size(group=dp_process_group[group_id]) num_shards = max(1, partitioned_params[partition_id].numel() * dp_world_size // allgather_bucket_size) shard_size = partitioned_params[partition_id].numel() // num_shards # Enforce nccl/rccl alignment of start location of each shard shard_size = shard_size - (shard_size % start_alignment_factor) num_elements = shard_size assert shard_size * num_shards <= partitioned_params[partition_id].numel() for shard_id in range(num_shards): if shard_id == (num_shards - 1): num_elements = partitioned_params[partition_id].numel() - shard_id * shard_size shard_list = [] for dp_id in range(dp_world_size): curr_shard = partitioned_params[dp_id].narrow(0, shard_id * shard_size, num_elements).detach() shard_list.append(curr_shard) dist.all_gather(shard_list, shard_list[partition_id], dp_process_group[group_id]) class TLinear(torch.nn.Linear): def __init__(self, orig_layer, name=""): self.name = name super().__init__(orig_layer.weight.shape[1], orig_layer.weight.shape[0], bias=(orig_layer.bias is not None)) self.weight.data = transpose(orig_layer.weight.data) self.bias = orig_layer.bias self._fwd_func = self._fwd_bias_add if self.bias is not None else self._fwd def _fwd(self, input): return F.linear(input, self.weight) def _fwd_bias_add(self, input): return F.linear(input, self.weight, bias=self.bias) def forward(self, input): return self._fwd_func(input) def get_inactive_params(param_list): from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus return [param for param in param_list if (hasattr(param, 'ds_id') and \ param.ds_status == ZeroParamStatus.NOT_AVAILABLE)]
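# --- Usage sketch (not part of the DeepSpeed sources above) ---------------------------------
# Small, self-contained demo of a few pure helpers defined in the utils module above:
# prefix_sum_inc(), partition_balanced() and align_dense_tensors(). None of these need
# torch.distributed to be initialized, so this can run as a plain Python script. The
# concrete weights and tensor sizes below are made-up example inputs.

import torch
from deepspeed.runtime.utils import prefix_sum_inc, partition_balanced, align_dense_tensors

# Inclusive prefix sum used internally by the balanced partitioner.
print(prefix_sum_inc([3, 4, 5]))  # -> [3, 7, 12]

# partition_balanced returns num_parts + 1 boundary indices into the weight list such
# that the weight of the heaviest partition is (approximately) minimized.
weights = [1, 1, 8, 1, 1, 1]
parts = partition_balanced(weights, num_parts=2)
print(parts)  # boundaries; first partition covers weights[parts[0]:parts[1]]

# align_dense_tensors appends a zero pad tensor so the total element count becomes a
# multiple of the requested alignment (used before flattening for NCCL all-gather).
tensors = [torch.ones(3), torch.ones(5)]          # 8 elements total
padded = align_dense_tensors(tensors, alignment=5)
print(sum(t.numel() for t in padded))             # -> 10, a multiple of 5
# --------------------------------------------------------------------------------------------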
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team
"""
Implementation of a compressed sparse tensor. Similar in
functionality to TensorFlow's IndexedSlices implementation.
"""

import torch


class SparseTensor(object):
    """ Compressed Sparse Tensor """

    def __init__(self, dense_tensor=None):
        self.orig_dense_tensor = dense_tensor
        # Guard the attribute access so that constructing SparseTensor() with the
        # default None argument does not raise AttributeError.
        self.is_sparse = dense_tensor.is_sparse if dense_tensor is not None else False
        if dense_tensor is not None:
            if dense_tensor.is_sparse:
                dense_tensor = dense_tensor.coalesce()
                self.indices = dense_tensor.indices().flatten()
                self.values = dense_tensor.values()
            else:
                result = torch.sum(dense_tensor, dim=1)
                self.indices = result.nonzero().flatten()
                self.values = dense_tensor[self.indices]
            self.dense_size = list(dense_tensor.size())
        else:
            self.indices = None
            self.values = None
            self.dense_size = None

    def to_coo_tensor(self):
        return torch.sparse_coo_tensor(self.indices.unsqueeze(0), self.values, self.dense_size)

    @staticmethod
    def type():
        return "deepspeed.SparseTensor"

    def to_dense(self):
        it = self.indices.unsqueeze(1)
        full_indices = torch.cat([it for _ in range(self.dense_size[1])], dim=1)
        return self.values.new_zeros(self.dense_size).scatter_add_(0, full_indices, self.values)

    def sparse_size(self):
        index_size = list(self.indices.size())
        index_size = index_size[0]
        value_size = list(self.values.size())
        value_size = value_size[0] * value_size[1]
        dense_size = self.dense_size[0] * self.dense_size[1]
        return index_size + value_size, dense_size

    def add(self, b):
        assert self.dense_size == b.dense_size
        self.indices = torch.cat([self.indices, b.indices])
        self.values = torch.cat([self.values, b.values])

    def __str__(self):
        sparse_size, dense_size = self.sparse_size()
        return "DeepSpeed.SparseTensor(indices_size={}, values_size={}, " \
            "dense_size={}, device={}, reduction_factor={})".format(
                self.indices.size(), self.values.size(), self.dense_size,
                self.indices.get_device(), dense_size / sparse_size
            )

    def __repr__(self):
        return self.__str__()
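# --- Usage sketch (not part of the DeepSpeed sources above) ---------------------------------
# A round-trip demo for the SparseTensor class defined above: compress a 2-D dense tensor
# whose rows are mostly zero, then expand it back with to_dense(). The example tensor and
# sizes are assumptions, chosen so every non-zero row has a non-zero row sum (the row-sum
# test in __init__ is how rows carrying data are detected).

import torch
from deepspeed.runtime.sparse_tensor import SparseTensor

dense = torch.zeros(6, 4)
dense[1] = torch.ones(4)          # two non-zero rows out of six
dense[4] = 2 * torch.ones(4)

st = SparseTensor(dense)
print(st.indices)                  # -> tensor([1, 4]); the rows that carry data
sparse_numel, dense_numel = st.sparse_size()
print(sparse_numel, dense_numel)   # 2 indices + 2*4 values vs. 6*4 dense elements

roundtrip = st.to_dense()
assert torch.equal(roundtrip, dense)
# --------------------------------------------------------------------------------------------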
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team from torch.utils.data import DataLoader, RandomSampler from torch.utils.data.distributed import DistributedSampler from deepspeed.accelerator import get_accelerator from deepspeed.runtime.data_pipeline.data_sampling.data_sampler import DeepSpeedDataSampler from deepspeed.runtime.data_pipeline.constants import CURRICULUM_LEARNING, \ DATA_EFFICIENCY, DATA_SAMPLING_NUM_WORKERS from deepspeed.runtime.constants import GRADIENT_ACCUMULATION_STEPS, \ DATA_PARALLEL_GROUP, GLOBAL_RANK class RepeatingLoader: def __init__(self, loader): """Wraps an iterator to allow for infinite iteration. This is especially useful for DataLoader types that we wish to automatically restart upon completion. Args: loader (iterator): The data loader to repeat. """ self.loader = loader self.data_iter = iter(self.loader) def __iter__(self): return self def __next__(self): try: batch = next(self.data_iter) except StopIteration: self.data_iter = iter(self.loader) batch = next(self.data_iter) return batch class DeepSpeedDataLoader(object): def __init__(self, dataset, batch_size, pin_memory, local_rank, tput_timer, collate_fn=None, num_local_io_workers=None, data_sampler=None, data_parallel_world_size=None, data_parallel_rank=None, dataloader_drop_last=False, deepspeed_dataloader_config={}): self.deepspeed_dataloader_config = deepspeed_dataloader_config self.tput_timer = tput_timer self.batch_size = batch_size self.curriculum_learning_enabled = False if CURRICULUM_LEARNING in deepspeed_dataloader_config: self.curriculum_learning_enabled = deepspeed_dataloader_config[CURRICULUM_LEARNING] if self.curriculum_learning_enabled: data_sampler = DeepSpeedDataSampler(self.deepspeed_dataloader_config[DATA_EFFICIENCY], len(dataset), self.batch_size, data_parallel_rank, data_parallel_world_size, self.deepspeed_dataloader_config[DATA_PARALLEL_GROUP], self.deepspeed_dataloader_config[GRADIENT_ACCUMULATION_STEPS], self.deepspeed_dataloader_config[GLOBAL_RANK], drop_last=dataloader_drop_last) device_count = get_accelerator().device_count() num_local_io_workers = self.deepspeed_dataloader_config[DATA_SAMPLING_NUM_WORKERS] else: if local_rank >= 0: if data_sampler is None: data_sampler = DistributedSampler(dataset=dataset, num_replicas=data_parallel_world_size, rank=data_parallel_rank) device_count = 1 else: if data_sampler is None: data_sampler = RandomSampler(dataset) device_count = get_accelerator().device_count() batch_size *= device_count if num_local_io_workers is None: num_local_io_workers = 2 * device_count self.num_local_io_workers = num_local_io_workers self.data_sampler = data_sampler self.dataset = dataset self.collate_fn = collate_fn self.device_count = device_count self.batch_size = batch_size self.pin_memory = pin_memory self.data = None self.dataloader_drop_last = dataloader_drop_last self.post_process_func = None if self.dataloader_drop_last: self.len = len(self.data_sampler) // self.batch_size else: from math import ceil self.len = ceil(len(self.data_sampler) / self.batch_size) def __iter__(self): self._create_dataloader() return self def __len__(self): return self.len def __next__(self): if self.tput_timer: self.tput_timer.start() if self.curriculum_learning_enabled: data = next(self.data_iterator) if self.post_process_func is not None: data = self.post_process_func(data, self.data_sampler.state_dict()) return data else: return next(self.data) def _create_dataloader(self): if self.curriculum_learning_enabled: if self.collate_fn is 
None: self.dataloader = DataLoader(self.dataset, pin_memory=self.pin_memory, batch_sampler=self.data_sampler, num_workers=self.num_local_io_workers) else: self.dataloader = DataLoader(self.dataset, pin_memory=self.pin_memory, batch_sampler=self.data_sampler, collate_fn=self.collate_fn, num_workers=self.num_local_io_workers) self.data_iterator = iter(self.dataloader) return self.dataloader else: if self.collate_fn is None: self.dataloader = DataLoader(self.dataset, batch_size=self.batch_size, pin_memory=self.pin_memory, sampler=self.data_sampler, num_workers=self.num_local_io_workers, drop_last=self.dataloader_drop_last) else: self.dataloader = DataLoader(self.dataset, batch_size=self.batch_size, pin_memory=self.pin_memory, sampler=self.data_sampler, collate_fn=self.collate_fn, num_workers=self.num_local_io_workers, drop_last=self.dataloader_drop_last) self.data = (x for x in self.dataloader) return self.dataloader # DataLoader([(torch.randn(3, 3), torch.tensor(i % 2)) for i in range(10)], batch_size=2))
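# Usage sketch for RepeatingLoader above: it only wraps an iterator, so a plain
# torch DataLoader is enough to exercise it. The import path is an assumption.
import torch
from torch.utils.data import DataLoader, TensorDataset
from deepspeed.runtime.dataloader import RepeatingLoader  # assumed path

dataset = TensorDataset(torch.arange(6).float().unsqueeze(1))
loader = RepeatingLoader(DataLoader(dataset, batch_size=4))

# The wrapped loader yields 2 batches per pass; RepeatingLoader restarts it
# transparently, so drawing 5 batches never raises StopIteration.
for step, (batch,) in zip(range(5), loader):
    print(step, batch.flatten().tolist())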
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import numpy as np from deepspeed.utils import log_dist class ProgressiveLayerDrop(object): r""" Progressive Layer Dropping (PLD) for model training. This implements the PLD technique for compressed model training from this paper: https://arxiv.org/pdf/2010.13369.pdf Args: theta (float): a hyper-parameter that controls the trade-off between training time and robustness. The lower the theta value, the faster the training speed. Default value: 0.5. gamma (float): a hyper-parameter that controls how fast the drop ratio increases. Default value: 0.001. """ def __init__(self, theta=0.5, gamma=0.001): super().__init__() self.theta = theta self.gamma = gamma self.current_theta = 1.0 log_dist(f'Enabled progressive layer dropping (theta = {self.theta})', ranks=[0]) def get_state(self): kwargs = {'progressive_layer_drop': True, 'pld_theta': self.get_theta()} return kwargs def get_theta(self): return self.current_theta def update_state(self, global_step): def _prob(x, gamma, p): return (1. - p) * np.exp(-gamma * x) + p self.current_theta = _prob(global_step, self.gamma, self.theta)
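# Standalone sketch of the keep-probability schedule implemented by
# ProgressiveLayerDrop above: theta(t) = (1 - theta) * exp(-gamma * t) + theta,
# i.e. it starts at 1.0 (keep every layer) and decays toward the floor `theta`.
# This only re-evaluates the formula from update_state; it does not touch the class.
import numpy as np

theta, gamma = 0.5, 0.001
for step in (0, 1000, 5000, 20000):
    current_theta = (1.0 - theta) * np.exp(-gamma * step) + theta
    print(f"step={step:6d}  keep-prob={current_theta:.3f}")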
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import torch import os import copy import collections import json from abc import ABC, abstractmethod from deepspeed.utils import logger from deepspeed.runtime.checkpoint_engine.torch_checkpoint_engine import TorchCheckpointEngine from .weight_quantizer import WeightQuantization AUTO_MODULE_KEY = 'auto' class SDLoaderFactory: @staticmethod def get_sd_loader_json(json_file, checkpoint_engine): if isinstance(json_file, str): with open(json_file) as f: data = json.load(f) else: assert isinstance(json_file, dict) data = json_file sd_type = data['type'] ckpt_list = data['checkpoints'] version = data['version'] ckpt_type = data.get('parallelization', 'pp') mp_size = data.get('mp_size', 0) if sd_type.lower() in ['bloom', 'ds_model']: return data return SDLoaderFactory.get_sd_loader(ckpt_list, checkpoint_engine, sd_type, version) @staticmethod def get_sd_loader(ckpt_list, checkpoint_engine, sd_type='Megatron', version=None): if sd_type == 'Megatron': return MegatronSDLoader(ckpt_list, version, checkpoint_engine) else: assert False, '{} checkpoint type is not supported'.format(sd_type) class SDLoaderBase(ABC): def __init__(self, ckpt_list, version, checkpoint_engine): self.module_key = None self.ckpt_list = ckpt_list self.version = version self.checkpoint_engine = TorchCheckpointEngine() if checkpoint_engine is None else checkpoint_engine self.check_ckpt_list() def load(self, mp_world_size, mp_rank, module_key=AUTO_MODULE_KEY, is_pipe_parallel=False, quantize=False, quantize_bits=8, quantize_groups=64, mlp_extra_grouping=True): self.module_key = module_key num_ckpt = len(self.ckpt_list) idx = mp_rank * num_ckpt // mp_world_size """ We have multiple cases to handle here for both training and inference: 1. PipeModule loading mp_rank_*.pt files, is_pipe_parallel=True, module_key is not None a. if no mp_size/pp_size resizing occurs, for both training & inference, loading the mp_rank related checkpoint directly. b. if has mp_size/pp_size resizing, only Megatron model inference is supported, in this case each mp_rank_*.pt have same content, we will load the first checkpoint file (idx=0), to avoid idx exceeding file list boundary. 2. PipeModule loading layer_*.pt files, is_pipe_parallel=True, module_key is None a. if no mp_size resizing occurs, for both training & inference, loading the mp_rank related checkpoint directly. b. if has mp_size resizing, only Megatron model inference is supported, checkpoint file(s) will be merged/split according to mp_rank, mp_world_size and checkpoint file list. 3. Non-PipeModule loading mp_rank_*.pt files, is_pipe_parallel=False Same with case (2). 
""" if is_pipe_parallel and module_key is not None and mp_world_size != num_ckpt: mp_world_size = num_ckpt idx = 0 load_path = self.ckpt_list[idx] merge_count = 1 if num_ckpt == mp_world_size: assert os.path.exists(load_path) #logger.info(f'rank: {mp_rank} loading checkpoint: {load_path}') sd = self.checkpoint_engine.load(load_path, map_location=lambda storage, \ loc: storage) if quantize: quantizer = WeightQuantization(mlp_extra_grouping=mlp_extra_grouping, mp_size=mp_world_size) sd_module, all_scales = quantizer.sd_quantize_megatron(self.get_module(sd), quantize_bits, quantize_groups) self.set_module(sd, sd_module) else: all_scales = None elif num_ckpt > mp_world_size: sd, all_scales, merge_count = self.merge_state_dict(mp_world_size, mp_rank, quantize, \ quantize_bits, quantize_groups, mlp_extra_grouping) else: sd, all_scales = self.split_state_dict(mp_world_size, mp_rank, quantize, quantize_bits, \ quantize_groups, mlp_extra_grouping) return load_path, sd, (all_scales, merge_count) def get_merge_state_dicts(self, mp_world_size, mp_rank): num_ckpt = len(self.ckpt_list) assert num_ckpt % mp_world_size == 0, 'Invalid checkpoints and world size for sd merge' num_to_merge = num_ckpt // mp_world_size ckpt_list = [self.ckpt_list[i] for i in range(num_to_merge * mp_rank, num_to_merge * (mp_rank + 1))] logger.info(f"mp_rank: {mp_rank}, ckpt_list: {ckpt_list}") sd_list = [self.checkpoint_engine.load(ckpt, map_location=lambda storage, loc: storage) for ckpt in ckpt_list] return sd_list def get_split_state_dict(self, mp_world_size, mp_rank): num_ckpt = len(self.ckpt_list) assert mp_world_size % num_ckpt == 0, 'Invalid checkpoints and world size for sd split' num_to_split = mp_world_size // num_ckpt ckpt_index = mp_rank // num_to_split ckpt_offset = mp_rank % num_to_split logger.info(f"mp_rank: {mp_rank}, ckpt_list: {self.ckpt_list[ckpt_index]}, offset: {ckpt_offset}") sd = self.checkpoint_engine.load(self.ckpt_list[ckpt_index], map_location=lambda storage, loc: storage) return sd, num_to_split, ckpt_offset def _choose_module_key(self, sd): assert not ('module' in sd and 'model' in sd), "checkpoint has both 'model' and 'module' keys, not sure how to proceed" assert 'module' in sd or 'model' in sd, "checkpoint contains neither 'model' or 'module' keys, not sure how to proceed" if 'module' in sd: return 'module' elif 'model' in sd: return 'model' def get_module(self, sd): if self.module_key is None: return sd elif self.module_key == AUTO_MODULE_KEY: return sd[self._choose_module_key(sd)] else: return sd[self.module_key] def set_module(self, sd, module): if self.module_key is None: sd = module elif self.module_key == AUTO_MODULE_KEY: sd[self._choose_module_key(sd)] = module else: sd[self.module_key] = module return sd def check_ckpt_list(self): #logger.info(f'checkpoint file list: {self.ckpt_list}') assert len(self.ckpt_list) > 0 sd = self.checkpoint_engine.load(self.ckpt_list[0], map_location=lambda storage, loc: storage) # check checkpoint count is same with saved mp_world_size if 'mp_world_size' in sd.keys(): assert len(self.ckpt_list) == sd[ 'mp_world_size'], f"checkpoint count {len(self.ckpt_list)} is different from saved mp_world_size {sd['mp_world_size']}" @abstractmethod def merge_state_dict(self, mp_world_size, mp_rank, quantize, quantize_bits, groups, mlp_extra_grouping): pass @abstractmethod def split_state_dict(self, mp_world_size, mp_rank, quantize, quantize_bits, groups, mlp_extra_grouping): pass @abstractmethod def sanity_check(self, ckpt_file_name): pass class 
MegatronSDLoader(SDLoaderBase): def __init__(self, ckpt_list, version, checkpoint_engine): super().__init__(ckpt_list, version, checkpoint_engine) """ ## Q/K/V data need special processing key: transformer.layers.0.attention.query_key_value.weight, shape: torch.Size([3192, 4256]) key: transformer.layers.0.attention.query_key_value.bias, shape: torch.Size([3192]) ## merge or split on axis=0 key: word_embeddings.weight, shape: torch.Size([12672, 4256]) key: transformer.layers.0.mlp.dense_h_to_4h.bias, shape: torch.Size([4256]) key: transformer.layers.0.mlp.dense_h_to_4h.weight, shape: torch.Size([4256, 4256]) ## merge or split on axis=1 key: transformer.layers.0.attention.dense.weight, shape: torch.Size([4256, 1064]) key: transformer.layers.0.mlp.dense_4h_to_h.weight, shape: torch.Size([4256, 4256]) ## no change required key: transformer.layers.0.mlp.dense_4h_to_h.bias, shape: torch.Size([4256]) key: transformer.final_layernorm.weight, shape: torch.Size([4256]) key: transformer.final_layernorm.bias, shape: torch.Size([4256]) key: transformer.layers.0.attention.dense.bias, shape: torch.Size([4256]) key: transformer.layers.0.post_attention_layernorm.weight, shape: torch.Size([4256]) key: transformer.layers.0.post_attention_layernorm.bias, shape: torch.Size([4256]) key: transformer.layers.0.input_layernorm.weight, shape: torch.Size([4256]) key: transformer.layers.0.input_layernorm.bias, shape: torch.Size([4256]) key: position_embeddings.weight, shape: torch.Size([1024, 4256]) """ def merge_query_key_value(self, param_list, ckpt_ver): """ Up to now we found 3 Q/K/V parameter formats in different Megatron checkpoint versions: 1. version 0, there is no version information saved in checkpoint. format: [(3 * np * hn), h] 2. version 1.0 format: [(np * hn * 3), h] 3. version 2.0 format: [(np * 3 * hn), h] h: hidden size n: number of attention heads p: number of model parallel partitions np: n/p hn: h/n """ new_qkv = None if ckpt_ver == 0: # [(3 * np * hn), h] assert param_list[0].shape[0] % 3 == 0 size_qkv = param_list[0].shape[0] // 3 split_tensors = [torch.split(param, size_qkv, dim=0) for param in param_list] tensors = [] for i in range(3): tensor_tuple = [t[i] for t in split_tensors] tensors.append(torch.cat(tensor_tuple, axis=0)) new_qkv = torch.cat(tensors, axis=0) elif ckpt_ver == 1.0 or ckpt_ver == 2.0: # [(np * hn * 3), h] or [(np * 3 * hn), h] new_qkv = torch.cat(param_list, axis=0) else: assert False, f'checkpoint version: {ckpt_ver} is not supported' return new_qkv def split_query_key_value(self, param, num_to_split, offset, ckpt_ver): """ Up to now we found 3 Q/K/V parameter formats in different Megatron checkpoint versions: 1. version 0, there is no version information saved in checkpoint. format: [(3 * np * hn), h] 2. version 1.0 format: [(np * hn * 3), h] 3. 
version 2.0 format: [(np * 3 * hn), h] h: hidden size n: number of attention heads p: number of model parallel partitions np: n/p hn: h/n """ new_qkv = None if ckpt_ver == 0: # [(3 * np * hn), h] assert param.shape[0] % 3 == 0 size_qkv = param.shape[0] // 3 split_tensors = torch.split(param, size_qkv, dim=0) assert split_tensors[0].shape[0] % num_to_split == 0 split_size = split_tensors[0].shape[0] // num_to_split tensors = [] for i in range(3): tensors.append(torch.split(split_tensors[i], split_size, dim=0)[offset]) new_qkv = torch.cat(tensors, axis=0) elif ckpt_ver == 1.0 or ckpt_ver == 2.0: # [(np * hn * 3), h] or [(np * 3 * hn), h] assert param.shape[0] % num_to_split == 0 size_qkv = param.shape[0] // num_to_split split_tensors = torch.split(param, size_qkv, dim=0) new_qkv = split_tensors[offset] else: assert False, f'checkpoint version: {ckpt_ver} is not supported' return new_qkv def merge_state_dict(self, mp_world_size, mp_rank, quantize=False, quantize_bits=8, groups=64, mlp_extra_grouping=True): self.sanity_check(self.ckpt_list[0]) sd_list = self.get_merge_state_dicts(mp_world_size, mp_rank) ds_sd = copy.deepcopy(sd_list[0]) new_client_sd = collections.OrderedDict() client_sd_list = [self.get_module(sd) for sd in sd_list] keys = client_sd_list[0].keys() ckpt_ver = self.get_checkpoint_version(ds_sd) logger.info(f"checkpoint version: {ckpt_ver}") if quantize: quantizer = WeightQuantization(mlp_extra_grouping=mlp_extra_grouping, mp_size=mp_world_size) for key in keys: value_list = [sd[key] for sd in client_sd_list] if "attention.dense.weight" in key or "mlp.dense_4h_to_h.weight" in key: if quantize: value_list = quantizer.Quantize(value_list, quantize_bits, groups, key=key, merge_dim=1) new_client_sd[key] = torch.cat(value_list, axis=1) elif "attention.query_key_value" in key: if quantize and "attention.query_key_value.weight" in key: value_list = quantizer.Quantize(value_list, quantize_bits, groups, key=key) new_client_sd[key] = torch.cat(value_list, axis=0) else: if quantize: new_client_sd[key] = torch.cat(value_list, axis=0) else: new_client_sd[key] = self.merge_query_key_value(value_list, ckpt_ver) elif "mlp.dense_h_to_4h.weight" in key or "word_embeddings.weight" in key or "mlp.dense_h_to_4h.bias" in key: if quantize and "mlp.dense_h_to_4h.weight" in key: value_list = quantizer.Quantize(value_list, quantize_bits, groups, key=key) new_client_sd[key] = torch.cat(value_list, axis=0) else: new_client_sd[key] = value_list[0] if quantize: all_scales = quantizer.merge_scales() ds_sd = self.set_module(ds_sd, new_client_sd) return ds_sd, (all_scales if quantize else None), len(client_sd_list) def split_state_dict(self, mp_world_size, mp_rank, quantize=False, quantize_bits=8, groups=64, mlp_extra_grouping=True): #self.sanity_check(self.ckpt_list[0]) sd, num_to_split, ckpt_offset = self.get_split_state_dict(mp_world_size, mp_rank) ds_sd = copy.deepcopy(sd) new_client_sd = collections.OrderedDict() client_sd = self.get_module(sd) ckpt_ver = self.get_checkpoint_version(ds_sd) logger.info(f"checkpoint version: {ckpt_ver}") if quantize: quantizer = WeightQuantization(mlp_extra_grouping=mlp_extra_grouping, mp_size=mp_world_size) for key in client_sd.keys(): value = client_sd[key] if "attention.dense.weight" in key or "mlp.dense_4h_to_h.weight" in key: assert value.shape[1] % num_to_split == 0 split_size = value.shape[1] // num_to_split if quantize: q_vals = quantizer.Quantize([value], quantize_bits, groups, key) value = q_vals[0] new_client_sd[key] = torch.split(value, split_size, 
dim=1)[ckpt_offset] elif "attention.query_key_value" in key: if quantize and "attention.query_key_value.weight" in key: q_vals = quantizer.Quantize([value], quantize_bits, groups, key) value = q_vals[0] new_client_sd[key] = self.split_query_key_value(value, num_to_split, ckpt_offset, ckpt_ver) elif "mlp.dense_h_to_4h.weight" in key or "word_embeddings.weight" in key or "mlp.dense_h_to_4h.bias" in key or "final_linear.weight" in key: assert value.shape[0] % num_to_split == 0 split_size = value.shape[0] // num_to_split if quantize and "mlp.dense_h_to_4h.weight" in key: q_vals = quantizer.Quantize([value], quantize_bits, groups, key) value = q_vals[0] new_client_sd[key] = torch.split(value, split_size, dim=0)[ckpt_offset] else: new_client_sd[key] = value if quantize: all_scales = quantizer.merge_scales_split(num_to_split) ds_sd = self.set_module(ds_sd, new_client_sd) return ds_sd, (all_scales if quantize else None) def sanity_check(self, ckpt_file_name): keys_to_check = [ "attention.dense.weight", "mlp.dense_4h_to_h.weight", "attention.query_key_value", "mlp.dense_h_to_4h.weight", "mlp.dense_h_to_4h.bias" ] sd = self.checkpoint_engine.load(ckpt_file_name, map_location=lambda storage, loc: storage) # partial_key is a sub-string of one key in the sd def check_key_exist(partial_key, sd): keys = sd.keys() found = False for k in keys: if partial_key in k: found = True break return found for key in keys_to_check: assert check_key_exist(key, self.get_module(sd)), f'key: {key} is not found in the checkpoint {ckpt_file_name}' def get_checkpoint_version(self, state_dict): # Use 0 if version info doesn't exist return self.version if self.version is not None else state_dict.get('checkpoint_version', 0)
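# Standalone sketch of the version-0 Q/K/V layout handled by
# merge_query_key_value / split_query_key_value above: each rank's shard stacks
# its own Q, K and V blocks, so a merge must split every shard into three equal
# blocks and regroup per projection before concatenating (a naive row-wise
# concat would interleave Q/K/V incorrectly). The two helpers below mirror the
# ckpt_ver == 0 branches; they are illustrations, not the loader's API.
import torch

heads, head_dim, hidden, ranks = 4, 8, 32, 2
full_qkv = torch.randn(3 * heads * head_dim, hidden)   # [(3 * n * hn), h]

def shard(param, num_to_split, offset):
    # mirrors split_query_key_value(..., ckpt_ver=0)
    q, k, v = torch.split(param, param.shape[0] // 3, dim=0)
    chunk = q.shape[0] // num_to_split
    return torch.cat([t[offset * chunk:(offset + 1) * chunk] for t in (q, k, v)], dim=0)

def merge(param_list):
    # mirrors merge_query_key_value(..., ckpt_ver=0)
    blocks = [torch.split(p, p.shape[0] // 3, dim=0) for p in param_list]
    return torch.cat([torch.cat([b[i] for b in blocks], dim=0) for i in range(3)], dim=0)

shards = [shard(full_qkv, ranks, r) for r in range(ranks)]
assert torch.equal(merge(shards), full_qkv)                  # exact round trip
assert not torch.equal(torch.cat(shards, dim=0), full_qkv)   # naive concat scrambles Q/K/V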
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team """ Collection of DeepSpeed configuration utilities """ import json import collections import collections.abc from functools import reduce from pydantic import BaseModel from deepspeed.utils import logger class DeepSpeedConfigModel(BaseModel): """ This class should be used as a base for all DeepSpeed configs. It extends pydantic.BaseModel to allow for deprecated fields. To enable this feature, add deprecated=True to pydantic.Field: my_dep_field: int = Field(0, deprecated=True) Deprecated Field kwargs: - deprecated: [True|False], default False Enables / Disables deprecated fields - deprecated_msg: str, default "" Message to include with deprecation warning - new_param: str, default "" Name of the field replacing the deprecated field - set_new_param: [True|False], default True If new_param is provided, enables setting the value of that param with deprecated field value - new_param_fn: callable, default (lambda x: x) If new_param is provided and set_new_param is True, this function will modify the value of the deprecated field before placing that value in the new_param field Example: my_new_field is replacing a deprecated my_old_field. The expected type for my_new_field is int while the expected type for my_old_field is str. We want to maintain backward compatibility with our configs, so we define the fields with: class MyExampleConfig(DeepSpeedConfigModel): my_new_field: int = 0 my_old_field: str = Field('0', deprecated=True, new_param='my_new_field', new_param_fn=(lambda x: int(x))) """ def __init__(self, strict=False, **data): if (not strict): # This is temporary until we refactor all DS configs, allows HF to load models data = {k: v for k, v in data.items() if (v != "auto" or k == "replace_method")} super().__init__(**data) self._deprecated_fields_check(self) def _process_deprecated_field(self, pydantic_config, field): # Get information about the deprecated field fields_set = pydantic_config.__fields_set__ dep_param = field.name kwargs = field.field_info.extra new_param_fn = kwargs.get("new_param_fn", lambda x: x) param_value = new_param_fn(getattr(pydantic_config, dep_param)) new_param = kwargs.get("new_param", "") dep_msg = kwargs.get("deprecated_msg", "") if dep_param in fields_set: logger.warning(f"Config parameter {dep_param} is deprecated" + (f" use {new_param} instead" if new_param else "") + (f". 
{dep_msg}" if dep_msg else "")) # Check if there is a new param and if it should be set with a value if new_param and kwargs.get("set_new_param", True): # Remove the deprecate field if there is a replacing field try: delattr(pydantic_config, dep_param) except Exception as e: logger.error(f"Tried removing deprecated '{dep_param}' from config") raise e # Set new param value new_param_nested = new_param.split(".") if len(new_param_nested) > 1: # If the new param exists in a subconfig, we need to get # the fields set for that subconfig pydantic_config = reduce(getattr, new_param_nested[:-1], pydantic_config) fields_set = pydantic_config.__fields_set__ new_param_name = new_param_nested[-1] assert ( new_param_name not in fields_set ), f"Cannot provide deprecated parameter '{dep_param}' and replacing parameter '{new_param}' together" # A custom function for converting the old param value to new param value can be provided try: setattr(pydantic_config, new_param_name, param_value) except Exception as e: logger.error(f"Tried setting value for '{new_param}' with value from deprecated '{dep_param}'") raise e def _deprecated_fields_check(self, pydantic_config): fields = pydantic_config.__fields__ for field in fields.values(): if field.field_info.extra.get("deprecated", False): self._process_deprecated_field(pydantic_config, field) class Config: validate_all = True validate_assignment = True use_enum_values = True allow_population_by_field_name = True extra = "forbid" arbitrary_types_allowed = True def get_config_default(config, field_name): assert field_name in config.__fields__, f"'{field_name}' is not a field in {config}" assert not config.__fields__.get( field_name).required, f"'{field_name}' is a required field and does not have a default value" return config.__fields__.get(field_name).default class pp_int(int): """ A wrapper for integers that will return a custom string or comma-formatted string of the integer. For example, print(pp_int(1e5)) will return "10,000". This is useful mainly for auto-generated documentation purposes. """ def __new__(cls, val, custom_print_str=None): inst = super().__new__(cls, val) inst.custom_print_str = custom_print_str return inst def __repr__(self): if self.custom_print_str: return self.custom_print_str return f"{self.real:,}" # adapted from https://stackoverflow.com/a/50701137/9201239 class ScientificNotationEncoder(json.JSONEncoder): """ This class overrides ``json.dumps`` default formatter. This version keeps everything as normal except formats numbers bigger than 1e3 using scientific notation. 
Just pass ``cls=ScientificNotationEncoder`` to ``json.dumps`` to activate it """ def iterencode(self, o, _one_shot=False, level=0): indent = self.indent if self.indent is not None else 4 prefix_close = " " * level * indent level += 1 prefix = " " * level * indent if isinstance(o, bool): return "true" if o else "false" elif isinstance(o, float) or isinstance(o, int): if o > 1e3: return f"{o:e}" else: return f"{o}" elif isinstance(o, collections.abc.Mapping): x = [f'\n{prefix}"{k}": {self.iterencode(v, level=level)}' for k, v in o.items()] return "{" + ", ".join(x) + f"\n{prefix_close}" + "}" elif isinstance(o, collections.abc.Sequence) and not isinstance(o, str): return f"[{ f', '.join(map(self.iterencode, o)) }]" return "\n, ".join(super().iterencode(o, _one_shot)) class DeepSpeedConfigObject(object): """ For json serialization """ def repr(self): return self.__dict__ def __repr__(self): return json.dumps( self.__dict__, sort_keys=True, indent=4, cls=ScientificNotationEncoder, ) def get_scalar_param(param_dict, param_name, param_default_value): return param_dict.get(param_name, param_default_value) def get_list_param(param_dict, param_name, param_default_value): return param_dict.get(param_name, param_default_value) def get_dict_param(param_dict, param_name, param_default_value): return param_dict.get(param_name, param_default_value) def dict_raise_error_on_duplicate_keys(ordered_pairs): """Reject duplicate keys.""" d = dict((k, v) for k, v in ordered_pairs) if len(d) != len(ordered_pairs): counter = collections.Counter([pair[0] for pair in ordered_pairs]) keys = [key for key, value in counter.items() if value > 1] raise ValueError("Duplicate keys in DeepSpeed config: {}".format(keys)) return d
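# Sketch of the deprecated-field mechanism, mirroring the docstring example of
# DeepSpeedConfigModel above. Assumes pydantic 1.x (the implementation reads
# ``field.field_info.extra``) and that the base class is importable from
# deepspeed.runtime.config_utils; both are assumptions, not guarantees.
from pydantic import Field
from deepspeed.runtime.config_utils import DeepSpeedConfigModel  # assumed path

class MyExampleConfig(DeepSpeedConfigModel):
    my_new_field: int = 0
    my_old_field: str = Field('0',
                              deprecated=True,
                              new_param='my_new_field',
                              new_param_fn=(lambda x: int(x)))

cfg = MyExampleConfig(my_old_field="64")
# A deprecation warning is logged and the converted value lands in the new field.
print(cfg.my_new_field)  # 64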
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import math from deepspeed.utils import logger from .constants import * class CurriculumScheduler(object): def __init__(self, config): super().__init__() self.state = {} assert CURRICULUM_LEARNING_MIN_DIFFICULTY in config, \ f"Curriculum learning requires the config '{CURRICULUM_LEARNING_MIN_DIFFICULTY}'" assert CURRICULUM_LEARNING_MAX_DIFFICULTY in config, \ f"Curriculum learning requires the config '{CURRICULUM_LEARNING_MAX_DIFFICULTY}'" assert CURRICULUM_LEARNING_SCHEDULE_TYPE in config, \ f"Curriculum learning requires the config '{CURRICULUM_LEARNING_SCHEDULE_TYPE}'" self.state[CURRICULUM_LEARNING_MIN_DIFFICULTY] = config[CURRICULUM_LEARNING_MIN_DIFFICULTY] self.state[CURRICULUM_LEARNING_MAX_DIFFICULTY] = config[CURRICULUM_LEARNING_MAX_DIFFICULTY] self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] = config[CURRICULUM_LEARNING_MIN_DIFFICULTY] self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] = config[CURRICULUM_LEARNING_SCHEDULE_TYPE] self.first_step = True if config[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_DISCRETE: """ The schedule_config is a list of difficulty and a list of max step belonging to each difficulty. Example json config: "schedule_config": { "difficulty": [1,2,3], "max_step": [5,10] } The "max_step" has one less element than "difficulty", because the last difficulty will be used for all following steps. The self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] is a dictionary of difficulty : [max step for this difficulty, next difficulty]. """ assert CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \ f"Curriculum learning with fixed_discrete schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY}'" assert CURRICULUM_LEARNING_SCHEDULE_MAX_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \ f"Curriculum learning with fixed_discrete schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_MAX_STEP}'" assert len(config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_MAX_STEP]) > 0 assert len(config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY]) > 0 assert len(config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY]) == len( config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_MAX_STEP]) + 1 self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] = config[CURRICULUM_LEARNING_SCHEDULE_CONFIG] elif config[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_ROOT: """ The schedule_config includes: total_curriculum_step: how many steps the curriculum learning takes to go from min difficulty to max difficulty. difficulty_step: the difficulty level determined every time must be a multiple of this difficulty_step. This is used to determine the step of difficulty increase, and to ensure the use of NVIDIA Tensor Core acceleration (requires multiple of 8 (FP16) or 16 (INT8)). root_degree: the degree of the root function. Degree of 2 means square root and degree of 3 means cube root. Degree of 1 is equivalent to linear. 
"schedule_config": { "total_curriculum_step": 30000, "difficulty_step": 8, "root_degree": 2 } """ assert CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \ f"Curriculum learning with fixed_root schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP}'" assert CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \ f"Curriculum learning with fixed_root schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP}'" assert CURRICULUM_LEARNING_SCHEDULE_ROOT_DEGREE in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \ f"Curriculum learning with fixed_root schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_ROOT_DEGREE}'" if config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP] % 8 != 0: logger.warning( f'When using seqlen metric, the difficulty_step for curriculum learning has to be multiple of 8 (for FP16 data) or 16 (for INT8 data) to enable NVIDIA Tensor Core acceleration. Disregard this warning if this is unrelated to your metric/hardware.' ) self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] = config[CURRICULUM_LEARNING_SCHEDULE_CONFIG] elif config[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_LINEAR: """ The schedule_config is the same as CURRICULUM_LEARNING_SCHEDULE_FIXED_ROOT but without the root_degree. "schedule_config": { "total_curriculum_step": 30000, "difficulty_step": 8 } """ assert CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \ f"Curriculum learning with fixed_linear schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP}'" assert CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP in config[CURRICULUM_LEARNING_SCHEDULE_CONFIG], \ f"Curriculum learning with fixed_linear schedule requires the schedule_config '{CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP}'" if config[CURRICULUM_LEARNING_SCHEDULE_CONFIG][CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP] % 8 != 0: logger.warning( f'When using seqlen metric, the difficulty_step for curriculum learning has to be multiple of 8 (for FP16 data) or 16 (for INT8 data) to enable NVIDIA Tensor Core acceleration. Disregard this warning if this is unrelated to your metric/hardware.' ) self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] = config[CURRICULUM_LEARNING_SCHEDULE_CONFIG] elif config[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_CUSTOM: """ Fully customized schedule. 
User need to provide a custom schedule function by using the set_custom_curriculum_learning_schedule API in deepspeed/runtime/engine.py """ self.custom_get_difficulty = None else: raise RuntimeError('Unsupported curriculum schedule type') def get_current_difficulty(self): return self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] def set_current_difficulty(self, difficulty): self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] = difficulty def set_custom_get_difficulty(self, schedule_function): self.custom_get_difficulty = schedule_function def get_state(self): return self.state def set_state(self, state): self.state = state def __fixed_discrete_get_difficulty(self, global_steps): s_state = self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] if global_steps > s_state[CURRICULUM_LEARNING_SCHEDULE_MAX_STEP][-1]: return s_state[CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY][-1] for i in range(len(s_state[CURRICULUM_LEARNING_SCHEDULE_MAX_STEP])): if global_steps <= s_state[CURRICULUM_LEARNING_SCHEDULE_MAX_STEP][i]: return s_state[CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY][i] def __fixed_root_get_difficulty(self, global_steps, root_degree=None): s_state = self.state[CURRICULUM_LEARNING_SCHEDULE_CONFIG] if root_degree is None: root_degree = s_state[CURRICULUM_LEARNING_SCHEDULE_ROOT_DEGREE] next_difficulty = (float(global_steps) / s_state[CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP])**(1.0 / root_degree) next_difficulty = math.floor( next_difficulty * (self.state[CURRICULUM_LEARNING_MAX_DIFFICULTY] - self.state[CURRICULUM_LEARNING_MIN_DIFFICULTY]) + self.state[CURRICULUM_LEARNING_MIN_DIFFICULTY]) next_difficulty -= (next_difficulty % s_state[CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP]) next_difficulty = min(next_difficulty, self.state[CURRICULUM_LEARNING_MAX_DIFFICULTY]) return next_difficulty def get_difficulty(self, global_steps): if self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_DISCRETE: return self.__fixed_discrete_get_difficulty(global_steps) elif self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_LINEAR: return self.__fixed_root_get_difficulty(global_steps, 1) elif self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_FIXED_ROOT: return self.__fixed_root_get_difficulty(global_steps) elif self.state[CURRICULUM_LEARNING_SCHEDULE_TYPE] == CURRICULUM_LEARNING_SCHEDULE_CUSTOM: return self.custom_get_difficulty(global_steps) else: raise RuntimeError('Unsupported curriculum schedule type') def update_difficulty(self, global_steps): if self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] < self.state[CURRICULUM_LEARNING_MAX_DIFFICULTY]: self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY] = self.get_difficulty(global_steps) return self.state[CURRICULUM_LEARNING_CURRENT_DIFFICULTY]
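# Standalone sketch of the fixed_root difficulty formula used by the scheduler
# above (fixed_linear is the special case root_degree == 1): difficulty grows as
# floor((step / total)**(1 / degree) * (max - min) + min), snapped down to a
# multiple of difficulty_step and capped at max_difficulty.
import math

def fixed_root_difficulty(step, min_diff, max_diff, total_steps, diff_step, root_degree):
    d = (step / total_steps) ** (1.0 / root_degree)
    d = math.floor(d * (max_diff - min_diff) + min_diff)
    d -= d % diff_step                       # keep Tensor-Core-friendly multiples
    return min(d, max_diff)

# e.g. a sequence-length curriculum from 8 to 512 tokens over 30k steps, sqrt pacing
for step in (0, 1000, 10000, 30000, 40000):
    print(step, fixed_root_difficulty(step, 8, 512, 30000, 8, 2))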
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team from .constants import * import copy from ..config_utils import get_scalar_param # TODO: Reducing config verbosity by returning None or {} when disabled. # One challenge is that we still need to somehow include the default values, # for example the *_ENABLED has default of false. def get_data_efficiency_config(param_dict): output = {} output[DATA_EFFICIENCY_ENABLED] = get_data_efficiency_enabled(param_dict) output[DATA_EFFICIENCY_SEED] = get_data_efficiency_seed(param_dict) if DATA_EFFICIENCY not in param_dict.keys(): param_dict[DATA_EFFICIENCY] = {} sub_param_dict = param_dict[DATA_EFFICIENCY] output[DATA_SAMPLING] = get_data_sampling(sub_param_dict) output[DATA_ROUTING] = get_data_routing(sub_param_dict) return output def get_data_efficiency_enabled(param_dict): if DATA_EFFICIENCY in param_dict.keys(): return get_scalar_param(param_dict[DATA_EFFICIENCY], DATA_EFFICIENCY_ENABLED, DATA_EFFICIENCY_ENABLED_DEFAULT) else: return False def get_data_efficiency_seed(param_dict): if DATA_EFFICIENCY in param_dict.keys(): return get_scalar_param(param_dict[DATA_EFFICIENCY], DATA_EFFICIENCY_SEED, DATA_EFFICIENCY_SEED_DEFAULT) else: return DATA_EFFICIENCY_SEED_DEFAULT def get_data_sampling(param_dict): output = {} output[DATA_SAMPLING_ENABLED] = get_data_sampling_enabled(param_dict) output[DATA_SAMPLING_NUM_EPOCHS] = get_data_sampling_num_epochs(param_dict) output[DATA_SAMPLING_NUM_WORKERS] = get_data_sampling_num_workers(param_dict) if DATA_SAMPLING not in param_dict.keys(): param_dict[DATA_SAMPLING] = {} sub_param_dict = param_dict[DATA_SAMPLING] output[CURRICULUM_LEARNING] = get_curriculum_learning(sub_param_dict) return output def get_data_sampling_enabled(param_dict): if DATA_SAMPLING in param_dict.keys(): return get_scalar_param(param_dict[DATA_SAMPLING], DATA_SAMPLING_ENABLED, DATA_SAMPLING_ENABLED_DEFAULT) else: return False def get_data_sampling_num_epochs(param_dict): if DATA_SAMPLING in param_dict.keys(): return get_scalar_param(param_dict[DATA_SAMPLING], DATA_SAMPLING_NUM_EPOCHS, DATA_SAMPLING_NUM_EPOCHS_DEFAULT) else: return DATA_SAMPLING_NUM_EPOCHS_DEFAULT def get_data_sampling_num_workers(param_dict): if DATA_SAMPLING in param_dict.keys(): return get_scalar_param(param_dict[DATA_SAMPLING], DATA_SAMPLING_NUM_WORKERS, DATA_SAMPLING_NUM_WORKERS_DEFAULT) else: return DATA_SAMPLING_NUM_WORKERS_DEFAULT def get_curriculum_learning(param_dict): output = {} output[CURRICULUM_LEARNING_ENABLED] = get_curriculum_learning_enabled(param_dict) if CURRICULUM_LEARNING not in param_dict.keys(): param_dict[CURRICULUM_LEARNING] = {} sub_param_dict = param_dict[CURRICULUM_LEARNING] if output[CURRICULUM_LEARNING_ENABLED]: assert CURRICULUM_LEARNING_METRICS in sub_param_dict.keys( ), f"Curriculum learning is enabled, {CURRICULUM_LEARNING_METRICS} must be specified" for key, val in get_curriculum_learning_params(param_dict).items(): output[key] = val return output def get_curriculum_learning_enabled(param_dict): if CURRICULUM_LEARNING in param_dict.keys(): return get_scalar_param(param_dict[CURRICULUM_LEARNING], CURRICULUM_LEARNING_ENABLED, CURRICULUM_LEARNING_ENABLED_DEFAULT) else: return False def get_curriculum_learning_params(param_dict): if CURRICULUM_LEARNING in param_dict.keys(): curriculum_learning_params = copy.copy(param_dict[CURRICULUM_LEARNING]) curriculum_learning_params.pop(CURRICULUM_LEARNING_ENABLED) return curriculum_learning_params else: return {} def get_curriculum_enabled_legacy(param_dict): if 
CURRICULUM_LEARNING_LEGACY in param_dict.keys(): return get_scalar_param(param_dict[CURRICULUM_LEARNING_LEGACY], CURRICULUM_ENABLED_LEGACY, CURRICULUM_ENABLED_DEFAULT_LEGACY) else: return False def get_curriculum_params_legacy(param_dict): if CURRICULUM_LEARNING_LEGACY in param_dict.keys(): curriculum_params = copy.copy(param_dict[CURRICULUM_LEARNING_LEGACY]) curriculum_params.pop(CURRICULUM_ENABLED_LEGACY) return curriculum_params else: return False def get_data_routing(param_dict): output = {} output[DATA_ROUTING_ENABLED] = get_data_routing_enabled(param_dict) if DATA_ROUTING not in param_dict.keys(): param_dict[DATA_ROUTING] = {} sub_param_dict = param_dict[DATA_ROUTING] output[RANDOM_LTD] = get_random_ltd(sub_param_dict) return output def get_data_routing_enabled(param_dict): if DATA_ROUTING in param_dict.keys(): return get_scalar_param(param_dict[DATA_ROUTING], DATA_ROUTING_ENABLED, DATA_ROUTING_ENABLED_DEFAULT) else: return False def get_random_ltd(param_dict): output = {} output[RANDOM_LTD_ENABLED] = RANDOM_LTD_ENABLED_DEFAULT output[RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE] = {} output[RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE][ RANDOM_LTD_LAYER_TOKEN_LR_ENABLED] = RANDOM_LTD_LAYER_TOKEN_LR_ENABLED_DEFAULT if get_random_ltd_enabled(param_dict): output[RANDOM_LTD_ENABLED] = get_random_ltd_enabled(param_dict) for key, val in get_random_ltd_params(param_dict).items(): output[key] = val return output def get_random_ltd_enabled(param_dict): if RANDOM_LTD in param_dict.keys(): return get_scalar_param(param_dict[RANDOM_LTD], RANDOM_LTD_ENABLED, RANDOM_LTD_ENABLED_DEFAULT) else: return False def get_random_ltd_params(param_dict): if RANDOM_LTD in param_dict.keys(): random_ltd_params = copy.copy(param_dict[RANDOM_LTD]) random_ltd_params.pop(RANDOM_LTD_ENABLED) return random_ltd_params else: return {}
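# Usage sketch for get_data_efficiency_config above: settings that are not
# supplied fall back to their defaults. The module path is an assumption based
# on the relative imports at the top of this file.
from deepspeed.runtime.data_pipeline.config import get_data_efficiency_config  # assumed path

ds_config = {
    "data_efficiency": {
        "enabled": True,
        "seed": 42,
        "data_sampling": {"enabled": True, "num_workers": 2},
    }
}
out = get_data_efficiency_config(ds_config)
print(out["enabled"], out["seed"])                              # True 42
print(out["data_sampling"]["num_workers"])                      # 2
print(out["data_sampling"]["curriculum_learning"]["enabled"])   # False (default)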
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team """ Data efficiency library See sample config at https://www.deepspeed.ai/docs/config-json/data-efficiency """ DATA_EFFICIENCY = "data_efficiency" DATA_EFFICIENCY_ENABLED = "enabled" DATA_EFFICIENCY_ENABLED_DEFAULT = False DATA_EFFICIENCY_SEED = "seed" DATA_EFFICIENCY_SEED_DEFAULT = 1234 ######################################### # Data efficiency - Data Sampling ######################################### DATA_SAMPLING = "data_sampling" DATA_SAMPLING_ENABLED = "enabled" DATA_SAMPLING_ENABLED_DEFAULT = False DATA_SAMPLING_NUM_EPOCHS = "num_epochs" DATA_SAMPLING_NUM_EPOCHS_DEFAULT = 1000 DATA_SAMPLING_NUM_WORKERS = "num_workers" DATA_SAMPLING_NUM_WORKERS_DEFAULT = 0 ######################################### # Data efficiency - Data Sampling - Curriculum Learning ######################################### CURRICULUM_LEARNING = "curriculum_learning" CURRICULUM_LEARNING_ENABLED = "enabled" CURRICULUM_LEARNING_ENABLED_DEFAULT = False CURRICULUM_LEARNING_CLUSTER_PATH = "data_cluster_path" CURRICULUM_LEARNING_METRICS = "curriculum_metrics" CURRICULUM_LEARNING_SAMPLE_PATH = "index_to_sample_path" CURRICULUM_LEARNING_METRIC_PATH = "index_to_metric_path" CURRICULUM_LEARNING_CLUSTERING_TYPE = "clustering_type" CURRICULUM_LEARNING_SINGLE_CLUSTER = "single_cluster" CURRICULUM_LEARNING_CLUSTER_PREFIX = "cluster" CURRICULUM_LEARNING_DIFFICULTY_TYPE = "difficulty_type" CURRICULUM_LEARNING_VALUE_BASED = "value" CURRICULUM_LEARNING_PERCENTILE_BASED = "percentile" CURRICULUM_LEARNING_MIN_DIFFICULTY = "min_difficulty" CURRICULUM_LEARNING_MAX_DIFFICULTY = "max_difficulty" CURRICULUM_LEARNING_SCHEDULE_TYPE = "schedule_type" CURRICULUM_LEARNING_SCHEDULE_CONFIG = "schedule_config" CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY = "difficulty" CURRICULUM_LEARNING_SCHEDULE_MAX_STEP = "max_step" CURRICULUM_LEARNING_SCHEDULE_TOTAL_STEP = "total_curriculum_step" CURRICULUM_LEARNING_SCHEDULE_DIFFICULTY_STEP = "difficulty_step" CURRICULUM_LEARNING_SCHEDULE_ROOT_DEGREE = "root_degree" CURRICULUM_LEARNING_SCHEDULE_FIXED_DISCRETE = "fixed_discrete" CURRICULUM_LEARNING_SCHEDULE_FIXED_ROOT = "fixed_root" CURRICULUM_LEARNING_SCHEDULE_FIXED_LINEAR = "fixed_linear" CURRICULUM_LEARNING_SCHEDULE_CUSTOM = "custom" CURRICULUM_LEARNING_CURRENT_DIFFICULTY = "current_difficulty" CURRICULUM_LEARNING_BATCH = "batch" CURRICULUM_LEARNING_CONSUMED_SAMPLES = "consumed_samples" CURRICULUM_LEARNING_STEP = "curriculum_step" CURRICULUM_LEARNING_CURRENT_DIFFICULTIES = "current_difficulties" CURRICULUM_LEARNING_DATA_CLUSTER_PATHS = "data_cluster_paths" CURRICULUM_LEARNING_DATA_CLUSTER_CURRENT_POSITION = "data_cluster_current_position" CURRICULUM_LEARNING_NP_RNG_STATE = "np_rng_state" ######################################### # Curriculum Learning legacy implementation ######################################### CURRICULUM_LEARNING_LEGACY = "curriculum_learning" CURRICULUM_ENABLED_LEGACY = "enabled" CURRICULUM_ENABLED_DEFAULT_LEGACY = False ######################################### # Data efficiency - Data Routing ######################################### DATA_ROUTING = "data_routing" DATA_ROUTING_ENABLED = "enabled" DATA_ROUTING_ENABLED_DEFAULT = False ######################################### # Data efficiency - Data Routing - Random LTD ######################################### RANDOM_LTD = "random_ltd" RANDOM_LTD_ENABLED = "enabled" RANDOM_LTD_ENABLED_DEFAULT = False RANDOM_LTD_MODEL_MASK_NAME = "model_mask_name" RANDOM_LTD_MODEL_TYPE = "model_type" 
RANDOM_LTD_MICRO_BATCH_SIZE = "micro_batch_size" RANDOM_LTD_GLOBAL_BATCH_SIZE = "global_batch_size" RANDOM_LTD_SAMPLE_INDEX = "sample_idx" RANDOM_LTD_ATTENTION_MASK = "attention_mask" RANDOM_LTD_HIDDEN_STATE_ORDER = "hidden_state_order" RANDOM_LTD_LAYER_NUM = "random_ltd_layer_num" RANDOM_LTD_LAYER_ID = "random_ltd_layer_id" RANDOM_LTD_TOTAL_LAYER_NUM = "total_layer_num" RANDOM_LTD_CONSUMED_LAYER_TOKENS = "consumed_layer_tokens" # scheduler RANDOM_LTD_SCHEDULER = "random_ltd_schedule" RANDOM_LTD_MAX_VALUE = "max_value" RANDOM_LTD_MIN_VALUE = "min_value" RANDOM_LTD_CURRENT_VALUE = "current_value" RANDOM_LTD_SCHEDULE_CONFIG = "schedule_config" RANDOM_LTD_INCREASE_STEP = "seq_per_step" RANDOM_LTD_REQUIRE_STEP = "require_steps" RANDOM_LTD_SCHEDULER_TYPE = "schedule_type" RANDOM_LTD_CURR_STEP = "current_steps" # learning rate schedulers RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE = "layer_token_lr_schedule" RANDOM_LTD_LAYER_TOKEN_LR_ENABLED = "enabled" RANDOM_LTD_LAYER_TOKEN_LR_ENABLED_DEFAULT = False RANDOM_LTD_TOTAL_LAYER_TOKENS = "total_layer_tokens" RANDOM_LTD_WARMUP_TYPE = "warmup_type" RANDOM_LTD_WARMUP_LAYER_TOKENS = "warmup_layer_tokens"
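# Illustrative (non-exhaustive) fragment showing where the keys defined above
# sit inside a DeepSpeed config. The nesting and placeholder values are
# reconstructed from these constants and the data-efficiency docs, so treat the
# exact schema as an assumption and consult the official docs for anything
# load-bearing.
data_efficiency_fragment = {
    "data_efficiency": {
        "enabled": True,
        "seed": 1234,
        "data_sampling": {
            "enabled": True,
            "num_epochs": 1000,
            "num_workers": 0,
            "curriculum_learning": {
                "enabled": True,
                "curriculum_metrics": {
                    "seqlen": {
                        "min_difficulty": 8,
                        "max_difficulty": 512,
                        "difficulty_type": "value",
                        "clustering_type": "single_cluster",
                        "schedule_type": "fixed_root",
                        "schedule_config": {
                            "total_curriculum_step": 30000,
                            "difficulty_step": 8,
                            "root_degree": 2,
                        },
                    },
                },
            },
        },
        "data_routing": {
            "enabled": True,
            "random_ltd": {
                "enabled": True,
                # further random-LTD keys (see the constants above) go here
            },
        },
    }
}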
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team '''Copyright The Microsoft DeepSpeed Team'''
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team '''Copyright The Microsoft DeepSpeed Team'''
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import math import numpy as np from deepspeed.utils import logger from .indexed_dataset import MMapIndexedDatasetBuilder def find_fit_int_dtype(min_value, max_value): if min_value >= 0: if max_value <= 255: return np.uint8 elif max_value <= 65535: return np.uint16 elif max_value <= 4294967295: return np.uint32 else: return np.uint64 else: if max_value <= 127 and min_value >= -128: return np.int8 elif max_value <= 32767 and min_value >= -32768: return np.int16 elif max_value <= 2147483647 and min_value >= -2147483648: return np.int32 else: return np.int64 def split_index(start_idx, end_idx, num_partitions): partition_size = math.ceil((end_idx - start_idx) / num_partitions) partitions = [[start_idx + x * partition_size, min(end_idx, start_idx + (x + 1) * partition_size)] for x in range(num_partitions)] return partitions def split_dataset(dataset, num_workers, worker_id, num_threads): worker_splits = split_index(0, len(dataset), num_workers) thread_splits = split_index(worker_splits[worker_id][0], worker_splits[worker_id][1], num_threads) return worker_splits, thread_splits def create_mmap_dataset_builder(fname, dtype): logger.info(f"Creating mmap dataset builder at {fname}.") return MMapIndexedDatasetBuilder(f"{fname}.bin", dtype=dtype) def close_mmap_dataset_builder(builder, fname): builder.end_document() builder.finalize(f"{fname}.idx") logger.info(f"Finalized mmap dataset builder at {fname}.")
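# Quick check of the partitioning and dtype helpers above. The module path is
# an assumption; the calls can also be pasted next to the definitions.
from deepspeed.runtime.data_pipeline.data_sampling.utils import split_index, find_fit_int_dtype  # assumed path

print(split_index(0, 10, 3))          # [[0, 4], [4, 8], [8, 10]]
print(find_fit_int_dtype(0, 50000))   # numpy.uint16 -- vocab-sized token ids fit in 2 bytes
print(find_fit_int_dtype(-1, 300))    # numpy.int16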
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team """ Part of this code was adopted from https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/data/indexed_dataset.py """ # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # copied from fairseq/fairseq/data/indexed_dataset.py # Removed IndexedRawTextDataset since it relied on Fairseq dictionary # other slight modifications to remove fairseq dependencies # Added document index to index file and made it accessible. # An empty sentence no longer separates documents. # Some of the fixes/improvements are adopted from # https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/main/megatron/data/indexed_dataset.py from functools import lru_cache import os import shutil import struct from itertools import accumulate import numpy as np import torch def __best_fitting_dtype(vocab_size=None): if vocab_size is not None and vocab_size < 65500: return np.uint16 else: return np.int32 def get_available_dataset_impl(): return ['lazy', 'cached', 'mmap'] def infer_dataset_impl(path): if IndexedDataset.exists(path): with open(index_file_path(path), 'rb') as f: magic = f.read(8) if magic == IndexedDataset._HDR_MAGIC: return 'cached' elif magic == MMapIndexedDataset.Index._HDR_MAGIC[:8]: return 'mmap' else: return None else: print(f"Dataset does not exist: {path}") print("Path should be a basename that both .idx and .bin can be appended to get full filenames.") return None def make_builder(out_file, impl, vocab_size=None): if impl == 'mmap': return MMapIndexedDatasetBuilder(out_file, dtype=__best_fitting_dtype(vocab_size)) else: return IndexedDatasetBuilder(out_file) def make_dataset(path, impl, skip_warmup=False): if not IndexedDataset.exists(path): print(f"Dataset does not exist: {path}") print("Path should be a basename that both .idx and .bin can be appended to get full filenames.") return None if impl == 'infer': impl = infer_dataset_impl(path) if impl == 'lazy' and IndexedDataset.exists(path): return IndexedDataset(path) elif impl == 'cached' and IndexedDataset.exists(path): return IndexedCachedDataset(path) elif impl == 'mmap' and MMapIndexedDataset.exists(path): return MMapIndexedDataset(path, skip_warmup) print(f"Unknown dataset implementation: {impl}") return None def dataset_exists(path, impl): if impl == 'mmap': return MMapIndexedDataset.exists(path) else: return IndexedDataset.exists(path) def read_longs(f, n): a = np.empty(n, dtype=np.int64) f.readinto(a) return a def write_longs(f, a): f.write(np.array(a, dtype=np.int64)) dtypes = { 1: np.uint8, 2: np.int8, 3: np.int16, 4: np.int32, 5: np.int64, 6: np.float64, 7: np.double, 8: np.uint16, 9: np.uint32, 10: np.uint64 } def code(dtype): for k in dtypes.keys(): if dtypes[k] == dtype: return k raise ValueError(dtype) def index_file_path(prefix_path): return prefix_path + '.idx' def data_file_path(prefix_path): return prefix_path + '.bin' def create_doc_idx(sizes): doc_idx = [0] for i, s in enumerate(sizes): if s == 0: doc_idx.append(i + 1) return doc_idx class IndexedDataset(torch.utils.data.Dataset): """Loader for IndexedDataset""" _HDR_MAGIC = b'TNTIDX\x00\x00' def __init__(self, path): super().__init__() self.path = path self.data_file = None self.read_index(path) def read_index(self, path): with open(index_file_path(path), 'rb') as f: magic = f.read(8) assert magic == self._HDR_MAGIC, ('Index file doesn\'t match expected 
format. ' 'Make sure that --dataset-impl is configured properly.') version = f.read(8) assert struct.unpack('<Q', version) == (1, ) code, self.element_size = struct.unpack('<QQ', f.read(16)) self.dtype = dtypes[code] self._len, self.s = struct.unpack('<QQ', f.read(16)) self.doc_count = struct.unpack('<Q', f.read(8)) self.dim_offsets = read_longs(f, self._len + 1) self.data_offsets = read_longs(f, self._len + 1) self.sizes = read_longs(f, self.s) self.doc_idx = read_longs(f, self.doc_count) def read_data(self, path): self.data_file = open(data_file_path(path), 'rb', buffering=0) def check_index(self, i): if i < 0 or i >= self._len: raise IndexError('index out of range') def __del__(self): if self.data_file: self.data_file.close() # @lru_cache(maxsize=8) def __getitem__(self, idx): if not self.data_file: self.read_data(self.path) if isinstance(idx, int): i = idx self.check_index(i) tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]] a = np.empty(tensor_size, dtype=self.dtype) self.data_file.seek(self.data_offsets[i] * self.element_size) self.data_file.readinto(a) return a elif isinstance(idx, slice): start, stop, step = idx.indices(len(self)) if step != 1: raise ValueError("Slices into indexed_dataset must be contiguous") sizes = self.sizes[self.dim_offsets[start]:self.dim_offsets[stop]] size = sum(sizes) a = np.empty(size, dtype=self.dtype) self.data_file.seek(self.data_offsets[start] * self.element_size) self.data_file.readinto(a) offsets = list(accumulate(sizes)) sents = np.split(a, offsets[:-1]) return sents def __len__(self): return self._len def num_tokens(self, index): return self.sizes[index] def size(self, index): return self.sizes[index] @staticmethod def exists(path): return (os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))) @property def supports_prefetch(self): return False # avoid prefetching to save memory class IndexedCachedDataset(IndexedDataset): def __init__(self, path): super().__init__(path) self.cache = None self.cache_index = {} @property def supports_prefetch(self): return True def prefetch(self, indices): if all(i in self.cache_index for i in indices): return if not self.data_file: self.read_data(self.path) indices = sorted(set(indices)) total_size = 0 for i in indices: total_size += self.data_offsets[i + 1] - self.data_offsets[i] self.cache = np.empty(total_size, dtype=self.dtype) ptx = 0 self.cache_index.clear() for i in indices: self.cache_index[i] = ptx size = self.data_offsets[i + 1] - self.data_offsets[i] a = self.cache[ptx:ptx + size] self.data_file.seek(self.data_offsets[i] * self.element_size) self.data_file.readinto(a) ptx += size if self.data_file: # close and delete data file after prefetch so we can pickle self.data_file.close() self.data_file = None # @lru_cache(maxsize=8) def __getitem__(self, idx): if isinstance(idx, int): i = idx self.check_index(i) tensor_size = self.sizes[self.dim_offsets[i]:self.dim_offsets[i + 1]] a = np.empty(tensor_size, dtype=self.dtype) ptx = self.cache_index[i] np.copyto(a, self.cache[ptx:ptx + a.size]) return a elif isinstance(idx, slice): # Hack just to make this work, can optimizer later if necessary sents = [] for i in range(*idx.indices(len(self))): sents.append(self[i]) return sents class IndexedDatasetBuilder(object): element_sizes = {np.uint8: 1, np.int8: 1, np.int16: 2, np.int32: 4, np.int64: 8, np.float64: 4, np.double: 8} def __init__(self, out_file, dtype=np.int32): self.out_file = open(out_file, 'wb') self.dtype = dtype self.data_offsets = [0] self.dim_offsets = 
[0] self.sizes = [] self.element_size = self.element_sizes[self.dtype] self.doc_idx = [0] def add_item(self, tensor): bytes = self.out_file.write(np.array(tensor.numpy(), dtype=self.dtype)) self.data_offsets.append(self.data_offsets[-1] + bytes / self.element_size) for s in tensor.size(): self.sizes.append(s) self.dim_offsets.append(self.dim_offsets[-1] + len(tensor.size())) def end_document(self): self.doc_idx.append(len(self.sizes)) def merge_file_(self, another_file): index = IndexedDataset(another_file) assert index.dtype == self.dtype doc_offset = len(self.sizes) begin = self.data_offsets[-1] for data_offset in index.data_offsets[1:]: self.data_offsets.append(begin + data_offset) self.sizes.extend(index.sizes) begin = self.dim_offsets[-1] for dim_offset in index.dim_offsets[1:]: self.dim_offsets.append(begin + dim_offset) self.doc_idx.extend((doc_offset + index.doc_idx)[1:]) with open(data_file_path(another_file), 'rb') as f: while True: data = f.read(1024) if data: self.out_file.write(data) else: break def finalize(self, index_file): self.out_file.close() index = open(index_file, 'wb') index.write(b'TNTIDX\x00\x00') index.write(struct.pack('<Q', 1)) index.write(struct.pack('<QQ', code(self.dtype), self.element_size)) index.write(struct.pack('<QQ', len(self.data_offsets) - 1, len(self.sizes))) index.write(struct.pack('<Q', len(self.doc_idx))) write_longs(index, self.dim_offsets) write_longs(index, self.data_offsets) write_longs(index, self.sizes) write_longs(index, self.doc_idx) index.close() def _warmup_mmap_file(path): with open(path, 'rb') as stream: while stream.read(100 * 1024 * 1024): pass def exscan_from_cumsum_(arr): # given an array holding the result of an inclusive scan (cumsum), # convert to an exclusive scan (shift to the right) # [10, 30, 35, 50] --> [0, 10, 30, 35] if arr.size > 1: arr[1:] = arr[:-1] if arr.size > 0: arr[0] = 0 def get_pointers_with_total(sizes, elemsize, dtype): """Return a numpy array of type np.dtype giving the byte offsets. Multiplies values in the sizes array by elemsize (bytes), and then computes an exclusive scan to get byte offsets. Returns the total number of bytes as second item in a tuple. """ # scale values in sizes array by elemsize to get sizes in bytes pointers = np.array(sizes, dtype=dtype) pointers *= elemsize np.cumsum(pointers, axis=0, out=pointers) # get total number of bytes from all sizes (last element) bytes_last = pointers[-1] if len(sizes) > 0 else 0 # convert to byte offsets exscan_from_cumsum_(pointers) return pointers, bytes_last class MMapIndexedDataset(torch.utils.data.Dataset): class Index(object): _HDR_MAGIC = b'MMIDIDX\x00\x00' @classmethod def writer(cls, path, dtype): class _Writer(object): def __enter__(self): self._file = open(path, 'wb') self._file.write(cls._HDR_MAGIC) self._file.write(struct.pack('<Q', 1)) self._file.write(struct.pack('<B', code(dtype))) return self @staticmethod def _get_pointers(sizes, npdtype): """Return a numpy array of byte offsets given a list of sizes. Multiplies values in the sizes array by dtype size (bytes), and then computes an exclusive scan to get byte offsets. 
""" # compute element sizes in bytes pointers, _ = get_pointers_with_total(sizes, dtype().itemsize, npdtype) return pointers def write(self, sizes, doc_idx): self._file.write(struct.pack('<Q', len(sizes))) self._file.write(struct.pack('<Q', len(doc_idx))) sizes32 = np.array(sizes, dtype=np.int32) self._file.write(sizes32.tobytes(order='C')) del sizes32 pointers = self._get_pointers(sizes, np.int64) del sizes self._file.write(pointers.tobytes(order='C')) del pointers doc_idx = np.array(doc_idx, dtype=np.int64) self._file.write(doc_idx.tobytes(order='C')) def __exit__(self, exc_type, exc_val, exc_tb): self._file.close() return _Writer() def __init__(self, path, skip_warmup=False): with open(path, 'rb') as stream: magic_test = stream.read(9) assert self._HDR_MAGIC == magic_test, ('Index file doesn\'t match expected format. ' 'Make sure that --dataset-impl is configured properly.') version = struct.unpack('<Q', stream.read(8)) assert (1, ) == version dtype_code, = struct.unpack('<B', stream.read(1)) self._dtype = dtypes[dtype_code] self._dtype_size = self._dtype().itemsize self._len = struct.unpack('<Q', stream.read(8))[0] self._doc_count = struct.unpack('<Q', stream.read(8))[0] offset = stream.tell() if not skip_warmup: print(" warming up index mmap file...") _warmup_mmap_file(path) self._bin_buffer_mmap = np.memmap(path, mode='r', order='C') self._bin_buffer = memoryview(self._bin_buffer_mmap) print(" reading sizes...") self._sizes = np.frombuffer(self._bin_buffer, dtype=np.int32, count=self._len, offset=offset) print(" reading pointers...") self._pointers = np.frombuffer(self._bin_buffer, dtype=np.int64, count=self._len, offset=offset + self._sizes.nbytes) print(" reading document index...") self._doc_idx = np.frombuffer(self._bin_buffer, dtype=np.int64, count=self._doc_count, offset=offset + self._sizes.nbytes + self._pointers.nbytes) def __del__(self): self._bin_buffer_mmap._mmap.close() del self._bin_buffer_mmap @property def dtype(self): return self._dtype @property def sizes(self): return self._sizes @property def doc_idx(self): return self._doc_idx @lru_cache(maxsize=8) def __getitem__(self, i): return self._pointers[i], self._sizes[i] def __len__(self): return self._len def __init__(self, path, skip_warmup=False): super().__init__() self._path = None self._index = None self._bin_buffer = None self._do_init(path, skip_warmup) def __getstate__(self): return self._path def __setstate__(self, state): self._do_init(state) def _do_init(self, path, skip_warmup): self._path = path self._index = self.Index(index_file_path(self._path), skip_warmup) if not skip_warmup: print(" warming up data mmap file...") _warmup_mmap_file(data_file_path(self._path)) print(" creating numpy buffer of mmap...") self._bin_buffer_mmap = np.memmap(data_file_path(self._path), mode='r', order='C') print(" creating memory view of numpy buffer...") self._bin_buffer = memoryview(self._bin_buffer_mmap) def __del__(self): self._bin_buffer_mmap._mmap.close() del self._bin_buffer_mmap del self._index def __len__(self): return len(self._index) # @lru_cache(maxsize=8) def __getitem__(self, idx): if isinstance(idx, int): ptr, size = self._index[idx] np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=size, offset=ptr) return np_array elif isinstance(idx, slice): start, stop, step = idx.indices(len(self)) if step != 1: raise ValueError("Slices into indexed_dataset must be contiguous") ptr = self._index._pointers[start] sizes = self._index._sizes[idx] offsets = list(accumulate(sizes)) total_size = 
sum(sizes) np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=total_size, offset=ptr) sents = np.split(np_array, offsets[:-1]) return sents def get(self, idx, offset=0, length=None): """ Retrieves a single item from the dataset with the option to only return a portion of the item. get(idx) is the same as [idx] but get() does not support slicing. """ ptr, size = self._index[idx] if length is None: length = size - offset ptr += offset * np.dtype(self._index.dtype).itemsize np_array = np.frombuffer(self._bin_buffer, dtype=self._index.dtype, count=length, offset=ptr) return np_array @property def sizes(self): return self._index.sizes def size(self, index): return self._index.sizes[index] @property def doc_idx(self): return self._index.doc_idx def get_doc_idx(self): return self._index._doc_idx def set_doc_idx(self, doc_idx_): self._index._doc_idx = doc_idx_ @property def supports_prefetch(self): return False @staticmethod def exists(path): return (os.path.exists(index_file_path(path)) and os.path.exists(data_file_path(path))) @property def dtype(self): return self._index.dtype class MMapIndexedDatasetBuilder(object): def __init__(self, out_file, dtype=np.int64): self._data_file = open(out_file, 'wb') self._dtype = dtype self._sizes = [] self._doc_idx = [0] def add_item(self, tensor): np_array = np.array(tensor.numpy(), dtype=self._dtype) self._data_file.write(np_array.tobytes(order='C')) self._sizes.append(np_array.size) def add_item_numpy(self, np_array): if np_array.dtype != self._dtype: np_array = np_array.astype(self._dtype) self._data_file.write(np_array.tobytes(order='C')) self._sizes.append(np_array.size) def end_document(self): self._doc_idx.append(len(self._sizes)) def merge_file_(self, another_file): # Concatenate index index = MMapIndexedDataset.Index(index_file_path(another_file)) assert index.dtype == self._dtype total_len = len(index.sizes) + len(self._sizes) print(f" concat {another_file} size={len(index.sizes)} for a total size of {total_len}") offset = len(self._sizes) self._sizes.extend(index.sizes) self._doc_idx.extend((offset + index.doc_idx)[1:]) # Concatenate data with open(data_file_path(another_file), 'rb') as f: shutil.copyfileobj(f, self._data_file) def finalize(self, index_file): self._data_file.close() with MMapIndexedDataset.Index.writer(index_file, self._dtype) as index: index.write(self._sizes, self._doc_idx)
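# ---------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the upstream module): a minimal
# round trip through MMapIndexedDatasetBuilder and MMapIndexedDataset, using
# the index_file_path()/data_file_path() helpers defined earlier in this file
# (prefix -> "<prefix>.idx" / "<prefix>.bin"). The /tmp prefix is illustrative.
def _demo_mmap_round_trip(prefix="/tmp/demo_indexed_dataset"):
    builder = MMapIndexedDatasetBuilder(data_file_path(prefix), dtype=np.int32)
    builder.add_item(torch.tensor([1, 2, 3], dtype=torch.int32))  # sample 0
    builder.add_item(torch.tensor([4, 5], dtype=torch.int32))     # sample 1
    builder.end_document()                  # mark a document boundary after two samples
    builder.finalize(index_file_path(prefix))

    ds = MMapIndexedDataset(prefix, skip_warmup=True)
    assert len(ds) == 2
    assert ds[0].tolist() == [1, 2, 3] and ds[1].tolist() == [4, 5]
    assert ds.doc_idx.tolist() == [0, 2]    # doc_idx holds cumulative sample counts at doc ends
    return ds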
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import os from collections import defaultdict import csv import time from multiprocessing import Process, Manager import numpy as np import torch from torch.utils.data import BatchSampler, SequentialSampler, DataLoader, Subset from deepspeed.utils import logger from .indexed_dataset import MMapIndexedDataset from .utils import split_dataset, split_index, create_mmap_dataset_builder, close_mmap_dataset_builder, find_fit_int_dtype class DataAnalyzer(object): def __init__(self, dataset, num_workers=1, worker_id=0, num_threads=1, num_threads_reduce=1, specific_threads=[], batch_size=1, metric_names=[], metric_functions=[], metric_types=[], metric_dtypes=[], save_path="./", collate_fn=None, custom_map_init=None, custom_map_update=None, custom_map_finalize=None, custom_reduce=None): super().__init__() self.dataset = dataset self.num_workers = num_workers self.worker_id = worker_id self.num_threads = num_threads self.num_threads_reduce = num_threads_reduce self.specific_threads = specific_threads self.batch_size = batch_size self.metric_names = metric_names self.metric_functions = metric_functions self.metric_types = metric_types self.metric_dtypes = metric_dtypes self.save_path = save_path self.collate_fn = collate_fn self.custom_map_init = custom_map_init self.custom_map_update = custom_map_update self.custom_map_finalize = custom_map_finalize self.custom_reduce = custom_reduce def init_metric_results(self, thread_id, metric_names, metric_types, metric_dtypes, save_path, worker_id): metric_results = [] for m_idx in range(len(metric_names)): metric_name, metric_type, metric_dtype = metric_names[m_idx], \ metric_types[m_idx], metric_dtypes[m_idx] assert metric_dtype not in [ np.float64, np.double ], "Currently floating point metric values are not supported. Please change your metric into integer values (and potentially multiply a larger coefficient to keep the precision)." 
metric_save_path = f"{save_path}/{metric_name}/worker{worker_id}_thread{thread_id}/" os.makedirs(metric_save_path, exist_ok=True) if metric_type == 'single_value_per_sample': sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric" sample_to_metric_builder = create_mmap_dataset_builder(sample_to_metric_fname, metric_dtype) metric_to_sample_fname = f"{metric_save_path}/{metric_name}_metric_to_sample" os.system(f"rm -rf {metric_to_sample_fname}*") metric_to_sample_dict = defaultdict(list) metric_results.append({ "sample_to_metric_fname": sample_to_metric_fname, "sample_to_metric_builder": sample_to_metric_builder, "metric_to_sample_fname": metric_to_sample_fname, "metric_to_sample_dict": metric_to_sample_dict }) elif metric_type == 'accumulate_value_over_samples': metric_value = None metric_value_fname = f"{metric_save_path}/{metric_name}_metric_value" metric_results.append({"metric_value": metric_value, "metric_value_fname": metric_value_fname}) return metric_results def update_metric_results(self, data, metric_types, metric_functions, metric_results): for m_idx in range(len(metric_types)): metric_type, metric_function, metric_result = metric_types[m_idx], \ metric_functions[m_idx], metric_results[m_idx] if metric_type == 'single_value_per_sample': metric_values = metric_function(data) for row in range(metric_values.size()[0]): metric_result["sample_to_metric_builder"].add_item(metric_values[row].reshape(-1)) metric_result["metric_to_sample_dict"][metric_values[row].item()].append( data['index'][row][0].item()) for m_value in metric_result["metric_to_sample_dict"]: if len(metric_result["metric_to_sample_dict"][m_value]) > 100: metric_fname = metric_result["metric_to_sample_fname"] with open(f"{metric_fname}_{m_value}.csv", 'a') as f: writer = csv.writer(f) writer.writerows([metric_result["metric_to_sample_dict"][m_value]]) metric_result["metric_to_sample_dict"][m_value] = [] elif metric_type == 'accumulate_value_over_samples': metric_values = metric_function(data) if metric_result["metric_value"] is None: metric_result["metric_value"] = metric_values else: metric_result["metric_value"].add_(metric_values) def finalize_metric_results(self, metric_types, metric_dtypes, metric_results): for m_idx in range(len(metric_types)): metric_type, metric_dtype, metric_result = metric_types[m_idx], \ metric_dtypes[m_idx], metric_results[m_idx] if metric_type == 'single_value_per_sample': metric_fname = metric_result["sample_to_metric_fname"] close_mmap_dataset_builder(metric_result["sample_to_metric_builder"], metric_fname) for m_value in metric_result["metric_to_sample_dict"]: if len(metric_result["metric_to_sample_dict"][m_value]) > 0: metric_fname = metric_result["metric_to_sample_fname"] with open(f"{metric_fname}_{m_value}.csv", 'a') as f: writer = csv.writer(f) writer.writerows([metric_result["metric_to_sample_dict"][m_value]]) metric_result["metric_to_sample_dict"][m_value] = [] elif metric_type == 'accumulate_value_over_samples': if metric_result["metric_value"] is not None: metric_value_builder = create_mmap_dataset_builder(metric_result["metric_value_fname"], metric_dtype) metric_value_builder.add_item(metric_result["metric_value"].reshape(-1)) close_mmap_dataset_builder(metric_value_builder, metric_result["metric_value_fname"]) def run_map_helper(self, thread_id): start_idx, end_idx = self.thread_splits[thread_id][0], \ self.thread_splits[thread_id][1] logger.info(f"worker {self.worker_id} thread {thread_id}: start working " \ f"on data subset {start_idx} to {end_idx}") 
thread_dataset = Subset(self.dataset, list(range(start_idx, end_idx))) sampler = BatchSampler(SequentialSampler(thread_dataset), batch_size=self.batch_size, drop_last=False) if self.collate_fn is None: iterator = iter(DataLoader(thread_dataset, batch_sampler=sampler, num_workers=0, pin_memory=False)) else: iterator = iter( DataLoader(thread_dataset, batch_sampler=sampler, num_workers=0, collate_fn=self.collate_fn, pin_memory=False)) if self.custom_map_init is None: metric_results = self.init_metric_results(thread_id, self.metric_names, self.metric_types, self.metric_dtypes, self.save_path, self.worker_id) else: metric_results = self.custom_map_init(thread_id, self.metric_names, self.metric_types, self.metric_dtypes, self.save_path, self.worker_id) total_sample = len(thread_dataset) processed_sample = 0 start = time.time() while True: try: data = next(iterator) if self.custom_map_update is None: self.update_metric_results(data, self.metric_types, self.metric_functions, metric_results) else: self.custom_map_update(data, self.metric_types, self.metric_functions, metric_results) processed_sample += self.batch_size duration = (time.time() - start) / 3600.0 remain_duration = duration * total_sample / processed_sample - duration logger.info( f"worker {self.worker_id} thread {thread_id}: {processed_sample} " \ f"out of {total_sample} processed in {duration:.2f} hr, " \ f"estimated to finish in {remain_duration:.2f} hr") except StopIteration: logger.info(f"worker {self.worker_id} thread {thread_id}: reach end of file") break if self.custom_map_finalize is None: self.finalize_metric_results(self.metric_types, self.metric_dtypes, metric_results) else: self.custom_map_finalize(self.metric_types, self.metric_dtypes, metric_results) logger.info(f"worker {self.worker_id} thread {thread_id}: finished") def run_map(self): self.worker_splits, self.thread_splits = split_dataset(self.dataset, self.num_workers, self.worker_id, self.num_threads) if len(self.specific_threads) > 0: threads_to_run = self.specific_threads else: threads_to_run = list(range(self.num_threads)) if self.num_threads > 1: p = [] for thread in threads_to_run: p.append(Process(target=self.run_map_helper, args=(thread, ))) p[thread].start() for thread in threads_to_run: p[thread].join() else: assert self.num_threads == 1 self.run_map_helper(0) def get_metric_value_percentiles(self, metric_name, num_sample_per_value, total_num_samples): logger.info(f"Checking the value percentiles of metric {metric_name}...") processed_samples = 0 current_percentile = 5 for key in sorted(num_sample_per_value.keys()): processed_samples += num_sample_per_value[key] if processed_samples >= total_num_samples * current_percentile / 100.0: logger.info(f"Metric {metric_name} {current_percentile}th percentile: {key}") current_percentile += 5 def merge_gather_map_stats(self, num_workers, num_threads, num_threads_reduce, t_idx_reduce, metric_save_path, metric_name, return_dict): results = [] for w_idx in range(num_workers): for t_idx in range(num_threads): if (w_idx * num_threads + t_idx) % num_threads_reduce == t_idx_reduce: w_metric_save_path = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/" w_sample_to_metric_fname = f"{w_metric_save_path}/{metric_name}_sample_to_metric" w_sample_to_metric = MMapIndexedDataset(w_sample_to_metric_fname, skip_warmup=True) unique_v = list(np.unique(w_sample_to_metric)) sample_to_metric_count = len(w_sample_to_metric) logger.info(f"Finished gathering map stats from worker {w_idx} thread {t_idx}.") results.append([unique_v, 
sample_to_metric_count]) return_dict[t_idx_reduce] = results def merge_sample_to_metric(self, t_idx_reduce, metric_save_path, metric_name, metric_value_dtype, map_worker_thread): sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric_thread{t_idx_reduce}" sample_to_metric_builder = create_mmap_dataset_builder(sample_to_metric_fname, metric_value_dtype) for w_t in map_worker_thread: w_metric_save_path = f"{metric_save_path}/worker{w_t[0]}_thread{w_t[1]}/" w_sample_to_metric_fname = f"{w_metric_save_path}/{metric_name}_sample_to_metric" w_data = MMapIndexedDataset(w_sample_to_metric_fname, skip_warmup=True) for row in range(len(w_data)): sample_to_metric_builder.add_item(torch.tensor(w_data[row].astype(np.int64), dtype=torch.long)) logger.info(f"Finished merge_sample_to_metric from worker {w_t[0]} thread {w_t[1]}.") close_mmap_dataset_builder(sample_to_metric_builder, sample_to_metric_fname) def merge_metric_to_sample(self, t_idx_reduce, metric_save_path, metric_name, sample_idx_dtype, metric_value_dtype, unique_metric_values, num_workers, num_threads): index_to_sample_fname = f"{metric_save_path}/{metric_name}_index_to_sample_thread{t_idx_reduce}" index_to_sample_builder = create_mmap_dataset_builder(index_to_sample_fname, sample_idx_dtype) index_to_metric_fname = f"{metric_save_path}/{metric_name}_index_to_metric_thread{t_idx_reduce}" index_to_metric_builder = create_mmap_dataset_builder(index_to_metric_fname, metric_value_dtype) for unique_v in unique_metric_values: samples = [] for w_idx in range(num_workers): for t_idx in range(num_threads): w_metric_save_path = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/" w_metric_to_sample_fname = f"{w_metric_save_path}/{metric_name}_metric_to_sample_{unique_v}.csv" if os.path.isfile(w_metric_to_sample_fname): with open(w_metric_to_sample_fname, 'r') as f: datareader = csv.reader(f) for row in datareader: samples += [int(x) for x in row] index_to_sample_builder.add_item(torch.tensor(samples, dtype=torch.long)) index_to_metric_builder.add_item(torch.tensor([unique_v], dtype=torch.long)) logger.info(f"Finished reducing metric {metric_name} value {unique_v}.") close_mmap_dataset_builder(index_to_sample_builder, index_to_sample_fname) close_mmap_dataset_builder(index_to_metric_builder, index_to_metric_fname) def merge_map_results(self, dataset, metric_names, metric_types, save_path, num_workers, num_threads, num_threads_reduce): total_num_samples = len(dataset) sample_idx_dtype = find_fit_int_dtype(0, total_num_samples - 1) logger.info( f"Total number of data samples: {total_num_samples}. Will use {sample_idx_dtype} to store the sample indexes." 
) for m_idx in range(len(metric_names)): metric_name, metric_type = metric_names[m_idx], metric_types[m_idx] if metric_type == 'single_value_per_sample': metric_save_path = f"{save_path}/{metric_name}/" sample_to_metric_count = 0 unique_metric_values = set([]) manager = Manager() return_dict = manager.dict() p = [] for t_idx_reduce in range(num_threads_reduce): p.append( Process(target=self.merge_gather_map_stats, args=( num_workers, num_threads, num_threads_reduce, t_idx_reduce, metric_save_path, metric_name, return_dict, ))) p[t_idx_reduce].start() for t_idx_reduce in range(num_threads_reduce): p[t_idx_reduce].join() for t_idx_reduce in range(num_threads_reduce): results = return_dict[t_idx_reduce] for res in results: unique_metric_values = unique_metric_values.union(set(res[0])) sample_to_metric_count += res[1] value_max = max(unique_metric_values) value_min = min(unique_metric_values) assert sample_to_metric_count == total_num_samples, "The number of samples in map result files are not correct. It's possible that some map worker didn't finish successfully." metric_value_dtype = find_fit_int_dtype(value_min, value_max) logger.info( f"Metric {metric_name} has values between {value_min} and {value_max}. Will use {metric_value_dtype} to store the metric values." ) # sample_to_metric map_worker_thread = [] for w_idx in range(num_workers): for t_idx in range(num_threads): map_worker_thread.append([w_idx, t_idx]) thread_splits = split_index(0, len(map_worker_thread), num_threads_reduce) p = [] for t_idx_reduce in range(num_threads_reduce): start_idx, end_idx = thread_splits[t_idx_reduce][0], thread_splits[t_idx_reduce][1] p.append( Process(target=self.merge_sample_to_metric, args=( t_idx_reduce, metric_save_path, metric_name, metric_value_dtype, map_worker_thread[start_idx:end_idx], ))) p[t_idx_reduce].start() for t_idx_reduce in range(num_threads_reduce): p[t_idx_reduce].join() sample_to_metric_fname = f"{metric_save_path}/{metric_name}_sample_to_metric" sample_to_metric_builder = create_mmap_dataset_builder(sample_to_metric_fname, metric_value_dtype) for t_idx_reduce in range(num_threads_reduce): chunk_fname = f"{metric_save_path}/{metric_name}_sample_to_metric_thread{t_idx_reduce}" logger.info(f"Merging file {chunk_fname}") sample_to_metric_builder.merge_file_(chunk_fname) close_mmap_dataset_builder(sample_to_metric_builder, sample_to_metric_fname) sample_to_metric = MMapIndexedDataset(sample_to_metric_fname, skip_warmup=True) assert len(sample_to_metric) == total_num_samples # metric_to_sample unique_metric_values = list(sorted(unique_metric_values)) thread_splits = split_index(0, len(unique_metric_values), num_threads_reduce) p = [] for t_idx_reduce in range(num_threads_reduce): start_idx, end_idx = thread_splits[t_idx_reduce][0], thread_splits[t_idx_reduce][1] p.append( Process(target=self.merge_metric_to_sample, args=( t_idx_reduce, metric_save_path, metric_name, sample_idx_dtype, metric_value_dtype, unique_metric_values[start_idx:end_idx], num_workers, num_threads, ))) p[t_idx_reduce].start() for t_idx_reduce in range(num_threads_reduce): p[t_idx_reduce].join() index_to_sample_fname = f"{metric_save_path}/{metric_name}_index_to_sample" index_to_sample_builder = create_mmap_dataset_builder(index_to_sample_fname, sample_idx_dtype) index_to_metric_fname = f"{metric_save_path}/{metric_name}_index_to_metric" index_to_metric_builder = create_mmap_dataset_builder(index_to_metric_fname, metric_value_dtype) for t_idx_reduce in range(num_threads_reduce): chunk_is_fname = 
f"{metric_save_path}/{metric_name}_index_to_sample_thread{t_idx_reduce}" logger.info(f"Merging file {chunk_is_fname}") index_to_sample_builder.merge_file_(chunk_is_fname) chunk_im_fname = f"{metric_save_path}/{metric_name}_index_to_metric_thread{t_idx_reduce}" logger.info(f"Merging file {chunk_im_fname}") index_to_metric_builder.merge_file_(chunk_im_fname) close_mmap_dataset_builder(index_to_sample_builder, index_to_sample_fname) close_mmap_dataset_builder(index_to_metric_builder, index_to_metric_fname) num_sample_per_value = {} index_to_sample = MMapIndexedDataset(index_to_sample_fname, skip_warmup=True) index_to_metric = MMapIndexedDataset(index_to_metric_fname, skip_warmup=True) index_to_sample_merged_fname = f"{metric_save_path}/{metric_name}_index_to_sample_percentile_merged" index_to_sample_merged_builder = create_mmap_dataset_builder(index_to_sample_merged_fname, sample_idx_dtype) for v_idx in range(len(index_to_sample)): if v_idx > 0: assert index_to_metric[v_idx] > index_to_metric[v_idx - 1] num_sample_per_value[index_to_metric[v_idx][0]] = len(index_to_sample[v_idx]) assert sum(num_sample_per_value.values()) == total_num_samples merge_step = len(index_to_sample) // 100 for v_idx in range(0, len(index_to_sample), merge_step): merged_samples = np.copy( np.concatenate(index_to_sample[v_idx:min(len(index_to_sample), (v_idx + merge_step))], axis=None)) index_to_sample_merged_builder.add_item( torch.tensor(merged_samples.astype(np.int64), dtype=torch.long)) logger.info(f"Finished merging index_to_sample {v_idx} to {v_idx+merge_step}.") close_mmap_dataset_builder(index_to_sample_merged_builder, index_to_sample_merged_fname) self.get_metric_value_percentiles(metric_name, num_sample_per_value, total_num_samples) elif metric_type == 'accumulate_value_over_samples': metric_save_path = f"{save_path}/{metric_name}/" metric_value = None for w_idx in range(num_workers): for t_idx in range(num_threads): w_metric_save_path = f"{metric_save_path}/worker{w_idx}_thread{t_idx}/" w_metric_value_fname = f"{w_metric_save_path}/{metric_name}_metric_value" w_metric_value = MMapIndexedDataset(w_metric_value_fname, skip_warmup=True) if metric_value is None: metric_value = np.copy(w_metric_value[0]) else: metric_value += np.copy(w_metric_value[0]) value_max = int(max(metric_value)) value_min = int(min(metric_value)) metric_value_dtype = find_fit_int_dtype(value_min, value_max) metric_value_fname = f"{metric_save_path}/{metric_name}_metric_value" metric_value_builder = create_mmap_dataset_builder(metric_value_fname, metric_value_dtype) metric_value_builder.add_item(torch.tensor(metric_value.astype(np.int64), dtype=torch.long)) close_mmap_dataset_builder(metric_value_builder, metric_value_fname) def run_reduce(self): if self.custom_reduce is None: self.merge_map_results(self.dataset, self.metric_names, self.metric_types, self.save_path, self.num_workers, self.num_threads, self.num_threads_reduce) else: self.custom_reduce(self.dataset, self.metric_names, self.metric_types, self.save_path, self.num_workers, self.num_threads, self.num_threads_reduce)
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team """ coding=utf-8 Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Part of this code was adopted from https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/data/data_samplers.py """ import torch import os import numpy as np import deepspeed.comm as dist from deepspeed.utils import logger from deepspeed.accelerator import get_accelerator from ..constants import * from ..curriculum_scheduler import CurriculumScheduler from .indexed_dataset import MMapIndexedDataset from .utils import create_mmap_dataset_builder, close_mmap_dataset_builder, find_fit_int_dtype class DeepSpeedDataSampler(object): def __init__(self, data_efficiency_config, one_epoch_total_samples, micro_batch_size, data_parallel_rank, data_parallel_size, data_parallel_group, gradient_accumulation_steps, global_rank, drop_last=True): # Keep a copy of input params for later use. self.data_efficiency_config = data_efficiency_config self.one_epoch_total_samples = one_epoch_total_samples self.index_dtype = find_fit_int_dtype(0, one_epoch_total_samples) self.total_samples = one_epoch_total_samples * self.data_efficiency_config[DATA_SAMPLING][ DATA_SAMPLING_NUM_EPOCHS] self.micro_batch_size = micro_batch_size self.data_parallel_rank = data_parallel_rank self.data_parallel_group = data_parallel_group self.micro_batch_times_data_parallel_size = \ self.micro_batch_size * data_parallel_size self.gradient_accumulation_steps = gradient_accumulation_steps self.global_batch_size = self.micro_batch_times_data_parallel_size * \ self.gradient_accumulation_steps self.global_rank = global_rank self.drop_last = drop_last self.np_rng = np.random.default_rng(self.data_efficiency_config[DATA_EFFICIENCY_SEED]) self.state = {} self.batch = [] self.consumed_samples = 0 if self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_ENABLED]: self.curriculum_step = 0 self.current_difficulties = {} self.data_cluster_paths = [] self.data_cluster_current_position = [] self.curriculum_schedulers = {} self.curriculum_index_to_sample = {} self.curriculum_index_to_metric = {} self.difficulty_type = {} self.clustering_type = {} self.data_1epoch_size = None if self.global_rank == 0: self.data_clusters = [] self.data_cluster_sizes = [] cluster_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][ CURRICULUM_LEARNING_CLUSTER_PATH] if not os.path.exists(cluster_path): os.makedirs(cluster_path) for metric in self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS]: self.curriculum_schedulers[metric] = CurriculumScheduler( data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS][metric]) self.difficulty_type[metric] = data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][ CURRICULUM_LEARNING_METRICS][metric][CURRICULUM_LEARNING_DIFFICULTY_TYPE] self.clustering_type[metric] = data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][ 
CURRICULUM_LEARNING_METRICS][metric][CURRICULUM_LEARNING_CLUSTERING_TYPE] if self.global_rank == 0: if self.clustering_type[metric] != CURRICULUM_LEARNING_SINGLE_CLUSTER: self.curriculum_index_to_sample[metric] = MMapIndexedDataset( data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS] [metric][CURRICULUM_LEARNING_SAMPLE_PATH], skip_warmup=True) if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED: self.curriculum_index_to_metric[metric] = MMapIndexedDataset( data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS] [metric][CURRICULUM_LEARNING_METRIC_PATH], skip_warmup=True) # Sanity checks. assert self.total_samples > 0, \ 'no sample to consume: {}'.format(self.total_samples) assert self.micro_batch_size > 0 assert data_parallel_size > 0 assert self.data_parallel_rank < data_parallel_size, \ 'data_parallel_rank should be smaller than data size: {}, ' \ '{}'.format(self.data_parallel_rank, data_parallel_size) def __len__(self): return self.total_samples def set_custom_curriculum_learning_schedule(self, schedule_func_dict): for metric in self.curriculum_schedulers: if metric in schedule_func_dict: self.curriculum_schedulers[metric].set_custom_get_difficulty(schedule_func_dict[metric]) def get_start_end_idx(self): start_idx = self.data_parallel_rank * self.micro_batch_size end_idx = start_idx + self.micro_batch_size return start_idx, end_idx def get_sample_based_on_metric_value(self, metric, value_start, value_end): new_samples = None for row in range(len(self.curriculum_index_to_sample[metric])): if self.curriculum_index_to_metric[metric][row] <= value_end and self.curriculum_index_to_metric[metric][ row] > value_start: row_samples = np.copy(self.curriculum_index_to_sample[metric][row]) new_samples = row_samples if new_samples is None else np.concatenate( (new_samples, row_samples), axis=None) return new_samples def get_sample_based_on_metric_percentile(self, metric, percentile_start, percentile_end): new_samples = None if self.data_1epoch_size is None: self.data_1epoch_size = sum(len(x) for x in self.curriculum_index_to_sample[metric]) max_percentile = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_METRICS][ metric][CURRICULUM_LEARNING_MAX_DIFFICULTY] sample_per_percentile = self.data_1epoch_size // max_percentile start_count = sample_per_percentile * percentile_start end_count = sample_per_percentile * percentile_end if percentile_end == max_percentile: end_count = self.data_1epoch_size current_count = 0 for row in range(len(self.curriculum_index_to_sample[metric])): row_size = len(self.curriculum_index_to_sample[metric][row]) if current_count + row_size > start_count: row_start = max(0, start_count - current_count) if current_count + row_size <= end_count: row_end = row_size else: row_end = end_count - current_count row_samples = np.copy(self.curriculum_index_to_sample[metric][row][row_start:row_end]) new_samples = row_samples if new_samples is None else np.concatenate( (new_samples, row_samples), axis=None) current_count += row_size if current_count >= end_count: break return new_samples def get_new_cluster(self, previous_difficulties): cluster_fname = CURRICULUM_LEARNING_CLUSTER_PREFIX for metric in self.curriculum_schedulers: cluster_fname = f"{cluster_fname}_{metric}{self.current_difficulties[metric]}" cluster_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][ CURRICULUM_LEARNING_CLUSTER_PATH] cluster_path = f"{cluster_path}/{cluster_fname}" if 
self.global_rank == 0: new_cluster = None need_clustering = 0 for metric in self.clustering_type: if self.clustering_type[metric] != CURRICULUM_LEARNING_SINGLE_CLUSTER: need_clustering += 1 if need_clustering > 1: for metric in self.curriculum_schedulers: if self.clustering_type[metric] == CURRICULUM_LEARNING_SINGLE_CLUSTER: metric_cluster = np.arange(start=0, stop=self.one_epoch_total_samples, step=1, dtype=self.index_dtype) else: if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED: metric_cluster = self.get_sample_based_on_metric_value(metric, float('-inf'), self.current_difficulties[metric]) elif self.difficulty_type[metric] == CURRICULUM_LEARNING_PERCENTILE_BASED: metric_cluster = self.get_sample_based_on_metric_percentile( metric, 0, self.current_difficulties[metric]) new_cluster = metric_cluster if new_cluster is None else \ np.intersect1d(new_cluster, metric_cluster, assume_unique=True) for cluster in self.data_clusters: new_cluster = np.setdiff1d(new_cluster, cluster[0], assume_unique=True) else: if len(self.data_clusters) == 0: new_cluster = np.arange(start=0, stop=self.one_epoch_total_samples, step=1, dtype=self.index_dtype) for metric in self.curriculum_schedulers: if self.clustering_type[metric] != CURRICULUM_LEARNING_SINGLE_CLUSTER: if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED: new_cluster = self.get_sample_based_on_metric_value(metric, previous_difficulties[metric], self.current_difficulties[metric]) elif self.difficulty_type[metric] == CURRICULUM_LEARNING_PERCENTILE_BASED: new_cluster = self.get_sample_based_on_metric_percentile( metric, previous_difficulties[metric], self.current_difficulties[metric]) if new_cluster is not None and len(new_cluster) > 0: logger.info( f"new data cluster (previous_difficulties {previous_difficulties}, current_difficulties {self.current_difficulties}) with size {len(new_cluster)} generated." ) self.np_rng.shuffle(new_cluster) cluster_builder = create_mmap_dataset_builder(cluster_path, self.index_dtype) cluster_builder.add_item_numpy(new_cluster) close_mmap_dataset_builder(cluster_builder, cluster_path) self.data_clusters.append(MMapIndexedDataset(cluster_path, skip_warmup=True)) self.data_cluster_sizes.append(len(self.data_clusters[-1][0])) else: logger.info( f"new data cluster (previous_difficulties {previous_difficulties}, current_difficulties {self.current_difficulties}) has no matched data thus skipped." 
) dist.barrier(group=self.data_parallel_group) if os.path.isfile(f"{cluster_path}.bin"): self.data_cluster_paths.append(cluster_fname) self.data_cluster_current_position.append(0) def sample_from_clusters(self): num_clusters = len(self.data_clusters) weight_sum = sum(self.data_cluster_sizes) weights = [x / weight_sum for x in self.data_cluster_sizes] samples = self.np_rng.choice(num_clusters, self.global_batch_size, replace=True, p=weights) samples = np.bincount(samples, minlength=num_clusters) return samples def reshuffle_clusters(self, cidx): cluster_fname = self.data_cluster_paths[cidx] cluster_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][ CURRICULUM_LEARNING_CLUSTER_PATH] cluster_path = f"{cluster_path}/{cluster_fname}" cluster = np.copy(self.data_clusters[cidx][0]) self.np_rng.shuffle(cluster) cluster_builder = create_mmap_dataset_builder(cluster_path, self.index_dtype) cluster_builder.add_item_numpy(cluster) close_mmap_dataset_builder(cluster_builder, cluster_path) self.data_clusters[cidx] = MMapIndexedDataset(cluster_path, skip_warmup=True) def get_sample_from_cluster(self, cidx, num_samples): start_idx = self.data_cluster_current_position[cidx] samples = list(np.copy(self.data_clusters[cidx][0][start_idx:(start_idx + num_samples)])) self.data_cluster_current_position[cidx] += num_samples if len(samples) < num_samples: num_samples_remained = num_samples - len(samples) logger.info(f"reshuffling cluster {cidx}.") self.reshuffle_clusters(cidx) samples += list(np.copy(self.data_clusters[cidx][0][:num_samples_remained])) self.data_cluster_current_position[cidx] = num_samples_remained return samples def get_next_global_batch(self): if self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][CURRICULUM_LEARNING_ENABLED]: self.curriculum_step += 1 new_cluster = False previous_difficulties = {} for metric in self.curriculum_schedulers: next_difficulty = self.curriculum_schedulers[metric].update_difficulty(self.curriculum_step) if metric not in self.current_difficulties or \ next_difficulty != self.current_difficulties[metric]: new_cluster = True if metric in self.current_difficulties: previous_difficulties[metric] = self.current_difficulties[metric] else: if self.difficulty_type[metric] == CURRICULUM_LEARNING_VALUE_BASED: previous_difficulties[metric] = float('-inf') elif self.difficulty_type[metric] == CURRICULUM_LEARNING_PERCENTILE_BASED: previous_difficulties[metric] = 0 self.current_difficulties[metric] = next_difficulty if new_cluster: self.get_new_cluster(previous_difficulties) if self.global_rank == 0: samples_per_cluster = self.sample_from_clusters() batch = [] for cidx in range(len(samples_per_cluster)): batch += self.get_sample_from_cluster(cidx, samples_per_cluster[cidx]) self.np_rng.shuffle(batch) batch = torch.tensor(batch, device=get_accelerator().current_device_name(), dtype=torch.long).view(-1) else: batch = torch.empty(self.global_batch_size, device=get_accelerator().current_device_name(), dtype=torch.long) dist.broadcast(batch, 0, group=self.data_parallel_group) self.batch = batch.tolist() def __iter__(self): while self.consumed_samples <= self.total_samples: if len(self.batch) == 0: self.get_next_global_batch() current_batch = self.batch[:self.micro_batch_times_data_parallel_size] self.batch = self.batch[self.micro_batch_times_data_parallel_size:] if len(current_batch) == self.micro_batch_times_data_parallel_size or \ (len(current_batch) > 0 and not self.drop_last): start_idx, end_idx = self.get_start_end_idx() yield 
current_batch[start_idx:end_idx] self.consumed_samples += len(current_batch) current_batch = [] def state_dict(self): return { CURRICULUM_LEARNING_BATCH: self.batch, CURRICULUM_LEARNING_CONSUMED_SAMPLES: self.consumed_samples, CURRICULUM_LEARNING_STEP: self.curriculum_step, CURRICULUM_LEARNING_CURRENT_DIFFICULTIES: self.current_difficulties, CURRICULUM_LEARNING_DATA_CLUSTER_PATHS: self.data_cluster_paths, CURRICULUM_LEARNING_DATA_CLUSTER_CURRENT_POSITION: self.data_cluster_current_position, CURRICULUM_LEARNING_NP_RNG_STATE: np.random.get_state() } def load_state_dict(self, state_dict): self.batch = state_dict[CURRICULUM_LEARNING_BATCH] self.consumed_samples = state_dict[CURRICULUM_LEARNING_CONSUMED_SAMPLES] self.curriculum_step = state_dict[CURRICULUM_LEARNING_STEP] self.current_difficulties = state_dict[CURRICULUM_LEARNING_CURRENT_DIFFICULTIES] self.data_cluster_paths = state_dict[CURRICULUM_LEARNING_DATA_CLUSTER_PATHS] self.data_cluster_current_position = state_dict[CURRICULUM_LEARNING_DATA_CLUSTER_CURRENT_POSITION] np.random.set_state(state_dict[CURRICULUM_LEARNING_NP_RNG_STATE]) cluster_root_path = self.data_efficiency_config[DATA_SAMPLING][CURRICULUM_LEARNING][ CURRICULUM_LEARNING_CLUSTER_PATH] # Backward compatibility: previously data_cluster_paths were stored as # absolute paths. Now we changed it to just the file name so that even # if user moved the cluster files, the checkpoint loading still works # as long as user set the correct new CURRICULUM_LEARNING_CLUSTER_PATH # in deepspeed json config. for idx in range(len(self.data_cluster_paths)): if '/' in self.data_cluster_paths[idx]: self.data_cluster_paths[idx] = self.data_cluster_paths[idx].split('/')[-1] if self.global_rank == 0: for cluster_fname in self.data_cluster_paths: cluster_path = f"{cluster_root_path}/{cluster_fname}" self.data_clusters.append(MMapIndexedDataset(cluster_path, skip_warmup=True)) self.data_cluster_sizes.append(len(self.data_clusters[-1][0]))
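# ---------------------------------------------------------------------------
# Illustration (editor's addition): a standalone sketch of the index
# arithmetic used by the sampler above. sample_from_clusters() draws a global
# batch across data clusters weighted by cluster size, and get_start_end_idx()
# slices out the micro-batch for one data-parallel rank. Numbers are made up.
def _demo_batch_composition(cluster_sizes=(1000, 250, 50),
                            global_batch_size=32,
                            micro_batch_size=4,
                            data_parallel_rank=1,
                            seed=0):
    rng = np.random.default_rng(seed)
    weights = [s / sum(cluster_sizes) for s in cluster_sizes]
    draws = rng.choice(len(cluster_sizes), global_batch_size, replace=True, p=weights)
    samples_per_cluster = np.bincount(draws, minlength=len(cluster_sizes))

    # the drawn sample indices form one global batch; each data-parallel rank
    # then reads the slice [rank * micro_batch_size, (rank + 1) * micro_batch_size)
    start_idx = data_parallel_rank * micro_batch_size
    end_idx = start_idx + micro_batch_size
    return samples_per_cluster, (start_idx, end_idx)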
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team from deepspeed.utils import logger from torch import Tensor from torch.nn import Module from ..constants import * from deepspeed.ops.random_ltd.dropping_utils import gpt_sample_tokens, bert_sample_tokens, GatherTokens, ScatterTokens #####based on the paper random-ltd: https://arxiv.org/abs/2211.11586 class RandomLayerTokenDrop(Module): """ A layer wrapper for random LTD """ def __init__(self, layer: Module): super(RandomLayerTokenDrop, self).__init__() self.random_ltd_layer = layer self.reserved_length = None #config['max_value'] self.random_ltd_scheduler = None self.max_length = None self.reserved_length = -1 self.curr_seq = -1 self.batch_first = False def init_config(self, config, scheduler, random_ltd_layer_id): self.random_ltd_scheduler = scheduler self.random_ltd_layer_id = random_ltd_layer_id self.max_length = self.random_ltd_scheduler.state[RANDOM_LTD_MAX_VALUE] self.mask_name = config[RANDOM_LTD_MODEL_MASK_NAME] self.micro_bs = config[RANDOM_LTD_MICRO_BATCH_SIZE] self.random_ltd_num_layer = self.random_ltd_scheduler.random_ltd_layer_num hs_order = config[RANDOM_LTD_HIDDEN_STATE_ORDER] self.model_type = config[RANDOM_LTD_MODEL_TYPE] if hs_order == 'batch_seq_dim': self.get_hidden_tensor_shape = self.get_bsh self.batch_first = True elif hs_order == 'seq_batch_dim': self.get_hidden_tensor_shape = self.get_sbh self.batch_first = False else: logger.warning( "************For now, we only support batch_seq_dim or seq_batch_dim inputs. You can easily \ your own input dimension orders************") raise NotImplementedError if self.model_type == 'encoder': self.index_generator = bert_sample_tokens elif self.model_type == 'decoder': self.index_generator = gpt_sample_tokens else: logger.warning("************For now, we only support encoder-only or decoder-only models************") raise NotImplementedError def get_bsh(self, hidden_stats): self.curr_seq, self.curr_micro_batch = hidden_stats.size()[1], hidden_stats.size()[0] def get_sbh(self, hidden_stats): self.curr_seq, self.curr_micro_batch = hidden_stats.size()[0], hidden_stats.size()[1] def forward(self, hidden_states, **kwargs) -> Tensor: if self.random_ltd_scheduler is not None: self.reserved_length = self.random_ltd_scheduler.get_current_seq() self.get_hidden_tensor_shape(hidden_states) if self.training and self.random_ltd_scheduler is not None and self.reserved_length < self.curr_seq: if self.mask_name is not None: mask = kwargs[self.mask_name] else: mask = None if self.random_ltd_layer_id == 0: sampled_indices, part_attention_mask = self.index_generator(self.reserved_length,\ self.curr_seq, \ self.curr_micro_batch, \ self.random_ltd_num_layer, \ hidden_states.device, mask) self.random_ltd_scheduler.state[RANDOM_LTD_SAMPLE_INDEX] = sampled_indices self.random_ltd_scheduler.state[RANDOM_LTD_ATTENTION_MASK] = part_attention_mask else: sampled_indices = self.random_ltd_scheduler.state[RANDOM_LTD_SAMPLE_INDEX] part_attention_mask = self.random_ltd_scheduler.state[RANDOM_LTD_ATTENTION_MASK] hidden_states, part_hidden_states = GatherTokens.apply(hidden_states, sampled_indices[self.random_ltd_layer_id, :, :], self.batch_first) if self.mask_name is not None: if self.model_type == 'encoder': kwargs[self.mask_name] = part_attention_mask[self.random_ltd_layer_id] else: kwargs[self.mask_name] = part_attention_mask outputs = self.random_ltd_layer(part_hidden_states, **kwargs) if isinstance(outputs, tuple): hidden_states = ScatterTokens.apply(hidden_states, 
outputs[0], sampled_indices[self.random_ltd_layer_id, :, :], self.batch_first) my_list = list(outputs) my_list[0] = hidden_states return tuple(my_list) elif isinstance(outputs, Tensor): hidden_states = ScatterTokens.apply(hidden_states, outputs, sampled_indices[self.random_ltd_layer_id, :, :], self.batch_first) return hidden_states else: logger.warning("************For now, we only support tuple and tensor output. \ You need to adjust the output according to the layer in your model************") raise NotImplementedError else: return self.random_ltd_layer(hidden_states, **kwargs)
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team '''Copyright The Microsoft DeepSpeed Team'''
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import torch def bsh_decoder_gather(reserved_length, hidden_states, mask): # random-layer-token-drop rand_list = [] part_hidden_states = [] # batch, seq, hidden ## different from megatron for k in range(hidden_states.size(0)): B_tmp = torch.randperm(hidden_states.size(1), device=hidden_states.device)[:reserved_length] B = B_tmp.sort()[0] rand_list.append(B) part_hidden_states.append(hidden_states[k:k + 1, B, :]) part_hidden_states = torch.cat(part_hidden_states, dim=0) part_mask = mask[:, :, :reserved_length, :reserved_length] return part_hidden_states, rand_list, part_mask def bsh_decoder_scatter(hidden_states, part_hidden_states, rand_list): for k in range(hidden_states.size(0)): hidden_states[k, rand_list[k], :] = part_hidden_states[k, :, :] return hidden_states
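# ---------------------------------------------------------------------------
# Usage sketch (editor's addition): gather a random subset of tokens per batch
# row, run some computation on the shorter sequence, then scatter the results
# back into the full-length hidden states. Shapes and the "processing" step
# are illustrative only.
def _demo_gather_scatter(batch=2, seq=8, hidden=4, reserved_length=3):
    hidden_states = torch.randn(batch, seq, hidden)
    mask = torch.ones(batch, 1, seq, seq)

    part, rand_list, part_mask = bsh_decoder_gather(reserved_length, hidden_states, mask)
    assert part.shape == (batch, reserved_length, hidden)
    assert part_mask.shape == (batch, 1, reserved_length, reserved_length)

    # a transformer layer would process `part` here; scaling stands in for it
    processed = part * 2.0
    full = bsh_decoder_scatter(hidden_states.clone(), processed, rand_list)
    return full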
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team from .basic_layer import RandomLayerTokenDrop from collections import OrderedDict from deepspeed.compression.helper import recursive_getattr, recursive_setattr def convert_to_random_ltd(model, convert_type): if hasattr(model, 'module'): c_model = model.module else: c_model = model for name, module in c_model.named_modules(): if isinstance(module, convert_type): old_module = recursive_getattr(c_model, name) new_module = RandomLayerTokenDrop(old_module) recursive_setattr(c_model, name, new_module) model.random_ltd_initialize() return model def save_without_random_ltd(model): if hasattr(model, 'module'): c_model = model.module else: c_model = model model_dic = c_model.state_dict() return remove_random_ltd_state_dict(model_dic) def remove_random_ltd_state_dict(state_dict): new_state_dict = OrderedDict() for key, value in state_dict.items(): if '.random_ltd_layer' in key: new_key = ''.join(key.split('.random_ltd_layer')) else: new_key = key new_state_dict[new_key] = value return new_state_dict
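# ---------------------------------------------------------------------------
# Usage sketch (editor's addition): the intended wrap/train/save workflow,
# under the assumption that `engine` is a DeepSpeed engine initialized with
# random LTD enabled in its config and `MyTransformerBlock` is the transformer
# layer class to wrap (both names are placeholders).
def _demo_random_ltd_workflow(engine, MyTransformerBlock, ckpt_path):
    import torch
    # wrap every MyTransformerBlock inside the engine with RandomLayerTokenDrop
    engine = convert_to_random_ltd(engine, MyTransformerBlock)
    # ... training loop runs here ...
    # save weights with the '.random_ltd_layer' prefixes stripped, so the
    # checkpoint loads back into the original (unwrapped) model definition
    torch.save(save_without_random_ltd(engine), ckpt_path)
    return engine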
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import math from deepspeed.utils import logger # from deepspeed.runtime.lr_schedules import WarmupLR from ..constants import * #####based on the paper random-ltd: https://arxiv.org/abs/2211.11586 class BaseScheduler(object): def __init__(self): self.state = {} def __fixed_root_get_value(self, global_steps, root_degree=None): s_state = self.state[RANDOM_LTD_SCHEDULE_CONFIG] if root_degree is None: root_degree = s_state['root_degree'] next_seq = (float(global_steps) / s_state[RANDOM_LTD_REQUIRE_STEP])**(1.0 / root_degree) next_seq = math.floor(next_seq * (self.state[RANDOM_LTD_MAX_VALUE] - self.state[RANDOM_LTD_MIN_VALUE]) + self.state[RANDOM_LTD_MIN_VALUE]) next_seq -= (next_seq % s_state[RANDOM_LTD_INCREASE_STEP]) next_seq = min(next_seq, self.state[RANDOM_LTD_MAX_VALUE]) return next_seq def get_value(self, global_steps): if self.state[RANDOM_LTD_SCHEDULER_TYPE] == 'fixed_linear': return self.__fixed_root_get_value(global_steps, 1) else: raise RuntimeError('Unsupported random LTD schedule type') class RandomLTDScheduler(BaseScheduler): def __init__(self, config): super().__init__() self.model_layer_num = config[RANDOM_LTD_TOTAL_LAYER_NUM] self.random_ltd_layer_num = config[RANDOM_LTD_LAYER_NUM] self.config_schedule = config[RANDOM_LTD_SCHEDULER] self.global_batch_size = config[RANDOM_LTD_GLOBAL_BATCH_SIZE] self.reset_to_init() if config[RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE][RANDOM_LTD_LAYER_TOKEN_LR_ENABLED]: logger.warning("**********Work In Progress************") raise NotImplementedError self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS] = 0 # self.first_step = True def get_total_layer_tokens(self, train_iters): for step in range(train_iters): self.update_seq(step) return self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS] def reset_to_init(self): if self.config_schedule is not None: self.state[RANDOM_LTD_MIN_VALUE] = self.config_schedule[RANDOM_LTD_MIN_VALUE] self.state[RANDOM_LTD_MAX_VALUE] = self.config_schedule[RANDOM_LTD_MAX_VALUE] self.state[RANDOM_LTD_CURRENT_VALUE] = self.config_schedule[RANDOM_LTD_MIN_VALUE] self.state[RANDOM_LTD_SCHEDULE_CONFIG] = self.config_schedule[RANDOM_LTD_SCHEDULE_CONFIG] self.state[RANDOM_LTD_SCHEDULER_TYPE] = self.config_schedule[RANDOM_LTD_SCHEDULER_TYPE] self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS] = 0 self.state[RANDOM_LTD_CURR_STEP] = -1 def get_current_seq(self): return self.state[RANDOM_LTD_CURRENT_VALUE] def set_current_seq(self, seq_length): self.state[RANDOM_LTD_CURRENT_VALUE] = seq_length def get_random_ltd_layer_num(self): return self.random_ltd_layer_num def get_state(self): return self.state def set_state(self, state): self.state = state def update_seq(self, global_steps): if self.state[RANDOM_LTD_CURRENT_VALUE] < self.state[RANDOM_LTD_MAX_VALUE]: self.state[RANDOM_LTD_CURRENT_VALUE] = self.get_value(global_steps) if global_steps != self.state[RANDOM_LTD_CURR_STEP]: self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS] += self.global_batch_size*(self.state[RANDOM_LTD_CURRENT_VALUE] * self.random_ltd_layer_num \ + self.state[RANDOM_LTD_MAX_VALUE] * (self.model_layer_num - self.random_ltd_layer_num)) self.state[RANDOM_LTD_CURR_STEP] = global_steps def state_dict(self): return { RANDOM_LTD_CONSUMED_LAYER_TOKENS: self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS], RANDOM_LTD_CURR_STEP: self.state[RANDOM_LTD_CURR_STEP], RANDOM_LTD_CURRENT_VALUE: self.state[RANDOM_LTD_CURRENT_VALUE], RANDOM_LTD_MIN_VALUE: self.state[RANDOM_LTD_MIN_VALUE], RANDOM_LTD_MAX_VALUE: 
self.state[RANDOM_LTD_MAX_VALUE], } def load_state_dict(self, state_dict): self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS] = state_dict[RANDOM_LTD_CONSUMED_LAYER_TOKENS] self.state[RANDOM_LTD_CURR_STEP] = state_dict[RANDOM_LTD_CURR_STEP] self.state[RANDOM_LTD_CURRENT_VALUE] = state_dict[RANDOM_LTD_CURRENT_VALUE] self.state[RANDOM_LTD_MIN_VALUE] = state_dict[RANDOM_LTD_MIN_VALUE] self.state[RANDOM_LTD_MAX_VALUE] = state_dict[RANDOM_LTD_MAX_VALUE]
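# ---------------------------------------------------------------------------
# Worked example (editor's addition): the 'fixed_linear' schedule above is the
# root_degree=1 case of __fixed_root_get_value. This standalone copy with
# plain arguments shows the arithmetic; the parameter values are illustrative.
def _fixed_linear_seq(global_steps, min_value, max_value, require_steps, increase_step):
    frac = float(global_steps) / require_steps           # linear ramp in [0, 1]
    next_seq = math.floor(frac * (max_value - min_value) + min_value)
    next_seq -= next_seq % increase_step                 # round down to a multiple of increase_step
    return min(next_seq, max_value)

# e.g. with min_value=128, max_value=2048, require_steps=10000, increase_step=16:
#   step 0 -> 128, step 5000 -> 1088, step >= 10000 -> 2048 (capped at max_value)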
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import pickle import typing import torch from deepspeed import comm as dist # To query whether we have send/recv support from packaging.version import Version from deepspeed.git_version_info import torch_info from deepspeed.accelerator import get_accelerator _groups = None _grid = None _async = [] def can_send_recv() -> bool: torch_version = Version(torch_info['version']) sendrecv_min = Version('1.8') return torch_version >= sendrecv_min #initializes adjacent process groups #run this only after deepspeed.init_distributed() has been called def init_process_groups(grid): global _groups, _grid _grid = grid assert _grid.pipe_parallel_size > 1, "There is no pipeline parallelism" if not can_send_recv(): _groups = [dist.new_group(ranks=group) for group in _grid.p2p_groups] def _is_valid_send_recv(src_stage, dest_stage): first_stage = 0 last_stage = _grid.pipe_parallel_size - 1 assert abs(src_stage-dest_stage) == 1 or \ (src_stage == first_stage and dest_stage == last_stage) or \ (src_stage == last_stage and dest_stage == first_stage), \ "Functionality currently limited to send and receive between adjacent ranks only" def send(tensor, dest_stage, async_op=False): global _groups assert async_op == False, "Doesn't support async_op true" src_stage = _grid.get_stage_id() _is_valid_send_recv(src_stage, dest_stage) dest_rank = _grid.stage_to_global(stage_id=dest_stage) if async_op: global _async op = dist.isend(tensor, dest_rank) _async.append(op) else: if can_send_recv(): return dist.send(tensor, dest_rank) else: group = _get_send_recv_group(src_stage, dest_stage) src_rank = _grid.stage_to_global(stage_id=src_stage) return dist.broadcast(tensor, src_rank, group=group, async_op=async_op) def recv(tensor, src_stage, async_op=False): global _groups assert async_op == False, "Doesn't support async_op true" dest_stage = _grid.get_stage_id() _is_valid_send_recv(src_stage, dest_stage) src_rank = _grid.stage_to_global(stage_id=src_stage) if async_op: global _async op = dist.irecv(tensor, src_rank) _async.append(op) else: if can_send_recv(): return dist.recv(tensor, src_rank) else: group = _get_send_recv_group(src_stage, dest_stage) return dist.broadcast(tensor, src_rank, group=group, async_op=async_op) def wait(): global _async for op in _async: op.wait() _async = [] get_accelerator().synchronize() def send_obj(msg: typing.Any, dest: int): """Send an arbitrary python object to ``dest``. Note: ``msg`` must be pickleable. WARN: This incurs a CPU -> GPU transfer and should be used sparingly for performance reasons. Args: msg (typing.Any): The object to send. dest (int): Destination rank. """ # serialize the message msg = pickle.dumps(msg) # construct a tensor to send msg = torch.ByteTensor(torch.ByteStorage.from_buffer(msg)).to(get_accelerator().device_name()) # Send meta and message length_tensor = torch.tensor([len(msg)], dtype=torch.long).to(get_accelerator().device_name()) dist.send(length_tensor, dst=dest) dist.send(msg, dst=dest) def recv_obj(sender: int) -> typing.Any: """Receive an arbitrary python object from ``sender``. WARN: This incur a CPU <-> GPU transfers and should be used sparingly for performance reasons. Args: sender (int): The rank sending the message. 
""" # Get message meta length = torch.tensor([0], dtype=torch.long).to(get_accelerator().device_name()) dist.recv(length, src=sender) # Receive and deserialize msg = torch.empty(length.item(), dtype=torch.uint8).to(get_accelerator().device_name()) dist.recv(msg, src=sender) msg = pickle.loads(msg.cpu().numpy().tobytes()) def _to(x): """Recursively move to the current device.""" if torch.is_tensor(x): return x.to(get_accelerator().device_name()) if isinstance(x, (tuple, list)): ret = [_to(x_) for x_ in x] if isinstance(x, tuple): ret = tuple(ret) return ret # handle kwargs if isinstance(x, dict): ret = dict() for key, val in x.items(): ret[_to(key)] = _to(val) return ret # Anything else is a no-op return x msg = _to(msg) return msg def _get_send_recv_group(src_stage, dest_stage): '''the group id is always the smaller rank unless its a wrap around''' stage_id = None first_stage = 0 last_stage = _grid.pipe_parallel_size - 1 if (src_stage == first_stage and dest_stage == last_stage or dest_stage == first_stage and src_stage == last_stage): stage_id = last_stage elif src_stage > dest_stage: stage_id = dest_stage else: stage_id = src_stage '''group_id corresponds to group of [group_id, group_id+1] unless group_id is the rank of the last stage in which case group_id corresponds to group[group_id-num_stages+1, group_id] ''' group_id = _grid.stage_to_global(stage_id=stage_id) return _groups[group_id]
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team from .module import PipelineModule, LayerSpec, TiedLayerSpec from .topology import ProcessTopology
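# ---------------------------------------------------------------------------
# Usage sketch (editor's addition): LayerSpec delays layer construction until
# pipeline stages are assigned, so only the owning stage materializes each
# layer. Running this requires a DeepSpeed distributed launch with at least
# `num_stages` ranks; `ds_config` is a placeholder for a DeepSpeed config dict
# or path, and the layer sizes are illustrative.
def _demo_pipeline_module(ds_config):
    import torch
    import deepspeed

    deepspeed.init_distributed()
    layers = [LayerSpec(torch.nn.Linear, 1024, 1024) for _ in range(8)]
    net = PipelineModule(layers=layers, num_stages=2, loss_fn=torch.nn.MSELoss())
    engine, _, _, _ = deepspeed.initialize(
        model=net,
        config=ds_config,
        model_parameters=[p for p in net.parameters() if p.requires_grad])
    # training then goes through engine.train_batch(data_iter) rather than a
    # manual forward/backward loop (see the PipelineEngine defined below)
    return engine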
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team from types import MethodType import torch from deepspeed import comm as dist from deepspeed.utils import logger from deepspeed.utils.timer import ThroughputTimer from deepspeed.accelerator import get_accelerator from ..engine import DeepSpeedEngine, MEMORY_OPT_ALLREDUCE_SIZE from ..utils import PartitionedTensor from ..dataloader import RepeatingLoader from ..zero.config import ZeroStageEnum from ..activation_checkpointing import checkpointing as ds_checkpointing from .module import PipelineModule, PipelineError from . import p2p from . import schedule TARGET_ID = -2 LOG_STAGE = -2 DATA_PARALLEL_ID = -2 def is_even(number): return number % 2 == 0 mem_alloced = 0 mem_cached = 0 def _tensor_bytes(tensor): return tensor.numel() * tensor.element_size() class PipelineEngine(DeepSpeedEngine): """ A training engine hybrid pipeline, data, and model parallel training. This engine is created by ``deepspeed.initialize()`` when a :class:`PipelineModule` is provided. """ ID_TO_DTYPE = [ torch.float32, torch.float64, torch.complex64, torch.complex128, torch.float16, torch.bfloat16, torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64, torch.bool ] DTYPE_TO_ID = {dtype: id_ for id_, dtype in enumerate(ID_TO_DTYPE)} def __init__(self, has_bool_tensors=False, *super_args, **super_kwargs): super().__init__(*super_args, **super_kwargs) assert isinstance(self.module, PipelineModule), "model must base PipelineModule" assert self.zero_optimization_stage() < 2, "ZeRO-2 and ZeRO-3 are incompatible with pipeline parallelism" # We schedule the all-reduces, so disable it in super().backward() self.enable_backward_allreduce = False self.has_bool_tensors = has_bool_tensors self.eval_return_logits = False self.outputs = None # used to disable the pipeline all-reduce when used with 1-bit Adam/1-bit LAMB self.pipeline_enable_backward_allreduce = True if self.elasticity_enabled(): if not self.is_elastic_model_parallel_supported(): assert not self.elasticity_enabled(), "Elasticity is not currently supported" \ " with pipeline parallelism." # pipeline step for logging self.log_batch_step_id = -1 self.micro_batch_size = self.train_micro_batch_size_per_gpu() self.micro_batches = self.gradient_accumulation_steps() # Set Grid and Communication Groups self.grid = self.module._grid if self.grid.get_global_rank() == 0: logger.info(f'CONFIG: micro_batches={self.micro_batches} ' f'micro_batch_size={self.micro_batch_size}') self.global_rank = self.grid.get_global_rank() assert self.dp_world_size == self.grid.data_parallel_size assert self.train_batch_size() == \ self.micro_batch_size * self.micro_batches * self.grid.data_parallel_size # Set Stage Inf self.num_stages = self.grid.pipe_parallel_size self.stage_id = self.grid.get_stage_id() self.prev_stage = self.stage_id - 1 self.next_stage = self.stage_id + 1 self.data_iterator = None self.batch_fn = None self._force_grad_boundary = False self.batch_timer = ThroughputTimer(batch_size=self.train_batch_size(), logging_fn=self.tput_log, monitor_memory=False, steps_per_output=self.steps_per_print()) # PipelineEngine needs to handle data loading specially due to only the first # and last stages loading inputs/labels. 
We construct a sampler that uses if self.training_data: self._build_data_iter(self.training_data) self.is_pipe_parallel = self.grid.pipe_parallel_size > 1 self.is_data_parallel = self.grid.data_parallel_size > 1 self.is_model_parallel = self.grid.model_parallel_size > 1 # Partition input/output buffers # XXX temporarily disable while I revert some partition hacks. self.is_pipe_partitioned = self.is_model_parallel self.is_grad_partitioned = self.is_model_parallel model_parameters = filter(lambda p: p.requires_grad, self.module.parameters()) num_params = sum([p.numel() for p in model_parameters]) unique_params = num_params # Subtract tied parameters if we don't own them if self.module.tied_comms: tied_params = 0 for key, d in self.module.tied_comms.items(): if self.global_rank != min(d['ranks']): tied_params += sum(p.numel() for p in d['module'].parameters()) unique_params -= tied_params params_tensor = torch.LongTensor(data=[num_params, unique_params]).to(self.device) dist.all_reduce(params_tensor, group=self.grid.get_model_parallel_group()) params_tensor = params_tensor.tolist() total_params = params_tensor[0] unique_params = params_tensor[1] if self.grid.data_parallel_id == 0: logger.info(f'RANK={self.global_rank} ' f'STAGE={self.stage_id} ' f'LAYERS={self.module._local_stop - self.module._local_start} ' f'[{self.module._local_start}, {self.module._local_stop}) ' f'STAGE_PARAMS={num_params} ({num_params/1e6:0.3f}M) ' f'TOTAL_PARAMS={total_params} ({total_params/1e6:0.3f}M) ' f'UNIQUE_PARAMS={unique_params} ({unique_params/1e6:0.3f}M)') #initialize peer-2-peer communication and allreduce groups if self.is_pipe_parallel: p2p.init_process_groups(self.grid) # Pipeline buffers self.num_pipe_buffers = 0 self.pipe_buffers = { 'inputs': [], # batch input and received activations 'labels': [], # labels from batch input 'outputs': [], # activations 'output_tensors': [], # tensor object to preserve backward graph } self.pipe_recv_buf = None self.grad_layer = None self.meta_buffer = None self.first_output_send = True self.first_gradient_send = True #stores the loss for the current micro batch being processed self.loss = torch.tensor(0.0).to(self.device) #stores the loss for the entire batch self.total_loss = None self.agg_loss = torch.tensor(0.0, requires_grad=False).to(self.device) self.dp_group_loss = torch.tensor(0.0, requires_grad=False).to(self.device) if self._config.pipeline['activation_checkpoint_interval'] > 0: self.module.activation_checkpoint_interval = self._config.pipeline['activation_checkpoint_interval'] self.module.checkpoint_parallel_write_pipeline = self._config.checkpoint_parallel_write_pipeline if self.is_last_stage(): self.loss_model = self.module.loss_fn self.has_attention_mask = self.module.__class__.__name__ == 'GPT2ModelPipe' # Initialize pipeline communicators. Just send a 0. if is_even(self.stage_id): if not self.is_last_stage(): p2p.send(self.loss, self.next_stage) if not self.is_first_stage(): p2p.recv(self.loss, self.prev_stage) else: if not self.is_first_stage(): p2p.recv(self.loss, self.prev_stage) if not self.is_last_stage(): p2p.send(self.loss, self.next_stage) # XXX look into timer reporting timing # Initialize some timers because of early weirdness. 
if self.wall_clock_breakdown(): self.timers('forward_microstep').start() self.timers('forward_microstep').stop() self.timers('backward_microstep').start() self.timers('backward_microstep').stop() self.timers('backward_inner_microstep').start() self.timers('backward_inner_microstep').stop() self.timers('backward_allreduce_microstep').start() self.timers('backward_allreduce_microstep').stop() self.timers('backward_allreduce').start() self.timers('backward_allreduce').stop() self.timers('step_microstep').start() self.timers('step_microstep').stop() def set_has_attention_mask(self, value): assert isinstance(value, bool) self.has_attention_mask = value def _build_data_iter(self, dataset): sampler = torch.utils.data.distributed.DistributedSampler(dataset, num_replicas=self.dp_world_size, rank=self.mpu.get_data_parallel_rank(), shuffle=False) # Build a loader and make it repeating. pipe_dataloader = self.deepspeed_io(dataset, data_sampler=sampler) pipe_dataloader = RepeatingLoader(pipe_dataloader) self.set_dataloader(pipe_dataloader) def _exec_reduce_tied_grads(self): # We need to run this first to write to self.averaged_gradients; # since this class turns `enable_backward_allreduce` off, # `self.overlapping_partition_gradients_reduce_epilogue()` defined in the DeepSpeedEngine # never actually runs. I suspect this is because of efficiency problems; get_flat_partition in # stage2.py might do something expensive; someone will have to look into that later. But # in the meantime, this fixes ZeRO2 + Pipelining enough to run a demo. Further profiling # needed to decide if it actually breaks everything. # (see https://github.com/EleutherAI/gpt-neox/issues/62#issuecomment-761471944) if self.zero_optimization_partition_gradients(): self.optimizer.overlapping_partition_gradients_reduce_epilogue() weight_group_list = self.module.get_tied_weights_and_groups() for weight, group in weight_group_list: grad = weight._hp_grad if self.bfloat16_enabled() else weight.grad dist.all_reduce(grad, group=group) def _exec_reduce_grads(self): self._force_grad_boundary = True if self.pipeline_enable_backward_allreduce: if self.bfloat16_enabled(): if self.zero_optimization_stage() < ZeroStageEnum.gradients: self._bf16_reduce_grads() else: raise NotImplementedError("PP+BF16 only work for ZeRO Stage 1") else: self.allreduce_gradients(bucket_size=MEMORY_OPT_ALLREDUCE_SIZE) self._force_grad_boundary = False def _bf16_reduce_grads(self): # Make our own list of gradients from the optimizer's FP32 grads grads = [] self.buffered_allreduce_fallback(grads=self.optimizer.get_grads_for_reduction(), elements_per_buffer=MEMORY_OPT_ALLREDUCE_SIZE) def _reserve_pipe_buffers(self, num_buffers): """Ensure that each pipeline buffer has at least ``num_buffers`` slots. This method only reserves slots and does not allocate tensors. Args: num_buffers (int): The number of buffers to reserve. """ if self.num_pipe_buffers >= num_buffers: return num_added = num_buffers - self.num_pipe_buffers for key in self.pipe_buffers: self.pipe_buffers[key].extend([None] * num_added) self.num_pipe_buffers = num_buffers def reset_activation_shape(self): """Reset the buffers when the shape of activation and gradient change. For example, for curriculum learning that changes the seqlen of each sample, we need to call this whenever the seqlen is going to change. """ self.first_output_send = True self.pipe_recv_buf = None self.grad_layer = None self.meta_buffer = None def train_batch(self, data_iter=None): """Progress the pipeline to train the next batch of data. 
The engine will ingest ``self.train_batch_size()`` total samples collectively across all workers. An iterator that over training data should be provided as an argument unless ``deepspeed.initialize()`` was provided a training set. In that event, the training data will automatically be read. .. warning:: A total of ``self.gradient_accumulation_steps()`` entries will be pulled from ``data_iter`` by each pipeline. There must be sufficient data left in ``data_iter`` or else a ``StopIteration`` will halt training. DeepSpeed provides a convenience class :class:`deepspeed.utils.RepeatingLoader` that wraps data loaders to automatically restart upon a ``StopIteration``. Args: data_iter (Iterator, optional): Iterator of training data. Returns: The arithmetic mean of the losses computed this batch. """ if not torch._C.is_grad_enabled(): raise RuntimeError(f'train_batch() requires gradients enabled. Use eval_batch() instead.') # Curriculum learning could change activation shape if self.curriculum_enabled_legacy(): new_difficulty = self.curriculum_scheduler_legacy.update_difficulty( \ self.global_steps + 1) if self.global_steps == 0 or self.curriculum_scheduler_legacy.first_step: self.reset_activation_shape() self.curriculum_scheduler_legacy.first_step = False elif new_difficulty != self.curriculum_scheduler_legacy.get_difficulty( \ self.global_steps): self.reset_activation_shape() if data_iter: self.set_dataiterator(data_iter) self.module.train() self.total_loss = None self._compute_loss = True # Do the work self.timers('train_batch').start() sched = schedule.TrainSchedule(micro_batches=self.micro_batches, stages=self.num_stages, stage_id=self.stage_id) self._exec_schedule(sched) self.agg_train_loss = self._aggregate_total_loss() self.timers('train_batch').stop() if self.global_steps % self.steps_per_print() == 0: if self.global_rank == 0: elapsed = self.timers('train_batch').elapsed(reset=True) / 1000.0 iter_time = elapsed / self.steps_per_print() tput = self.train_batch_size() / iter_time print(f'steps: {self.global_steps} ' f'loss: {self.agg_train_loss:0.4f} ' f'iter time (s): {iter_time:0.3f} ' f'samples/sec: {tput:0.3f}') # Monitoring if self.global_rank == 0 and self.monitor.enabled: self.summary_events = [(f'Train/Samples/train_loss', self.agg_train_loss.mean().item(), self.global_samples)] self.monitor.write_events(self.summary_events) if self.wall_clock_breakdown() and self.global_steps % self.steps_per_print() == 0: self.timers.log(['pipe_send_output', 'pipe_send_grad', 'pipe_recv_input', 'pipe_recv_grad']) # TODO: should return precisely what loss returned and allow others to be queried? return self.agg_train_loss def eval_batch(self, data_iter, return_logits=False, compute_loss=True, reduce_output='avg'): """Evaluate the pipeline on a batch of data from ``data_iter``. The engine will evaluate ``self.train_batch_size()`` total samples collectively across all workers. This method is equivalent to: .. code-block:: python module.eval() with torch.no_grad(): output = module(batch) .. warning:: A total of ``self.gradient_accumulation_steps()`` entries will be pulled from ``data_iter`` by each pipeline. There must be sufficient data left in ``data_iter`` or else a ``StopIteration`` will halt training. DeepSpeed provides a convenience class :class:`deepspeed.utils.RepeatingLoader` that wraps data loaders to automatically restart upon a ``StopIteration``. Args: data_iter (Iterator): Iterator of data to evaluate. Returns: The arithmetic mean of the losses computed this batch. 
""" self.eval_return_logits = return_logits self.module.eval() # Curriculum learning could change activation shape if self.curriculum_enabled_legacy(): new_difficulty = self.curriculum_scheduler_legacy.update_difficulty( \ self.global_steps + 1) if self.global_steps == 0 or self.curriculum_scheduler_legacy.first_step: self.reset_activation_shape() self.curriculum_scheduler_legacy.first_step = False elif new_difficulty != self.curriculum_scheduler_legacy.get_difficulty( \ self.global_steps): self.reset_activation_shape() eval_output = None self._compute_loss = compute_loss # Use the provided data iterator train_iterator = self.data_iterator self.set_dataiterator(data_iter) # Do the work sched = schedule.InferenceSchedule(micro_batches=self.micro_batches, stages=self.num_stages, stage_id=self.stage_id) # prevent dead-lock with multiple evals sequence dist.barrier() with torch.no_grad(): self._exec_schedule(sched) if self.is_last_stage(): eval_output = self._reduce_outputs(self.fwd_outputs, reduce=reduce_output) if compute_loss: eval_output = self._bcast_pipe_scalar(eval_output) if self.global_rank == 0 and self.monitor.enabled: self.summary_events = [(f'Train/Samples/eval_loss', eval_output.mean().item(), self.global_samples)] self.monitor.write_events(self.summary_events) # Restore the training iterator self.set_dataiterator(train_iterator) # Reset any buffers that may have been populated during the forward passes. #ds_checkpointing.reset() self.eval_return_logits = False if return_logits: outputs = self.outputs self.outputs = None return eval_output, outputs return eval_output def set_train_batch_size(self, train_batch_size): """Adjust the global batch size by increasing or decreasing the number of micro-batches (i.e., gradient accumulation steps). The size of each micro-batch (i.e., ``train_micro_batch_size_per_gpu``) is not changed. Args: train_batch_size (int): The new global batch size for training. Raises: ValueError: if ``train_batch_size`` is not divisible by the configured micro-batch size and data parallelism. 
""" super().set_train_batch_size(train_batch_size) self.micro_batches = self.gradient_accumulation_steps() def is_first_stage(self): """True if this process is in the first stage in the pipeline.""" return self.stage_id == 0 def is_last_stage(self): """True if this process is in the last stage in the pipeline.""" return self.stage_id == self.num_stages - 1 def _reduce_outputs(self, outputs, reduce='avg', reduce_dp=True): if reduce is None: return outputs if reduce.lower() == 'avg': # first sum over all microbatches if torch.is_tensor(outputs[0]): reduced = sum(outputs) else: assert isinstance(outputs, (list, tuple)) reduced = [torch.zeros_like(o) for o in outputs[0]] for idx, out in outputs: reduced[idx] += out # Average over the microbatches reduced = self._scale_loss_by_gas(reduced) # Average over DP groups if reduce_dp and self.is_data_parallel: if torch.is_tensor(reduced): dist.all_reduce(reduced, group=self.mpu.get_data_parallel_group()) reduced /= self.dp_world_size else: for idx in range(len(reduced)): dist.all_reduce(reduced[idx], group=self.mpu.get_data_parallel_group()) reduced[idx] /= self.dp_world_size return reduced else: raise NotImplementedError(f'reduction type {reduce} not supported.') def _bcast_pipe_scalar(self, data, src_rank=None, dtype=torch.float32): # Default to last stage (e.g., for broadcasting loss) if src_rank is None: src_rank = self.grid.stage_to_global(self.num_stages - 1) assert src_rank in self.grid.pp_group if self.global_rank == src_rank: result = data.clone().detach().type(dtype).to(self.device) else: result = torch.Tensor([0.]).type(dtype).to(self.device) dist.broadcast(tensor=result, src=src_rank, group=self.mpu.get_pipe_parallel_group()) return result def _aggregate_total_loss(self): # Scale loss, average among DP ranks, and bcast loss to the rest of my DP group if self.is_last_stage(): loss = self._scale_loss_by_gas(self.total_loss) self.dp_group_loss = loss.clone().detach() ## Average loss across all data-parallel groups agg_loss = self.dp_group_loss.clone().detach() #print(f'RANK={self.global_rank} bcast SENDER src={self.global_rank} group={self.grid.pp_group}', flush=True) if self.is_data_parallel: dist.all_reduce(agg_loss, group=self.mpu.get_data_parallel_group()) agg_loss /= self.dp_world_size assert self.global_rank in self.grid.pp_group losses = torch.Tensor([self.dp_group_loss, agg_loss]).to(self.device) dist.broadcast(tensor=losses, src=self.global_rank, group=self.mpu.get_pipe_parallel_group()) else: # Get loss from last stage src_rank = self.grid.stage_to_global(self.num_stages - 1) assert src_rank in self.grid.pp_group losses = torch.Tensor([0., 0.]).to(self.device) dist.broadcast(tensor=losses, src=src_rank, group=self.grid.get_pipe_parallel_group()) self.dp_group_loss = losses[0].clone().detach() agg_loss = losses[1].clone().detach() return agg_loss def set_dataloader(self, loader): """""" if self.is_first_stage() or self.is_last_stage(): self.training_dataloader = loader self.data_iterator = iter(self.training_dataloader) def set_dataiterator(self, iterator): """ Store an iterator to sample for training data. """ if self.is_first_stage() or self.is_last_stage(): self.training_dataloader = None self.data_iterator = iterator def set_batch_fn(self, fn): """Execute a post-processing function on input data. Args: fn (function): The function to run. """ self.batch_fn = fn def is_gradient_accumulation_boundary(self): """True if the engine is executing a gradient reduction or optimizer step instruction. 
This is overridden from :class:`DeepSpeedEngine` to force reductions and steps when the pipeline engine is instructed to do so. Returns: bool: whether reductions and optimizer steps should occur. """ return self._force_grad_boundary def log_for_device(self, *msg): if LOG_STAGE == self.stage_id or LOG_STAGE == -1: if DATA_PARALLEL_ID == self.grid.data_parallel_id or DATA_PARALLEL_ID == -1: print( f'RANK={dist.get_rank()} ' f'PIPE-ID={self.stage_id} ' f'DATA-ID={self.grid.data_parallel_id} ' f'MBATCH-ID={self.microbatch_id} ' f'STEP-ID={self.log_batch_step_id} ' '::', *msg, flush=True) def tput_log(self, *msg): if self.global_rank == 0 and self.global_steps % self.steps_per_print() == 0: print(*msg) def _next_batch(self): # If using 3D parallelism, only some first-stage ranks may do IO batch = None if self.data_iterator is not None: batch = next(self.data_iterator) # Any post-processing, like broadcasting across a slice-parallel group. if self.batch_fn: batch = self.batch_fn(batch) return batch def _exec_forward_pass(self, buffer_id): self.tput_timer.start() self.mem_status('BEFORE FWD', reset_max=True) if isinstance(self.pipe_buffers['inputs'][buffer_id], tuple): inputs = tuple(t.clone() for t in self.pipe_buffers['inputs'][buffer_id]) else: inputs = self.pipe_buffers['inputs'][buffer_id].clone() # collect the partitioned input from the previous stage if self.is_pipe_partitioned and not self.is_first_stage(): part_input = PartitionedTensor.from_meta(meta=inputs[0], local_part=inputs[1], group=self.grid.get_slice_parallel_group()) inputs = (part_input.full(), *inputs[2:]) inputs[0].requires_grad = True # skip mask #inputs[1].requires_grad = True part_input = None inputs = inputs[0] if len(inputs) == 1 else inputs self.pipe_buffers['inputs'][buffer_id] = inputs # Zero out the gradients each time we use the tensor because only the data in # tensor changes across batches self._zero_grads(inputs) outputs = super().forward(inputs) # Reset activation checkpointing buffers. 
# Need to call this between evaluation iterations if not self.module.training: ds_checkpointing.reset() # Partition the outputs if we are not the last stage if self.is_pipe_partitioned and not self.is_last_stage(): if isinstance(outputs, tuple): first_output = outputs[0] # TODO: Improve pipe partitioning to pass multiple tensors that require grads assert all([torch.is_tensor(elt) and elt.requires_grad is False for elt in outputs[1:]]) outputs_tail = outputs[1:] elif torch.is_tensor(outputs): first_output = outputs outputs_tail = [] else: raise ValueError("expecting a tensor or a tuple of tensors") part = PartitionedTensor(tensor=first_output, group=self.grid.get_slice_parallel_group()) # Clear the large output data, but save the computation graph first_output.data = torch.zeros(1) self.pipe_buffers['output_tensors'][buffer_id] = first_output # Inject the partitioned tensor into the output before sending outputs = (part.to_meta(), part.data(), *outputs_tail) part = None self.pipe_buffers['outputs'][buffer_id] = outputs # Optionally compute loss on the last device if self.is_last_stage(): if self._compute_loss and self.module.loss_fn is not None: labels = self.pipe_buffers['labels'][buffer_id] self.loss = self.module.loss_fn(outputs, labels) else: # Some models just return loss from forward() self.loss = outputs if self.eval_return_logits: self.outputs = outputs if isinstance(self.loss, torch.Tensor): self.fwd_outputs.append(self.loss.detach()) if self.total_loss is None: self.total_loss = torch.zeros_like(self.loss) self.total_loss += self.loss.detach() else: self.fwd_outputs.append([l.detach() for l in self.loss]) if self.total_loss is None: self.total_loss = [torch.zeros_like(l) for l in self.loss] for idx, l in enumerate(self.loss): self.total_loss[idx] += l.detach() def _exec_backward_pass(self, buffer_id): assert self.optimizer is not None, "must provide optimizer during " \ "init in order to use backward" self.mem_status('BEFORE BWD', reset_max=True) # The last stage just runs backward on the loss using DeepSpeed's typical # mechanisms. if self.is_last_stage(): super().backward(self.loss) self.mem_status('AFTER BWD') return outputs = self.pipe_buffers['outputs'][buffer_id] if self.wall_clock_breakdown(): self.timers('backward_microstep').start() self.timers('backward').start() self.timers('backward_inner_microstep').start() self.timers('backward_inner').start() # Reconstruct if we previously partitioned the output. We must be # careful to also restore the computational graph of the tensors we partitioned. 
if self.is_pipe_partitioned: if self.is_grad_partitioned: part_output = PartitionedTensor.from_meta(meta=outputs[0], local_part=outputs[1], group=self.grid.get_slice_parallel_group()) self.pipe_buffers['output_tensors'][buffer_id].data = part_output.full() outputs = (self.pipe_buffers['output_tensors'][buffer_id], *outputs[2:]) else: # Already restored from partition self.pipe_buffers['output_tensors'][buffer_id].data = outputs[0] outputs = (self.pipe_buffers['output_tensors'][buffer_id], *outputs[1:]) grad_tensors = self.grad_layer if self.is_grad_partitioned: #print(f'RANK={self.global_rank} BEFORE-BWD restoring grad={self.grad_layer[0].size()} {self.grad_layer[1].size()}') part_grad = PartitionedTensor.from_meta(meta=self.grad_layer[0], local_part=self.grad_layer[1], group=self.grid.get_slice_parallel_group()) grad_tensors = (part_grad.full(), *grad_tensors[2:]) part_grad = None #print(f'RANK={self.global_rank} BEFORE-BWD restored grad={self.grad_layer[0].size()} {self.grad_layer[1].size()}') if self.bfloat16_enabled() and not self.is_last_stage(): # manually call because we don't call optimizer.backward() self.optimizer.clear_lp_grads() # This handles either a single tensor or tuple of tensors. if isinstance(outputs, tuple): out_tensors = [t for t in outputs if t.is_floating_point()] assert len(out_tensors) == len(grad_tensors) torch.autograd.backward(tensors=out_tensors, grad_tensors=grad_tensors) else: torch.autograd.backward(tensors=(outputs, ), grad_tensors=(grad_tensors, )) if self.bfloat16_enabled() and not self.is_last_stage(): # manually call because we don't call optimizer.backward() self.optimizer.update_hp_grads(clear_lp_grads=False) # Free up the memory from the output of forward() self.pipe_buffers['output_tensors'][buffer_id] = None self.pipe_buffers['outputs'][buffer_id] = None grad_tensors = None if self.wall_clock_breakdown(): self.timers('backward_inner').stop() self.timers('backward_inner_microstep').stop() self.timers('backward').stop() self.timers('backward_microstep').stop() self.mem_status('AFTER BWD') def _exec_load_micro_batch(self, buffer_id): if self.wall_clock_breakdown(): self.timers('batch_input').start() batch = self._next_batch() if self.is_first_stage(): loaded = None if torch.is_tensor(batch[0]): loaded = batch[0].clone().to(self.device).detach() loaded.requires_grad = loaded.is_floating_point() else: assert isinstance(batch[0], (tuple, list)) # Assume list or tuple loaded = [] for x in batch[0]: assert torch.is_tensor(x) mine = x.clone().detach().to(self.device) mine.requires_grad = mine.is_floating_point() loaded.append(mine) loaded = tuple(loaded) self.pipe_buffers['inputs'][buffer_id] = loaded if self.is_last_stage(): loaded = batch[1] if torch.is_tensor(batch[1]): loaded = batch[1].to(self.device) elif isinstance(batch[1], tuple): loaded = [] for x in batch[1]: assert torch.is_tensor(x) x = x.to(self.device).detach() loaded.append(x) loaded = tuple(loaded) self.pipe_buffers['labels'][buffer_id] = loaded if self.wall_clock_breakdown(): self.timers('batch_input').stop() def _send_tensor_meta(self, buffer, recv_stage): """ Communicate metadata about upcoming p2p transfers. 
Metadata is communicated in this order: * type (0: tensor, 1: list) * num_tensors if type=list foreach tensor in buffer: * ndims * shape """ send_bytes = 0 if isinstance(buffer, torch.Tensor): type_tensor = torch.LongTensor(data=[0]).to(self.device) p2p.send(type_tensor, recv_stage) send_shape = torch.LongTensor(data=buffer.size()).to(self.device) send_ndims = torch.LongTensor(data=[len(buffer.size())]).to(self.device) p2p.send(send_ndims, recv_stage) p2p.send(send_shape, recv_stage) send_bytes += _tensor_bytes(buffer) elif isinstance(buffer, list): assert (False) type_tensor = torch.LongTensor(data=[1]).to(self.device) p2p.send(type_tensor, recv_stage) count_tensor = torch.LongTensor(data=[len(buffer)]).to(self.device) p2p.send(count_tensor, recv_stage) for tensor in buffer: assert isinstance(tensor, torch.Tensor) send_shape = torch.LongTensor(data=tensor.size()).to(self.device) send_ndims = torch.LongTensor(data=[len(tensor.size())]).to(self.device) p2p.send(send_ndims, recv_stage) p2p.send(send_shape, recv_stage) send_bytes += _tensor_bytes(tensor) elif isinstance(buffer, tuple): type_tensor = torch.LongTensor(data=[2]).to(self.device) p2p.send(type_tensor, recv_stage) count_tensor = torch.LongTensor(data=[len(buffer)]).to(self.device) p2p.send(count_tensor, recv_stage) for idx, tensor in enumerate(buffer): assert isinstance(tensor, torch.Tensor) send_shape = torch.LongTensor(data=tensor.size()).to(self.device) send_ndims = torch.LongTensor(data=[len(tensor.size())]).to(self.device) send_dtype = torch.LongTensor(data=[self.DTYPE_TO_ID[tensor.dtype]]).to(self.device) p2p.send(send_dtype, recv_stage) p2p.send(send_ndims, recv_stage) p2p.send(send_shape, recv_stage) # Useful for performance debugging. ''' new_bytes = _tensor_bytes(tensor) send_bytes += _tensor_bytes(tensor) # Useful for performance debugging. if self.grid.data_parallel_id == 0: print( f'STAGE={self.stage_id} pipe-send-volume[{idx}]: shape={send_shape} {new_bytes/1024**2:0.2f}MB' ) ''' else: raise NotImplementedError(f'Could not send meta type {type(buffer)}') # Useful for performance debugging. ''' if self.grid.data_parallel_id == 0: print(f'STAGE={self.stage_id} pipe-send-volume: {send_bytes/1024**2:0.2f}MB') ''' def _recv_tensor_meta(self, send_stage): """Receive metadata about upcoming p2p transfers and return allocated buffers. Metadata is communicated in this order: * type (0: tensor, 1: list) * num_tensors if type=list foreach tensor in buffer: * ndims * shape Returns: Allocated buffer for receiving from send_stage. """ type_tensor = torch.LongTensor(data=[0]).to(self.device) p2p.recv(type_tensor, send_stage) recv_type = type_tensor.item() # A single tensor will be sent. 
if recv_type == 0: recv_ndims = torch.LongTensor(data=[0]).to(self.device) p2p.recv(recv_ndims, send_stage) recv_ndims = recv_ndims.item() recv_shape = torch.LongTensor([1] * recv_ndims).to(self.device) p2p.recv(recv_shape, send_stage) recv_shape = recv_shape.tolist() return self._allocate_buffer(recv_shape, num_buffers=1)[0] # List or tuple of tensors elif recv_type == 1 or recv_type == 2: count_tensor = torch.LongTensor(data=[0]).to(self.device) p2p.recv(count_tensor, send_stage) num_tensors = count_tensor.item() recv_shapes_and_dtypes = [] for idx in range(num_tensors): recv_dtype = torch.LongTensor(data=[0]).to(self.device) p2p.recv(recv_dtype, send_stage) recv_dtype = self.ID_TO_DTYPE[recv_dtype.item()] recv_ndims = torch.LongTensor(data=[0]).to(self.device) p2p.recv(recv_ndims, send_stage) recv_ndims = recv_ndims.item() recv_shape = torch.LongTensor([1] * recv_ndims).to(self.device) p2p.recv(recv_shape, send_stage) recv_shapes_and_dtypes.append((recv_shape.tolist(), recv_dtype)) buffers = self._allocate_buffers(recv_shapes_and_dtypes, num_buffers=1)[0] # Convert to tuples if requested. if recv_type == 2: buffers = tuple(buffers) return buffers else: raise NotImplementedError(f'Could not receive type {type(recv_type)}') def _exec_send_activations(self, buffer_id): if self.wall_clock_breakdown(): self.timers('pipe_send_output').start() outputs = self.pipe_buffers['outputs'][buffer_id] # NCCL does not like to send torch.BoolTensor types, so cast the mask to half(). # We could do char, but with half() we can eventually flatten with other fp16 # messages (TODO) if self.has_attention_mask or self.has_bool_tensors: outputs = list(outputs) outputs[-1] = outputs[-1].half() outputs = tuple(outputs) if self.first_output_send: self.first_output_send = False self._send_tensor_meta(outputs, self.next_stage) if isinstance(outputs, torch.Tensor): p2p.send(outputs, self.next_stage) elif isinstance(outputs, tuple): for idx, buffer in enumerate(outputs): p2p.send(buffer, self.next_stage) else: raise NotImplementedError('Could not send output of type ' f'{type(outputs)}') # Restore the boolean tensor if self.has_attention_mask or self.has_bool_tensors: outputs = list(outputs) outputs[-1] = outputs[-1].bool() outputs = tuple(outputs) if self.wall_clock_breakdown(): self.timers('pipe_send_output').stop() def _exec_send_grads(self, buffer_id): if self.wall_clock_breakdown(): self.timers('pipe_send_grad').start() inputs = self.pipe_buffers['inputs'][buffer_id] # Partition the gradient if self.is_grad_partitioned: if isinstance(inputs, tuple): first_input = inputs[0] assert all([torch.is_tensor(elt) for elt in inputs[1:]]) inputs_grad_tail = [elt.grad for elt in inputs[1:] if elt.grad is not None] elif torch.is_tensor(inputs): first_input = inputs inputs_grad_tail = [] else: raise ValueError("expecting a tensor or a tuple of tensors") assert torch.is_tensor(first_input) part = PartitionedTensor(tensor=first_input.grad, group=self.grid.get_slice_parallel_group()) inputs = (part.to_meta(), part.data(), *inputs_grad_tail) # XXX Terrible hack # Drop the attention mask from the input buffer here. It does not have # a grad that needs to be communicated. We free the buffer immediately # after, so no need to restore it. The receiver also has a hack that skips # the recv. This is because NCCL does not let us send torch.BoolTensor :-(. 
if self.has_attention_mask or self.has_bool_tensors: inputs = list(inputs) inputs.pop() inputs = tuple(inputs) if isinstance(inputs, torch.Tensor): assert inputs.grad is not None p2p.send(inputs.grad, self.prev_stage) else: # XXX terrible hacky branch if self.is_grad_partitioned: # First two sends are partitioned gradient p2p.send(inputs[0], self.prev_stage) p2p.send(inputs[1], self.prev_stage) else: for idx, buffer in enumerate(inputs): # Skip tensors that will not produce a grad if not buffer.is_floating_point(): assert buffer.grad is None continue assert buffer.grad is not None p2p.send(buffer.grad, self.prev_stage) # We can free up the input buffer now self.pipe_buffers['inputs'][buffer_id] = None if self.wall_clock_breakdown(): self.timers('pipe_send_grad').stop() def _exec_recv_activations(self, buffer_id): if self.wall_clock_breakdown(): self.timers('pipe_recv_input').start() recvd = None # Allocate the buffer if necessary if self.pipe_recv_buf is None: self.pipe_recv_buf = self._recv_tensor_meta(self.prev_stage) if isinstance(self.pipe_recv_buf, torch.Tensor): p2p.recv(self.pipe_recv_buf, self.prev_stage) recvd = self.pipe_recv_buf.clone().detach() recvd.requires_grad = recvd.is_floating_point() else: assert isinstance(self.pipe_recv_buf, tuple) recvd = [None] * len(self.pipe_recv_buf) for idx, buffer in enumerate(self.pipe_recv_buf): assert torch.is_tensor(buffer) # XXX hardcode meta type if self.is_pipe_partitioned and idx == 0 and buffer.dtype != torch.long: if self.meta_buffer is None: self.meta_buffer = torch.zeros(buffer.size(), dtype=torch.long, device=self.device) buffer = self.meta_buffer p2p.recv(buffer, self.prev_stage) recvd[idx] = buffer.clone().detach() # NCCL does not like to send torch.BoolTensor types, so un-cast the # attention mask if self.has_attention_mask or self.has_bool_tensors: recvd[-1] = recvd[-1].bool() recvd = tuple(recvd) for buffer in recvd: buffer.requires_grad = buffer.is_floating_point() self.pipe_buffers['inputs'][buffer_id] = recvd if self.wall_clock_breakdown(): self.timers('pipe_recv_input').stop() def _exec_recv_grads(self, buffer_id): if self.wall_clock_breakdown(): self.timers('pipe_recv_grad').start() outputs = self.pipe_buffers['outputs'][buffer_id] # XXX these shapes are hardcoded for Megatron # Restore partitioned output if it was partitioned and we are sending full gradients if self.is_pipe_partitioned and not self.is_grad_partitioned: part_output = PartitionedTensor.from_meta(meta=outputs[0], local_part=outputs[1], group=self.grid.get_slice_parallel_group()) outputs[0].data = part_output.full() outputs = (outputs[0], *outputs[2:]) # save for backward self.pipe_buffers['outputs'][buffer_id] = outputs # Allocate gradient if necessary if self.grad_layer is None: if isinstance(outputs, torch.Tensor): s = list(outputs.size()) self.grad_layer = self._allocate_buffer(s, dtype=outputs.dtype, num_buffers=1)[0] else: # XXX This is a HACK # When we exchange activations/gradients, the two pipe stages # need to issue the send/recv with the same buffer sizes or # else there is a deadlock. The is_floating_point() filter is # used to avoid sending gradients for tensors that do not # produce gradients. When TP>1, we partition the first # activations/gradients across TP ranks to save communication # volume and memory. That partitioned tensor is represented as # two tensors: a 1/TPth chunk of the original data and also a # small LongTensor storing the metadata used to reconstruct on # the other side. 
When combined, the floating point filter also # filtered out the metadata tensor. This quick (hacky) fix just # branches on is_grad_partitioned so we don't filter out the # metadata tensor. if self.is_grad_partitioned: sizes_and_dtypes = [(list(t.size()), t.dtype) for t in outputs[:2]] + [(list(t.size()), t.dtype) for t in outputs[2:] if t.is_floating_point()] else: sizes_and_dtypes = [(list(t.size()), t.dtype) for t in outputs if t.is_floating_point()] self.grad_layer = self._allocate_buffers(sizes_and_dtypes, num_buffers=1)[0] if isinstance(self.grad_layer, torch.Tensor): p2p.recv(self.grad_layer, self.next_stage) else: assert isinstance(outputs, tuple) for idx, buffer in enumerate(self.grad_layer): # XXX GPT-2 hack if self.is_grad_partitioned and idx == 0 and buffer.dtype != torch.long: buffer.data = torch.zeros(buffer.size(), dtype=torch.long, device=self.device) p2p.recv(buffer, self.next_stage) if self.wall_clock_breakdown(): self.timers('pipe_recv_grad').stop() def _exec_optimizer_step(self, lr_kwargs=None): if self.wall_clock_breakdown(): self.timers('step_microstep').start() self.timers('step').start() self.mem_status('BEFORE STEP', reset_max=True) self._force_grad_boundary = True self._take_model_step(lr_kwargs) self._force_grad_boundary = False self.mem_status('AFTER STEP') if self.global_rank == 0 and self.monitor.enabled: self.summary_events = [(f'Train/Samples/lr', self.get_lr()[0], self.global_samples)] if self.fp16_enabled() and hasattr(self.optimizer, 'cur_scale'): self.summary_events.append( (f'Train/Samples/loss_scale', self.optimizer.cur_scale, self.global_samples)) self.monitor.write_events(self.summary_events) if self.wall_clock_breakdown(): self.timers('step_microstep').stop() self.timers('step').stop() if self.global_steps % self.steps_per_print() == 0: self.timers.log([ 'batch_input', 'forward_microstep', 'backward_microstep', 'backward_inner_microstep', 'backward_allreduce_microstep', 'backward_tied_allreduce_microstep', 'step_microstep' ]) if self.global_steps % self.steps_per_print() == 0: self.timers.log(['forward', 'backward', 'backward_inner', 'backward_allreduce', 'step']) def _zero_grads(self, inputs): if isinstance(inputs, torch.Tensor): if inputs.grad is not None: inputs.grad.data.zero_() else: for t in inputs: if t.grad is not None: t.grad.data.zero_() def _allocate_zeros(self, shape, **kwargs): """ Allocate a tensor of zeros on the engine's device. Arguments: shape: the shape of the tensor to allocate kwargs: passed to torch.zeros() Returns: A tensor from torch.zeros() allocated on self.device. """ if "dtype" not in kwargs: if self.fp16_enabled(): kwargs["dtype"] = torch.half if self.bfloat16_enabled(): kwargs["dtype"] = torch.bfloat16 return torch.zeros(shape, device=self.device, **kwargs) def _allocate_buffer(self, shape, num_buffers=-1, **kwargs): buffers = [] if num_buffers == -1: num_buffers = self.num_pipe_buffers for count in range(num_buffers): buffers.append(self._allocate_zeros(shape, **kwargs)) return buffers def _allocate_buffers(self, shapes_and_dtypes, requires_grad=False, num_buffers=-1): buffers = [] if num_buffers == -1: num_buffers = self.num_pipe_buffers for count in range(num_buffers): buffer = [] for shape, dtype in shapes_and_dtypes: buffer.append(self._allocate_zeros(shape, dtype=dtype, requires_grad=requires_grad)) buffers.append(buffer) return buffers def forward(self, *args, **kwargs): """Disabled for pipeline parallel training. See ``train_batch()``. 
""" raise PipelineError("Only train_batch() is accessible in pipeline mode.") def backward(self, *args, **kwargs): """Disabled for pipeline parallel training. See ``train_batch()``. """ raise PipelineError("Only train_batch() is accessible in pipeline mode.") def step(self, *args, **kwargs): """Disabled for pipeline parallel training. See ``train_batch()``. """ raise PipelineError("Only train_batch() is accessible in pipeline mode.") def mem_status(self, msg, print_rank=-1, reset_max=False): return global mem_alloced, mem_cached if not self.global_steps == 0 or not self.global_steps == 9: #return pass if self.mpu.get_data_parallel_rank() != 0: return if self.global_rank != 0: return rank = self.global_rank if print_rank != -1 and rank != print_rank: return get_accelerator().synchronize() if reset_max: get_accelerator().reset_max_memory_cached() get_accelerator().reset_max_memory_allocated() new_alloced = get_accelerator().memory_allocated() new_cached = get_accelerator().memory_cached() delta_alloced = new_alloced - mem_alloced delta_cached = new_cached - mem_cached mem_cached = new_cached mem_alloced = new_alloced max_alloced = get_accelerator().max_memory_allocated() max_cached = get_accelerator().max_memory_cached() # convert to GB for printing new_alloced /= 1024**3 new_cached /= 1024**3 delta_alloced /= 1024**3 delta_cached /= 1024**3 max_alloced /= 1024**3 max_cached /= 1024**3 print( f'RANK={rank} STAGE={self.stage_id} STEP={self.global_steps} MEMSTATS', msg, f'current alloc={new_alloced:0.4f}GB (delta={delta_alloced:0.4f}GB max={max_alloced:0.4f}GB) ' f'current cache={new_cached:0.4f}GB (delta={delta_cached:0.4f}GB max={max_cached:0.4f}GB)') def module_state_dict(self): """Override hack to save a pipe model and return the directory path of the save. This method should only be called by DeepSpeed's ``save_checkpoint()``. The recommended way of saving a ``PipelineModule`` outside of ``save_checkpoint()`` is ``save_state_dict()``. Returns: None """ assert isinstance(self.module, PipelineModule) assert self._curr_ckpt_path is not None, \ "PipelineEngine expects module_state_dict() to be called from save_checkpoint()" self.module.save_state_dict(self._curr_ckpt_path, checkpoint_engine=self.checkpoint_engine) return None def load_module_state_dict(self, checkpoint, strict=True, custom_load_fn=None): """Override hack to instead use a directory path. This is important because pipeline models checkpoint by layer instead of rank. If ``state_dict`` is not ``None`` or a ``str``, we revert to ``super()`` expecting a ``dict``. Args: state_dict (str, None): unused strict (bool, optional): Strict state loading. Defaults to True. """ assert custom_load_fn is None, "custom_load_fn not supported w. pipeline parallelism" state_dict = checkpoint['module'] if (state_dict is not None) and (not isinstance(state_dict, str)): super().load_module_state_dict(state_dict, strict) return self.module.load_state_dir(load_dir=self._curr_ckpt_path, strict=strict, checkpoint_engine=self.checkpoint_engine) # A map of PipeInstruction types to methods. Each method will be executed with the # kwargs provided to the PipeInstruction from the scheduler. 
_INSTRUCTION_MAP = { schedule.OptimizerStep: _exec_optimizer_step, schedule.ReduceGrads: _exec_reduce_grads, schedule.ReduceTiedGrads: _exec_reduce_tied_grads, schedule.LoadMicroBatch: _exec_load_micro_batch, schedule.ForwardPass: _exec_forward_pass, schedule.BackwardPass: _exec_backward_pass, schedule.SendActivation: _exec_send_activations, schedule.RecvActivation: _exec_recv_activations, schedule.SendGrad: _exec_send_grads, schedule.RecvGrad: _exec_recv_grads, } def _exec_schedule(self, pipe_schedule): # Reserve and reset buffers. self._reserve_pipe_buffers(pipe_schedule.num_pipe_buffers()) self.fwd_outputs = [] # For each step in the schedule for step_cmds in pipe_schedule: # For each instruction in the step for cmd in step_cmds: if type(cmd) not in self._INSTRUCTION_MAP: raise RuntimeError(f'{self.__class__.__name__} does not understand instruction {repr(cmd)}') # Equivalent to: self._exec_forward_pass(buffer_id=0) self._exec_instr = MethodType(self._INSTRUCTION_MAP[type(cmd)], self) self._exec_instr(**cmd.kwargs)
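# ---------------------------------------------------------------------------
# Illustrative sketch (not part of DeepSpeed): the instruction-dispatch pattern
# used by PipelineEngine._exec_schedule above. Handlers are stored unbound in a
# class-level map keyed by instruction type, bound to the engine instance with
# types.MethodType, and invoked with the instruction's kwargs. ToyInstruction,
# ToyForward, ToyBackward and ToyEngine below are hypothetical stand-ins for
# schedule.PipeInstruction and PipelineEngine, introduced only so the pattern
# can run in isolation.
# ---------------------------------------------------------------------------
from types import MethodType


class ToyInstruction:
    """Base class for toy schedule instructions; kwargs are forwarded to the handler."""

    def __init__(self, **kwargs):
        self.kwargs = kwargs


class ToyForward(ToyInstruction):
    pass


class ToyBackward(ToyInstruction):
    pass


def _exec_toy_forward(self, buffer_id):
    # `self` is the ToyEngine instance once the function is bound via MethodType.
    print(f'{self.name}: forward pass on buffer {buffer_id}')


def _exec_toy_backward(self, buffer_id):
    print(f'{self.name}: backward pass on buffer {buffer_id}')


class ToyEngine:
    # Map of instruction types to unbound handler functions, mirroring
    # PipelineEngine._INSTRUCTION_MAP above.
    _TOY_INSTRUCTION_MAP = {
        ToyForward: _exec_toy_forward,
        ToyBackward: _exec_toy_backward,
    }

    def __init__(self, name):
        self.name = name

    def run(self, schedule_cmds):
        for cmd in schedule_cmds:
            if type(cmd) not in self._TOY_INSTRUCTION_MAP:
                raise RuntimeError(f'unknown instruction {repr(cmd)}')
            # Bind the handler to this instance and call it with the instruction's
            # kwargs -- equivalent to self._exec_toy_forward(buffer_id=0).
            bound_handler = MethodType(self._TOY_INSTRUCTION_MAP[type(cmd)], self)
            bound_handler(**cmd.kwargs)


if __name__ == '__main__':
    ToyEngine('stage0').run([ToyForward(buffer_id=0), ToyBackward(buffer_id=0)])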
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team from deepspeed import comm as dist from collections import namedtuple from itertools import product as cartesian_product class ProcessTopology: """ Manages the mapping of n-dimensional Cartesian coordinates to linear indices. This mapping is used to map the rank of processes to the grid for various forms of parallelism. Each axis of the tensor is accessed by its name. The provided ordering of the axes defines the layout of the topology. ProcessTopology uses a "row-major" layout of the tensor axes, and so axes=['x', 'y'] would map coordinates (x,y) and (x,y+1) to adjacent linear indices. If instead axes=['y', 'x'] was used, coordinates (x,y) and (x+1,y) would be adjacent. Some methods return ProcessCoord namedtuples. """ def __init__(self, axes, dims): """Create a mapping of n-dimensional tensor coordinates to linear indices. Arguments: axes (list): the names of the tensor axes dims (list): the dimension (length) of each axis of the topology tensor """ self.axes = axes # names of each topology axis self.dims = dims # length of each topology axis # This is actually a class that lets us hash {'row':3, 'col':2} mappings self.ProcessCoord = namedtuple('ProcessCoord', axes) self.mapping = {} ranges = [range(d) for d in dims] # example: 1, (0,0,1) for global_rank, coord in enumerate(cartesian_product(*ranges)): key = {axis: coord[self.axes.index(axis)] for axis in self.axes} key = self.ProcessCoord(**key) # for example, {ProcessCoord(row=0, col=1) : 1} self.mapping[key] = global_rank def get_rank(self, **coord_kwargs): """Return the global rank of a process via its coordinates. Coordinates are specified as kwargs. For example: >>> X = ProcessTopology(axes=['x', 'y'], dims=[2,3]) >>> X.get_rank(x=0, y=1) 1 """ if len(coord_kwargs) != len(self.axes): raise ValueError('get_rank() does not support slices. Use filter_match())') key = self.ProcessCoord(**coord_kwargs) assert key in self.mapping, f'key {coord_kwargs} invalid' return self.mapping[key] def get_axis_names(self): """Return a list of the axis names in the ordering of the topology. """ return self.axes def get_rank_repr(self, rank, omit_axes=['data', 'pipe'], inner_sep='_', outer_sep='-'): """Return a string representation of a rank. This method is primarily used for checkpointing model data. For example: >>> topo = Topo(axes=['a', 'b'], dims=[2, 2]) >>> topo.get_rank_repr(rank=3) 'a_01-b_01' >>> topo.get_rank_repr(rank=3, omit_axes=['a']) 'b_01' Args: rank (int): A rank in the topology. omit_axes (list, optional): Axes that should not be in the representation. Defaults to ['data', 'pipe']. inner_sep (str, optional): [description]. Defaults to '_'. outer_sep (str, optional): [description]. Defaults to '-'. Returns: str: A string representation of the coordinate owned by ``rank``. """ omit_axes = frozenset(omit_axes) axes = [a for a in self.get_axis_names() if a not in omit_axes] names = [] for ax in axes: ax_rank = getattr(self.get_coord(rank=rank), ax) names.append(f'{ax}{inner_sep}{ax_rank:02d}') return outer_sep.join(names) def get_dim(self, axis): """Return the number of processes along the given axis. For example: >>> X = ProcessTopology(axes=['x', 'y'], dims=[2,3]) >>> X.get_dim('y') 3 """ if axis not in self.axes: return 0 return self.dims[self.axes.index(axis)] def get_coord(self, rank): """Return the coordinate owned by a process rank. The axes of the returned namedtuple can be directly accessed as members. 
For example: >>> X = ProcessTopology(axes=['x', 'y'], dims=[2,3]) >>> coord = X.get_coord(rank=1) >>> coord.x 0 >>> coord.y 1 """ for coord, idx in self.mapping.items(): if idx == rank: return coord raise ValueError(f'rank {rank} not found in topology.') def get_axis_comm_lists(self, axis): """ Construct lists suitable for a communicator group along axis ``axis``. Example: >>> topo = Topo(axes=['pipe', 'data', 'model'], dims=[2, 2, 2]) >>> topo.get_axis_comm_lists('pipe') [ [0, 4], # data=0, model=0 [1, 5], # data=0, model=1 [2, 6], # data=1, model=0 [3, 7], # data=1, model=1 ] Returns: A list of lists whose coordinates match in all axes *except* ``axis``. """ # We don't want to RuntimeError because it allows us to write more generalized # code for hybrid parallelisms. if axis not in self.axes: return [] # Grab all axes but `axis` other_axes = [a for a in self.axes if a != axis] lists = [] # Construct all combinations of coords with other_axes ranges = [range(self.get_dim(a)) for a in other_axes] for coord in cartesian_product(*ranges): other_keys = {a: coord[other_axes.index(a)] for a in other_axes} # now go over all ranks in `axis`. sub_list = [] for axis_key in range(self.get_dim(axis)): key = self.ProcessCoord(**other_keys, **{axis: axis_key}) sub_list.append(self.mapping[key]) lists.append(sub_list) return lists def filter_match(self, **filter_kwargs): """Return the list of ranks whose coordinates match the provided criteria. Example: >>> X = ProcessTopology(axes=['pipe', 'data', 'model'], dims=[2, 2, 2]) >>> X.filter_match(pipe=0, data=1) [2, 3] >>> [X.get_coord(rank) for rank in X.filter_match(pipe=0, data=1)] [ProcessCoord(pipe=0, data=1, model=0), ProcessCoord(pipe=0, data=1, model=1)] Arguments: **filter_kwargs (dict): criteria used to select coordinates. Returns: The list of ranks whose coordinates match filter_kwargs. """ def _filter_helper(x): for key, val in filter_kwargs.items(): if getattr(x, key) != val: return False return True coords = filter(_filter_helper, self.mapping.keys()) return [self.mapping[coord] for coord in coords] def get_axis_list(self, axis, idx): """Returns the list of global ranks whose coordinate in an axis is idx. For example: >>> X = ProcessTopology(axes=['x', 'y'], dims=[2,3]) >>> X.get_axis_list(axis='x', idx=0) [0, 1, 2] >>> X.get_axis_list(axis='y', idx=0) [0, 3] """ # This could be faster by generating the desired keys directly instead of # filtering. axis_num = self.axes.index(axis) ranks = [self.mapping[k] for k in self.mapping.keys() if k[axis_num] == idx] return ranks def world_size(self): return len(self.mapping) def __str__(self): return str(self.mapping) def _prime_factors(N): """ Returns the prime factorization of positive integer N. """ if N <= 0: raise ValueError("Values must be strictly positive.") primes = [] while N != 1: for candidate in range(2, N + 1): if N % candidate == 0: primes.append(candidate) N //= candidate break return primes class PipeDataParallelTopology(ProcessTopology): """ A topology specialization for hybrid data and pipeline parallelism. Uses data parallelism on the last dimension to encourage gradient reductions to use high-bandwidth intra-node links and lower-volume pipeline communications to use low-bandwidth inter-node links. """ def __init__(self, num_pp, num_dp): super().__init__(axes=['pipe', 'data'], dims=[num_pp, num_dp]) class PipeModelDataParallelTopology(ProcessTopology): """ A topology for hybrid pipeline, model, and data parallelism. 
""" def __init__(self, num_pp, num_mp, num_dp): super().__init__(axes=['pipe', 'data', 'model'], dims=[num_pp, num_dp, num_mp]) class PipelineParallelGrid: """Implements a grid object that stores the data parallel ranks corresponding to each of the model parallel stages The grid object organizes the processes in a distributed pytorch job into a 2D grid, of stage_id and data_parallel_id. self.stage_id and self.data_parallel_id stores the stage id and the data parallel id of current process. self.dp_group groups the processes by stage_id. self.dp_group[i], is a list containing all process ranks whose stage_id is i. self.p2p_groups stores a list of tuple, where each tuple stores process ranks of adjacent stages for a given data_parallel_id. For example if num_stage is 5 then a tuple [7,8] represents stages [3, 4], with data_parallel id = 1. A stage wrap around will appear as non-adjacent ranks, for example tuple [4,0] with representing wrap-around stage 4 and 0, for data_parallel_id = 0, or similarly [9,5] represents wrapped around stages [4,0] for data_parallel_id = 1. """ def __init__(self, topology=None, process_group=None): # TODO use process_group if provided self.global_rank = dist.get_rank() self.world_size = dist.get_world_size() if topology is not None: if self.global_rank == 0: print('Using topology:', topology) self._topo = topology else: num_pp = 1 num_dp = 1 for idx, prime in enumerate(_prime_factors(self.world_size)): if idx % 2 == 0: num_pp *= prime else: num_dp *= prime self._topo = PipeDataParallelTopology(num_dp=num_dp, num_pp=num_pp) self.data_parallel_size = max(self._topo.get_dim('data'), 1) self.pipe_parallel_size = max(self._topo.get_dim('pipe'), 1) self.model_parallel_size = max(self._topo.get_dim('model'), 1) self.slice_parallel_size = self.model_parallel_size assert self._is_grid_valid(), "Invalid Grid" self.stage_id = self.get_stage_id() self.data_parallel_id = self.get_data_parallel_id() # Create new ProcessGroups for all model parallelism. DeepSpeedLight uses these # to detect overflow, etc. 
self.ds_model_proc_group = None self.ds_model_rank = -1 for dp in range(self.data_parallel_size): ranks = sorted(self._topo.get_axis_list(axis='data', idx=dp)) if self.global_rank == 0: #print(f'RANK={self.global_rank} building DeepSpeed model group: {ranks}') pass proc_group = dist.new_group(ranks=ranks) if self.global_rank in ranks: self.ds_model_proc_group = proc_group self.ds_model_world_size = len(ranks) self.ds_model_rank = ranks.index(self.global_rank) assert self.ds_model_rank > -1 assert self.ds_model_proc_group is not None # Create new ProcessGroup for gradient all-reduces - these are the data parallel groups self.dp_group = [] self.dp_groups = self._topo.get_axis_comm_lists('data') for g in self.dp_groups: proc_group = dist.new_group(ranks=g) if self.global_rank in g: self.dp_group = g self.dp_proc_group = proc_group self.is_first_stage = (self.stage_id == 0) self.is_last_stage = (self.stage_id == (self.pipe_parallel_size - 1)) self.p2p_groups = self._build_p2p_groups() # Create new ProcessGroup for pipeline collectives - these are pipe parallel groups self.pp_group = [] self.pp_proc_group = None self.pipe_groups = self._topo.get_axis_comm_lists('pipe') for ranks in self.pipe_groups: if self.global_rank == 0: #print(f'RANK={self.global_rank} building pipeline group: {ranks}') pass proc_group = dist.new_group(ranks=ranks) if self.global_rank in ranks: self.pp_group = ranks self.pp_proc_group = proc_group assert self.pp_proc_group is not None # Create new ProcessGroup for model (tensor-slicing) collectives # Short circuit case without model parallelism. # TODO: it would be nice if topology had bcast semantics to avoid this branching # case? if self.model_parallel_size == 1: for group_rank in range(self.world_size): group_rank = [group_rank] group = dist.new_group(ranks=group_rank) if group_rank[0] == self.global_rank: self.slice_group = group_rank self.slice_proc_group = group return else: self.mp_group = [] self.model_groups = self._topo.get_axis_comm_lists('model') for g in self.model_groups: proc_group = dist.new_group(ranks=g) if self.global_rank in g: self.slice_group = g self.slice_proc_group = proc_group def get_stage_id(self): return self._topo.get_coord(rank=self.global_rank).pipe def get_data_parallel_id(self): return self._topo.get_coord(rank=self.global_rank).data def _build_p2p_groups(self): """Groups for sending and receiving activations and gradients across model parallel stages. """ comm_lists = self._topo.get_axis_comm_lists('pipe') p2p_lists = [] for rank in range(self.world_size): for l in comm_lists: assert len(l) == self.pipe_parallel_size if rank in l: idx = l.index(rank) buddy_rank = l[(idx + 1) % self.pipe_parallel_size] p2p_lists.append([rank, buddy_rank]) break # next global rank assert len(p2p_lists) == self.world_size return p2p_lists def _is_grid_valid(self): ranks = 1 for ax in self._topo.get_axis_names(): ranks *= self._topo.get_dim(ax) return ranks == dist.get_world_size() #returns the global rank of the process with the provided stage id #which has the same data_parallel_id as caller process def stage_to_global(self, stage_id, **kwargs): me = self._topo.get_coord(self.global_rank) transform = me._replace(pipe=stage_id, **kwargs)._asdict() return self._topo.get_rank(**transform) def topology(self): return self._topo # MPU functions for DeepSpeed integration def get_global_rank(self): return self.global_rank def get_pipe_parallel_rank(self): """ The stage of the pipeline this rank resides in. 
""" return self.get_stage_id() def get_pipe_parallel_world_size(self): """ The number of stages in the pipeline. """ return self.pipe_parallel_size def get_pipe_parallel_group(self): """ The group of ranks within the same pipeline. """ return self.pp_proc_group def get_data_parallel_rank(self): """ Which pipeline this rank resides in. """ return self.data_parallel_id def get_data_parallel_world_size(self): """ The number of pipelines. """ return self.data_parallel_size def get_data_parallel_group(self): """ The group of ranks within the same stage of all pipelines. """ return self.dp_proc_group # These are model parallel groups across all types of model parallelism. # Deepspeed uses them to detect overflow, etc. def get_model_parallel_rank(self): return self.ds_model_rank def get_model_parallel_world_size(self): return self.ds_model_world_size def get_model_parallel_group(self): return self.ds_model_proc_group # For Megatron-style tensor slicing def get_slice_parallel_rank(self): if 'model' in self._topo.get_axis_names(): return self._topo.get_coord(rank=self.global_rank).model else: return 0 def get_slice_parallel_world_size(self): return self.slice_parallel_size def get_slice_parallel_group(self): return self.slice_proc_group
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import os import glob import re as regex from functools import partial import torch import torch.nn as nn from deepspeed import comm as dist from deepspeed.utils import logger from .. import utils as ds_utils from ..activation_checkpointing import checkpointing from .topology import PipeDataParallelTopology, PipelineParallelGrid from deepspeed.runtime.state_dict_factory import SDLoaderFactory from deepspeed.accelerator import get_accelerator class PipelineError(Exception): """Errors related to the use of deepspeed.PipelineModule """ class LayerSpec: """Building block for specifying pipeline-parallel modules. LayerSpec stores the type information and parameters for each stage in a PipelineModule. For example: .. code-block:: python nn.Sequence( torch.nn.Linear(self.in_dim, self.hidden_dim, bias=False), torch.nn.Linear(self.hidden_hidden, self.out_dim) ) becomes .. code-block:: python layer_specs = [ LayerSpec(torch.nn.Linear, self.in_dim, self.hidden_dim, bias=False), LayerSpec(torch.nn.Linear, self.hidden_hidden, self.out_dim)] ] """ def __init__(self, typename, *module_args, **module_kwargs): self.typename = typename self.module_args = module_args self.module_kwargs = module_kwargs if not issubclass(typename, nn.Module): raise RuntimeError('LayerSpec only supports torch.nn.Module types.') if dist.is_initialized(): self.global_rank = dist.get_rank() else: self.global_rank = -1 def __repr__(self): return ds_utils.call_to_str(self.typename.__name__, self.module_args, self.module_kwargs) def build(self, log=False): """Build the stored specification.""" if log: logger.info(f'RANK={self.global_rank} building {repr(self)}') return self.typename(*self.module_args, **self.module_kwargs) class TiedLayerSpec(LayerSpec): def __init__(self, key, typename, *module_args, forward_fn=None, tied_weight_attr='weight', **module_kwargs): super().__init__(typename, *module_args, **module_kwargs) self.key = key self.forward_fn = forward_fn self.tied_weight_attr = tied_weight_attr class PipelineModule(nn.Module): """Modules to be parallelized with pipeline parallelism. The key constraint that enables pipeline parallelism is the representation of the forward pass as a sequence of layers and the enforcement of a simple interface between them. The forward pass is implicitly defined by the module ``layers``. The key assumption is that the output of each layer can be directly fed as input to the next, like a ``torch.nn.Sequence``. The forward pass is implicitly: .. code-block:: python def forward(self, inputs): x = inputs for layer in self.layers: x = layer(x) return x .. note:: Pipeline parallelism is not compatible with ZeRO-2 and ZeRO-3. Args: layers (Iterable): A sequence of layers defining pipeline structure. Can be a ``torch.nn.Sequential`` module. num_stages (int, optional): The degree of pipeline parallelism. If not specified, ``topology`` must be provided. topology (``deepspeed.runtime.pipe.ProcessTopology``, optional): Defines the axes of parallelism axes for training. Must be provided if ``num_stages`` is ``None``. loss_fn (callable, optional): Loss is computed ``loss = loss_fn(outputs, label)`` seed_layers(bool, optional): Use a different seed for each layer. Defaults to False. seed_fn(type, optional): The custom seed generating function. Defaults to random seed generator. base_seed (int, optional): The starting seed. Defaults to 1234. 
partition_method (str, optional): The method upon which the layers are partitioned. Defaults to 'parameters'. activation_checkpoint_interval (int, optional): The granularity activation checkpointing in terms of number of layers. 0 disables activation checkpointing. activation_checkpoint_func (callable, optional): The function to use for activation checkpointing. Defaults to ``deepspeed.checkpointing.checkpoint``. checkpointable_layers(list, optional): Checkpointable layers may not be checkpointed. Defaults to None which does not additional filtering. """ def __init__(self, layers, num_stages=None, topology=None, loss_fn=None, seed_layers=False, seed_fn=None, base_seed=1234, partition_method='parameters', activation_checkpoint_interval=0, activation_checkpoint_func=checkpointing.checkpoint, checkpointable_layers=None): super().__init__() if num_stages is None and topology is None: raise RuntimeError('must provide num_stages or topology') self.micro_offset = 0 self.loss_fn = loss_fn self.checkpointable_layers = checkpointable_layers if checkpointable_layers is not None: assert isinstance(checkpointable_layers, list), "param `checkpointable_layers` must be type of list." self.seed_layers = seed_layers self.seed_fn = seed_fn self.base_seed = base_seed if dist.get_rank() == 0: try: seed_str = self.seed_fn.__name__ except AttributeError: seed_str = None print(f'SEED_LAYERS={self.seed_layers} BASE_SEED={self.base_seed} SEED_FN={seed_str}') # Setup world info self.world_group = dist.new_group(ranks=range(dist.get_world_size())) self.global_rank = dist.get_rank(group=self.world_group) self.world_size = dist.get_world_size(group=self.world_group) self.local_rank = int(os.environ.get("LOCAL_RANK", None)) assert self.local_rank != None if topology: self._topo = topology self.num_stages = self._topo.get_dim('pipe') else: self.num_stages = num_stages if topology is None: if self.world_size % self.num_stages != 0: raise RuntimeError( f'num_stages ({self.num_stages}) must divide distributed world size ({self.world_size})') dp = self.world_size // num_stages topology = PipeDataParallelTopology(num_pp=num_stages, num_dp=dp) self._topo = topology # Construct communicators for pipeline topology self._grid = PipelineParallelGrid(process_group=self.world_group, topology=self._topo) self.stage_id = self._topo.get_coord(self.global_rank).pipe # Initialize partition information self._layer_specs = list(layers) self._num_layers = len(self._layer_specs) self._local_start = 0 self._local_stop = None self._partition_layers(method=partition_method) self.forward_funcs = [] self.fwd_map = {} self.tied_modules = nn.ModuleDict() self.tied_weight_attrs = {} # Offset the random seed by the stage ID. 
#newseed = get_accelerator().initial_seed() + self._grid.get_stage_id() #ds_utils.set_random_seed(newseed) #with torch.random.fork_rng(devices=[get_accelerator().current_device_name()]): self._build() self.to(get_accelerator().device_name(self.local_rank)) self.tied_comms = self._index_tied_modules() self._synchronize_tied_weights() self.activation_checkpoint_interval = activation_checkpoint_interval self.activation_checkpoint_func = activation_checkpoint_func def _build(self): specs = self._layer_specs for local_idx, layer in enumerate(specs[self._local_start:self._local_stop]): layer_idx = local_idx + self._local_start if self.seed_layers: if self.seed_fn: self.seed_fn(self.base_seed + layer_idx) else: ds_utils.set_random_seed(self.base_seed + layer_idx) # Recursively build PipelineModule objects if isinstance(layer, PipelineModule): raise NotImplementedError('RECURSIVE BUILD NOT YET IMPLEMENTED') # LayerSpec objects contain an nn.Module that should be allocated now. elif isinstance(layer, nn.Module): name = str(layer_idx) self.forward_funcs.append(layer) self.fwd_map.update({name: len(self.forward_funcs) - 1}) self.add_module(name, layer) # TiedLayerSpec objects contain an nn.Module that should be allocated now. elif isinstance(layer, TiedLayerSpec): # Build and register the module if we haven't seen it before. if layer.key not in self.tied_modules: self.tied_modules[layer.key] = layer.build() self.tied_weight_attrs[layer.key] = layer.tied_weight_attr if layer.forward_fn is None: # Just use forward() self.forward_funcs.append(self.tied_modules[layer.key]) else: # User specified fn with args (module, input) self.forward_funcs.append(partial(layer.forward_fn, self.tied_modules[layer.key])) # LayerSpec objects contain an nn.Module that should be allocated now. elif isinstance(layer, LayerSpec): module = layer.build() name = str(layer_idx) self.forward_funcs.append(module) self.fwd_map.update({name: len(self.forward_funcs) - 1}) self.add_module(name, module) # Last option: layer may be a functional (e.g., lambda). We do nothing in # that case and just use it in forward() else: self.forward_funcs.append(layer) # All pipeline parameters should be considered as model parallel in the context # of our FP16 optimizer for p in self.parameters(): p.ds_pipe_replicated = False def _count_layer_params(self): """Count the trainable parameters in individual layers. This routine will only build one layer at a time. Returns: A list of the number of parameters in each layer. """ param_counts = [0] * len(self._layer_specs) for idx, layer in enumerate(self._layer_specs): if isinstance(layer, LayerSpec): l = layer.build() params = filter(lambda p: p.requires_grad, l.parameters()) param_counts[idx] = sum(p.numel() for p in params) elif isinstance(layer, nn.Module): params = filter(lambda p: p.requires_grad, layer.parameters()) param_counts[idx] = sum(p.numel() for p in params) return param_counts def _find_layer_type(self, layername): idxs = [] typeregex = regex.compile(layername, regex.IGNORECASE) for idx, layer in enumerate(self._layer_specs): name = None if isinstance(layer, LayerSpec): name = layer.typename.__name__ elif isinstance(layer, nn.Module): name = layer.__class__.__name__ else: try: name = layer.__name__ except AttributeError: continue if typeregex.search(name): idxs.append(idx) if len(idxs) == 0: raise RuntimeError(f"Partitioning '{layername}' found no valid layers to partition.") return idxs def forward(self, forward_input): # We need to offset the seed by the microbatch ID. 
Save it in a local var to # ensure it is preserved in the closure. Otherwise checkpointed forward funcs # will see a different offset. self.micro_offset += 1 def exec_range_func(start, end): ''' Helper function to be used with checkpoint() Adapted from torch.utils.checkpoint:checkpoint_sequential() ''' local_micro_offset = self.micro_offset + 1 def exec_func(*inputs): # Single tensor inputs need to be unwrapped if len(inputs) == 1: inputs = inputs[0] for idx, layer in enumerate(self.forward_funcs[start:end]): self.curr_layer = idx + self._local_start if self.seed_layers: new_seed = (self.base_seed * local_micro_offset) + self.curr_layer if self.seed_fn: self.seed_fn(new_seed) else: ds_utils.set_random_seed(new_seed) inputs = layer(inputs) return inputs return exec_func if self.activation_checkpoint_interval == 0: func = exec_range_func(0, len(self.forward_funcs)) x = func(forward_input) else: num_layers = len(self.forward_funcs) x = forward_input for start_idx in range(0, num_layers, self.activation_checkpoint_interval): end_idx = min(start_idx + self.activation_checkpoint_interval, num_layers) funcs = self.forward_funcs[start_idx:end_idx] # Since we either pass tensors or tuples of tensors without unpacking, we # need to be careful not to double-wrap tensors with tuple. if not isinstance(x, tuple): x = (x, ) if self._is_checkpointable(funcs): x = self.activation_checkpoint_func(exec_range_func(start_idx, end_idx), *x) else: x = exec_range_func(start_idx, end_idx)(*x) return x def _partition_layers(self, method='uniform'): num_stages = self._topo.get_dim('pipe') stage_id = self._topo.get_coord(self.global_rank).pipe if self.global_rank == 0: logger.info(f'Partitioning pipeline stages with method {method}') method = method.lower() # Each stage gets a simple uniform number of layers. if method == 'uniform': num_layers = len(self._layer_specs) self.parts = ds_utils.partition_uniform(num_items=num_layers, num_parts=num_stages) elif method == 'parameters': param_counts = self._count_layer_params() self.parts = ds_utils.partition_balanced(weights=param_counts, num_parts=num_stages) elif method.startswith('type:'): layertype = method.split(':')[1] binary_weights = [0] * len(self._layer_specs) for idx in self._find_layer_type(layertype): binary_weights[idx] = 1 self.parts = ds_utils.partition_balanced(weights=binary_weights, num_parts=num_stages) elif method == 'profile': raise NotImplementedError(f'Partitioning method {method} not implemented.') else: raise NotImplementedError(f'Partitioning method {method} not implemented.') # Print some information on the partitioning. 
if self.global_rank == 0: for stage in range(num_stages): start = self.parts[stage] stop = self.parts[stage + 1] print(f'stage={stage} layers={stop - start}') for idx, layer in enumerate(self._layer_specs[start:stop]): name = str(layer) if isinstance(layer, LayerSpec): name = layer.typename.__name__ if isinstance(layer, nn.Module): name = layer.__class__.__name__ else: try: name = layer.__name__ except AttributeError: pass print(f' {idx+start:2d}: {name}') if self.loss_fn: try: print(f' loss: {self.loss_fn.__name__}') except AttributeError: print(f' loss: {self.loss_fn.__class__.__name__}') self._set_bounds(start=self.parts[stage_id], stop=self.parts[stage_id + 1]) def allreduce_tied_weight_gradients(self): '''All reduce the gradients of the tied weights between tied stages''' for key, comm in self.tied_comms.items(): weight = getattr(self.tied_modules[key], comm['weight_attr']) dist.all_reduce(weight.grad, group=comm['group']) def get_tied_weights_and_groups(self): weight_group_list = [] for key, comm in self.tied_comms.items(): weight = getattr(self.tied_modules[key], comm['weight_attr']) weight_group_list.append((weight, comm['group'])) return weight_group_list def _synchronize_tied_weights(self): for key, comm in self.tied_comms.items(): dist.broadcast( getattr(comm['module'], comm['weight_attr']), src=min(comm['ranks']), group=comm['group'], ) def _index_tied_modules(self): ''' Build communication structures for tied modules. ''' tied_comms = {} if self._topo.get_dim('pipe') == 1: return tied_comms specs = self._layer_specs tie_keys = set(s.key for s in specs if isinstance(s, TiedLayerSpec)) for key in tie_keys: # Find the layers that the tied module appears in tied_layers = [] for idx, layer in enumerate(specs): if isinstance(layer, TiedLayerSpec) and layer.key == key: tied_layers.append(idx) # Find all stages with this tied module # TODO: Would be nice to remove the nested data/model parallelism loops and # TODO: instead generalize in some way, since we really just care about the # TODO: stage that owns the tied layer. Then loop over each (dp, mp, ...) # TODO: fiber to generate process groups. tied_stages = set(self.stage_owner(idx) for idx in tied_layers) for dp in range(self._grid.data_parallel_size): for mp in range(self._grid.get_slice_parallel_world_size()): tied_ranks = [] for s in sorted(tied_stages): if self._grid.get_slice_parallel_world_size() > 1: tied_ranks.append(self._grid.stage_to_global(stage_id=s, data=dp, model=mp)) else: tied_ranks.append(self._grid.stage_to_global(stage_id=s, data=dp)) group = dist.new_group(ranks=tied_ranks) # Record this tied module if we own a local copy of it. if self.global_rank in tied_ranks: assert key in self.tied_modules if key in self.tied_modules: tied_comms[key] = { 'ranks': tied_ranks, 'group': group, 'weight_attr': self.tied_weight_attrs[key], 'module': self.tied_modules[key], } # Only count the tied module once in the eyes of the FP16 optimizer if self.global_rank != tied_ranks[0]: for p in self.tied_modules[key].parameters(): p.ds_pipe_replicated = True ''' if len(tied_comms) > 0: print(f'RANK={self.global_rank} tied_comms={tied_comms}') ''' return tied_comms def partitions(self): return self.parts def stage_owner(self, layer_idx): assert 0 <= layer_idx < self._num_layers for stage in range(self._topo.get_dim('pipe')): if self.parts[stage] <= layer_idx < self.parts[stage + 1]: return stage raise RuntimeError(f'Layer {layer_idx} not owned? 
parts={self.parts}') def _set_bounds(self, start=None, stop=None): """Manually define the range of layers that will be built on this process. These boundaries are treated as list slices and so start is inclusive and stop is exclusive. The default of None for both results in all layers being built locally. """ self._local_start = start self._local_stop = stop def set_checkpoint_interval(self, interval): assert interval >= 0 self.checkpoint_interval = interval def topology(self): """ ProcessTopology object to query process mappings. """ return self._topo def mpu(self): return self._grid def num_pipeline_stages(self): return self._topo.get_dim('pipe') def ckpt_prefix(self, checkpoints_path, tag): """Build a prefix for all checkpoint files written by this module. """ # All checkpoint files start with this rank_name = 'module' # Data parallelism is omitted from the naming convention because we are agnostic # to this in the checkpoint. omit_dims = frozenset(['data']) axes = [a for a in self._grid._topo.get_axis_names() if a not in omit_dims] for dim in axes: rank = getattr(self._grid._topo.get_coord(rank=self.global_rank), dim) rank_name += f'-{dim}_{rank:02d}' ckpt_name = os.path.join(checkpoints_path, str(tag), rank_name) return ckpt_name def ckpt_layer_path(self, ckpt_dir, local_layer_idx): """Customize a prefix for a specific pipeline module layer. """ idx = local_layer_idx + self._local_start layer_ckpt_path = os.path.join(ckpt_dir, f'layer_{idx:02d}') rank_repr = self._grid._topo.get_rank_repr(rank=self.global_rank) if rank_repr != '': layer_ckpt_path += f'-{rank_repr}' layer_ckpt_path += '-model_states.pt' return layer_ckpt_path def ckpt_layer_path_list(self, ckpt_dir, local_layer_idx): """Get all ckpt file list for a specific pipeline module layer. """ idx = local_layer_idx + self._local_start layer_ckpt_path = os.path.join(ckpt_dir, f'layer_{idx:02d}-') layer_ckpt_path += "*model_states.pt" ckpt_files = glob.glob(layer_ckpt_path) ckpt_files.sort() return ckpt_files def save_state_dict(self, save_dir, checkpoint_engine): # Processes having the same model parallel rank on different data parallel instances # have identical layer weights. We can distribute the task of saving the layer weights # among the data parallel ranks. For example, if a pipeline stage has 9 layers and # if there are 2 data parallel instances, rank 0 will save the first 5 layers and # rank 1 will save the last 4. dp_rank = self._grid.data_parallel_id dp_size = self._grid.data_parallel_size num_layers = len(self.forward_funcs) if self.checkpoint_parallel_write_pipeline: # spread layers evenly across data parallel ranks offsets = ds_utils.partition_uniform(num_layers, dp_size) start, end = offsets[dp_rank], offsets[dp_rank + 1] else: # data parallel rank 0 writes all layers if dp_rank != 0: return start, end = 0, num_layers layer_list = self.forward_funcs[start:end] checkpoint_engine.makedirs(save_dir, exist_ok=True) for idx, layer in enumerate(layer_list): model_ckpt_path = self.ckpt_layer_path(save_dir, start + idx) if not hasattr(layer, 'state_dict'): continue # We pass cloned tensors to torch.save() to avoid checkpoint bloat which occurs because torch.save() # saves the underlying storage rather than the slice of the storage corresponding to individual tensors. # This is a problem in DeepSpeed because we often allocate tensors using slices of large flattened buffers. # Tensor cloning helps to avoid this problem because the storage of cloned tensors are closer to the true size. 
# It is expected that the garbage collector will reclaim the cloned tensor storage to avoid memory bloat. # See https://pytorch.org/docs/stable/notes/serialization.html#preserve-storage-sharing orig_state_dict = layer.state_dict() final_state_dict = type(orig_state_dict)({k: v.clone() for k, v in orig_state_dict.items()}) checkpoint_engine.save(final_state_dict, model_ckpt_path) def load_state_dir(self, load_dir, checkpoint_engine, strict=True): for idx, layer in enumerate(self.forward_funcs): # Functions, etc. will not have state_dicts if not hasattr(layer, 'load_state_dict'): continue # get all checkpoint files for the layer. model_ckpt_list = self.ckpt_layer_path_list(load_dir, idx) mp_rank = self._grid.get_slice_parallel_rank() mp_world_size = self._grid.get_slice_parallel_world_size() sd_loader = SDLoaderFactory.get_sd_loader(model_ckpt_list, version=2.0, checkpoint_engine=checkpoint_engine) load_path, checkpoint, _ = sd_loader.load(mp_world_size, mp_rank, module_key=None, is_pipe_parallel=True) layer.load_state_dict(checkpoint) # if self._grid.data_parallel_id == 0: # logger.info( # f'RANK={self.global_rank} Loaded layer={idx+self._local_start} file={load_path}' # ) self._synchronize_tied_weights() def _is_checkpointable(self, funcs): # This is an unfortunate hack related to torch and deepspeed activation checkpoint implementations. # Some layers like torch.nn.Embedding will not receive grads if checkpointed, which breaks things. # I presume it's related to the discrete inputs that cannot require_grad? Need to revisit. if self.__class__.__name__ in ('GPTModelPipe', 'GPT2ModelPipe'): return all('ParallelTransformerLayerPipe' in f.__class__.__name__ for f in funcs) if self.checkpointable_layers is not None: return all(f.__class__.__name__ in self.checkpointable_layers for f in funcs) params = [f.parameters() for f in funcs if isinstance(f, torch.nn.Module)] return any(len(list(p)) > 0 for p in params)
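# A minimal usage sketch for LayerSpec and PipelineModule above, assuming a toy MLP,
# a two-stage split, and that the process was started under the deepspeed (or torchrun)
# launcher so that RANK/LOCAL_RANK/WORLD_SIZE are set; the dimensions and the loss
# function are illustrative assumptions, not taken from the sources above.
import deepspeed
import torch.nn as nn
from deepspeed.pipe import LayerSpec, PipelineModule

deepspeed.init_distributed()

hidden = 256
specs = [
    LayerSpec(nn.Linear, 32, hidden),   # layer 0
    LayerSpec(nn.ReLU),                 # layer 1 (no parameters)
    LayerSpec(nn.Linear, hidden, 10),   # layer 2
]

# Two pipeline stages; 'parameters' balances stages by trainable parameter count.
model = PipelineModule(layers=specs,
                       num_stages=2,
                       loss_fn=nn.CrossEntropyLoss(),
                       partition_method='parameters',
                       activation_checkpoint_interval=0)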
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team from ..utils import call_to_str from abc import ABC, abstractmethod class PipeSchedule(ABC): """Directs the execution of a pipeline engine by generating sequences of :class:`PipeInstruction`. Schedules are generators that yield sequences of :class:`PipeInstruction` to process the micro-batches in one batch. Each yielded step is atomic in the sense that a barrier synchronization can be placed between successive steps without deadlock. Below is an example schedule that implements data parallelism with gradient accumulation: .. code-block:: python class DataParallelSchedule(PipeSchedule): def steps(self): for step_id in range(self.micro_batches): cmds = [ LoadMicroBatch(buffer_id=0), ForwardPass(buffer_id=0), BackwardPass(buffer_id=0), ] if step_id == self.micro_batches - 1: cmds.extend([ ReduceGrads(), OptimizerStep(), ]) yield cmds def num_pipe_buffers(self): return 1 Args: micro_batches (int): The number of micro-batches that comprise a batch. stages (int): The number of pipeline stages. stage_id (int): The pipe stage that will execute the generated schedule. """ def __init__(self, micro_batches, stages, stage_id): super().__init__() self.micro_batches = micro_batches self.stages = stages self.stage_id = stage_id self.prev_stage = self.stage_id - 1 self.next_stage = self.stage_id + 1 @abstractmethod def steps(self): """Yield a list of :class:`PipeInstruction` for each step in the schedule. .. note:: Schedules must implement ``steps()`` to define the schedule. Returns: Instructions to be executed as one step of the pipeline """ pass def num_pipe_buffers(self): """The number of pipeline buffers that will be used by this stage. .. note:: Schedules should specialize ``num_pipe_buffers()`` for memory savings at scale. Returns: The number of buffers for the engine to allocate. """ return self.micro_batches def _valid_micro_batch(self, micro_batch_id): return 0 <= micro_batch_id < self.micro_batches def _valid_stage(self, stage_id): return 0 <= stage_id < self.stages @property def stage(self): """Stage index used to configure this schedule.""" return self.stage_id @property def num_stages(self): """The number of total pipeline stages used to configure this schedule.""" return self.stages @property def num_micro_batches(self): """The number of total micro_batches used to configure this schedule.""" return self.micro_batches @property def is_first_stage(self): """True if the configured ``stage_id`` is the first stage in the pipeline.""" return self.stage_id == 0 @property def is_last_stage(self): """True if the configured ``stage_id`` is the last stage in the pipeline.""" return self.stage_id == self.stages - 1 def _buffer_idx(self, micro_batch_id): """Map a micro-batch index to a pipeline buffer index. This method uses a cyclic allocation strategy. Args: micro_batch_id (int): The micro-batch index relative to the beginning of the schedule. Returns: int: The index of the buffer that should store data. """ assert self._valid_micro_batch(micro_batch_id) return micro_batch_id % self.num_pipe_buffers() def __iter__(self): self.it = None return self def __next__(self): if self.it is None: self.it = self.steps() return next(self.it) class InferenceSchedule(PipeSchedule): """A schedule for inferencing batches using pipeline parallelism. 
""" def steps(self): """""" prev_micro_batch_id = -1 total_steps = self.micro_batches + self.stages - 1 for step_id in range(total_steps): cmds = [] micro_batch_id = step_id - self.stage_id # Alternate send/recv buffers if _is_even(self.stage_id): recv_buf = step_id % 2 send_buf = (step_id + 1) % 2 else: recv_buf = (step_id + 1) % 2 send_buf = step_id % 2 if self.is_first_stage or self.is_last_stage: if self._valid_micro_batch(micro_batch_id): cmds.append(LoadMicroBatch(recv_buf)) if _is_even(self.stage_id): if self._valid_stage(self.next_stage): if self._valid_micro_batch(micro_batch_id - 1): cmds.append(SendActivation(send_buf)) if self._valid_stage(self.prev_stage): if self._valid_micro_batch(micro_batch_id): cmds.append(RecvActivation(recv_buf)) else: if self._valid_stage(self.prev_stage): if self._valid_micro_batch(micro_batch_id): cmds.append(RecvActivation(recv_buf)) if self._valid_stage(self.next_stage): if self._valid_micro_batch(micro_batch_id - 1): cmds.append(SendActivation(send_buf)) if self._valid_micro_batch(micro_batch_id): cmds.append(ForwardPass(recv_buf)) yield cmds def num_pipe_buffers(self): """Only two pipeline buffers are required for inferencing. Returns: ``2`` """ return 2 class TrainSchedule(PipeSchedule): """A schedule for training a batch using hybrid parallelism. Pipeline parallelism is extracted through gradient accumulation and thus convergence follows that of a data parallel approach with the same batch size. """ def steps(self): """""" prev_micro_batch_id = -1 total_steps = 2 * (self.micro_batches + self.stages - 1) for step_id in range(total_steps): # Map the step of the pipeline to the micro-batch id and also whether it is a # forward or backward pass step. micro_batch_id, is_forward = self._step_to_micro_batch(step_id) if self._valid_micro_batch(prev_micro_batch_id): prev_buffer = self._buffer_idx(prev_micro_batch_id) if self._valid_micro_batch(micro_batch_id): curr_buffer = self._buffer_idx(micro_batch_id) cmds = [] # Exchange activations if is_forward: if self._valid_micro_batch(prev_micro_batch_id) and self._valid_stage(self.prev_stage): cmds.append(SendGrad(prev_buffer)) if self._valid_micro_batch(micro_batch_id) and self._valid_stage(self.prev_stage): cmds.append(RecvActivation(curr_buffer)) else: if self._valid_micro_batch(micro_batch_id) and self._valid_stage(self.next_stage): cmds.append(RecvGrad(curr_buffer)) if self._valid_micro_batch(prev_micro_batch_id) and self._valid_stage(self.next_stage): cmds.append(SendActivation(prev_buffer)) # First/last stage loads if self.stage_id == 0 or self.stage_id == self.stages - 1: if is_forward and self._valid_micro_batch(micro_batch_id): cmds.append(LoadMicroBatch(curr_buffer)) # Computation if self._valid_micro_batch(micro_batch_id): if is_forward: cmds.append(ForwardPass(curr_buffer)) else: cmds.append(BackwardPass(curr_buffer)) # Model step at the end of the batch if step_id == total_steps - 1: cmds.append(ReduceTiedGrads()) cmds.append(ReduceGrads()) cmds.append(OptimizerStep()) # Prepare state for next time prev_micro_batch_id = micro_batch_id yield cmds def num_pipe_buffers(self): """Return the number of pipeline buffers required for this stage. This is equivalent to the maximum number of in-flight forward passes, since we need to remember the activations of forward passes in order to run backpropagation. For synchronous 1F1B, this is equivalent to the index difference between this stage and the last stage. 
""" buffers = min(self.stages - self.stage_id, self.micro_batches) return max(2, buffers) def _step_to_micro_batch(self, step_id): if _is_even(step_id) and _is_even(self.stage_id): micro_batch_id = self._even_step_forward_id(step_id) is_forward = True elif _is_odd(step_id) and _is_odd(self.stage_id): micro_batch_id = self._odd_step_forward_id(step_id) is_forward = True elif _is_even(step_id) and _is_odd(self.stage_id): micro_batch_id = self._even_step_backward_id(step_id) is_forward = False elif _is_odd(step_id) and _is_even(self.stage_id): micro_batch_id = self._odd_step_backward_id(step_id) is_forward = False else: assert False return micro_batch_id, is_forward def _even_step_forward_id(self, step_id): base = step_id // 2 micro_batch_id = int(base - self.stage_id // 2) return micro_batch_id def _odd_step_forward_id(self, step_id): base = (step_id - 1) // 2 micro_batch_id = int(base - self.stage_id // 2) return micro_batch_id def _even_step_backward_id(self, step_id): base = step_id // 2 micro_batch_id = int(base - self.stages + (self.stage_id + 1) // 2) return micro_batch_id def _odd_step_backward_id(self, step_id): base = ((step_id - 1) // 2) - self.stages + 1 micro_batch_id = int(base + self.stage_id // 2) return micro_batch_id class DataParallelSchedule(PipeSchedule): """An example schedule that trains using traditional data parallelism with gradient accumulation. """ def steps(self): """""" for step_id in range(self.micro_batches): cmds = [ LoadMicroBatch(buffer_id=0), ForwardPass(buffer_id=0), BackwardPass(buffer_id=0), ] if step_id == self.micro_batches - 1: cmds.extend([ ReduceGrads(), OptimizerStep(), ]) yield cmds def num_pipe_buffers(self): """Only one pipeline buffer needed. """ return 1 class PipeInstruction: """Base class for all instructions to be executed by the pipeline engine. All keyword arguments are stored as members similar to a ``namedtuple``. These are then accessible to the :class:`PipeEngine` during execution. Args: kwargs (optional): keyword arguments to store as members """ def __init__(self, **kwargs): self.name = self.__class__.__name__ self.kwargs = kwargs for key, val in kwargs.items(): setattr(self, key, val) def __repr__(self): return call_to_str(self.name, **self.kwargs) class OptimizerStep(PipeInstruction): """Performs one step with the optimizer and zeros gradients. .. note:: Should be issued after :class:`ReduceGrads` and :class:`ReduceTiedGrads`. .. note:: Can be a synchronization point among data-parallel ranks. """ pass class ReduceGrads(PipeInstruction): """Reduce the computed gradients among data-parallel processes within the stage. """ pass class ReduceTiedGrads(PipeInstruction): """Reduce the computed gradients of tied modules within a pipeline-parallel group. .. warning:: The stages included in this synchronization point are not known until the model is partitioned among pipeline stages. In the worst case, it includes all pipeline stages. This instruction should be scheduled carefully to avoid deadlocks. """ pass class BufferOpInstruction(PipeInstruction): """A pipeline instruction that operates on pipeline buffer(s). Args: buffer_id (int): the index of the pipeline buffer() to modify. """ def __init__(self, buffer_id, **kwargs): super().__init__(buffer_id=buffer_id, **kwargs) # IO class LoadMicroBatch(BufferOpInstruction): """Load a micro-batch into a buffer. Roughly: .. code-block:: python buffers['inputs'][buffer_id] = next(data_iter) """ pass # Compute class ForwardPass(BufferOpInstruction): """Compute a forward pass. Roughly: .. 
code-block:: python buffers['outputs'][buffer_id] = forward(buffers['inputs'][buffer_id]) """ pass class BackwardPass(BufferOpInstruction): """Compute a backward pass and accumulate gradients. Roughly: .. code-block:: python outputs = buffers['outputs'][buffer_id] gradients = buffers['gradients'][buffer_id] torch.autograd.backward(tensors=outputs, grad_tensors=gradients) """ pass # Communication class SendActivation(BufferOpInstruction): """Send activations to the next stage in the pipeline. Roughly: .. code-block:: python send(buffers['outputs'][buffer_id]) .. note:: The communication is blocking and must be paired with a :class:`RecvActivation` on the next pipeline stage to avoid deadlock. """ pass class RecvActivation(BufferOpInstruction): """Receive activations from the previous stage in the pipeline. Roughly: .. code-block:: python buffers['inputs'][buffer_id] = recv() .. note:: The communication is blocking and must be paired with a :class:`SendActivation` on the previous pipeline stage to avoid deadlock. """ pass class SendGrad(BufferOpInstruction): """Send computed gradients, with respect to the received activations, to the previous pipeline stage. .. note:: Only received tensors with ``requires_grad==True`` will produce gradients. Missing gradients will be replaced with ``None`` on the receiving stage. .. note:: The communication is blocking and must be paired with a :class:`RecvGrad` on the previous pipeline stage to avoid deadlock. """ pass class RecvGrad(BufferOpInstruction): """Receive computed gradients from the next pipeline stage. .. note:: Only activations with ``requires_grad==True`` will produce gradients. Missing gradients will be replaced with ``None``. .. note:: The communication is blocking and must be paired with a :class:`SendGrad` on the next pipeline stage to avoid deadlock. """ pass def _is_even(x): return x % 2 == 0 def _is_odd(x): return x % 2 != 0
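# The schedules above are plain generators over PipeInstruction objects, so they can be
# inspected without any distributed setup. This small sketch prints the instruction
# sequence that stage 0 of a 2-stage pipeline would run for 4 micro-batches under the
# 1F1B TrainSchedule; the import path mirrors this file's location and the stage/batch
# counts are illustrative assumptions.
from deepspeed.runtime.pipe.schedule import TrainSchedule

def dump_schedule(micro_batches=4, stages=2, stage_id=0):
    sched = TrainSchedule(micro_batches=micro_batches, stages=stages, stage_id=stage_id)
    # Each yielded step is a list of instructions the engine executes atomically.
    for step_id, cmds in enumerate(sched.steps()):
        print(f'step {step_id:2d}: ' + ', '.join(repr(c) for c in cmds))

if __name__ == '__main__':
    dump_schedule()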
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team """ Functionality of swapping tensors to/from (NVMe) storage devices. """ import os import torch from deepspeed import comm as dist from deepspeed.utils.logging import logger from deepspeed.runtime.swap_tensor.constants import * from deepspeed.runtime.swap_tensor.utils import swap_in_tensors, swap_out_tensors, \ MIN_AIO_BYTES, AIO_ALIGNED_BYTES, get_sized_buffers from deepspeed.runtime.swap_tensor.utils import SwapBufferManager, SwapBufferPool class FlattenedTensorSwapInfo(object): def __init__(self, path, length, offset): self.path = path self.offset = offset self.length = length class OptimizerStateSwapInfo(object): def __init__(self, parameter, numel, base_folder): self.tensors = [] self.param_id = id(parameter) self.swap_folder = base_folder self.swap_paths = [] self.swapped_gradients = {} self.unswapped_gradients = {} self.tensor_numel = numel self.tensor_dtype = parameter.dtype self.tensor_device = parameter.device self.has_state_tensors = False self._add_tensors([parameter]) def numel(self): return self.tensor_numel def has_gradients(self): return self.swapped_gradients or self.unswapped_gradients def _add_tensors(self, tensor_list): for t in tensor_list: self.tensors.append(t) self.swap_paths.append(os.path.join(self.swap_folder, f'{id(t)}.tensor.swp')) def add_state_tensors(self, tensor_list): self.has_state_tensors = True self._add_tensors(tensor_list) def device(self): return self.tensor_device def dtype(self): return self.tensor_dtype def release_memory(self): for tensor in self.tensors: tensor.data = torch.Tensor() def get_or_create_gradient_paths(self, offsets, lengths): gradient_paths = [] for offset, length in zip(offsets, lengths): if not offset in self.swapped_gradients.keys(): path = os.path.join(self.swap_folder, f'{self.param_id}_gradient_{offset}_{length}.tensor.swp') self.swapped_gradients[offset] = FlattenedTensorSwapInfo(path, length, offset) gradient_paths.append(self.swapped_gradients[offset].path) return gradient_paths def set_swap_buffers(self, buffers): compute_lengths = [self.numel()] * len(self.tensors) compute_buffers = get_sized_buffers(buffers, compute_lengths) for t, buffer in zip(self.tensors, compute_buffers): t.data = buffer.data def get_swap_gradient_buffers(self, swap_buffer): assert self.numel() <= swap_buffer.numel() return [swap_buffer.narrow(0, grad.offset, grad.length) for grad in self.swapped_gradients.values()] def get_swap_gradient_paths(self): return [grad.path for grad in self.swapped_gradients.values()] def get_unpinned_state_tensors(self): return [t for t in self.tensors if not t.is_pinned()] def read_unswapped_gradients(self, dest_buffer): num_elem_count = 0 for offset, grad_partition in self.unswapped_gradients.items(): dst_tensor = dest_buffer.narrow(0, offset, grad_partition.numel()) dst_tensor.data.copy_(grad_partition.data) num_elem_count += grad_partition.numel() return num_elem_count def release_unswapped_gradients(self): self.unswapped_gradients = {} SWAPPER_DEBUG_MODE = False SWAP_OUT_GRADIENT_TIMER = 'swap_out_gradient' class OptimizerSwapper(object): def __init__(self, swap_config, aio_config, base_folder, optimizer, largest_numel, device, dtype, timers): self.swap_config = swap_config self.aio_config = aio_config # NVMe swap management self.swap_params_info = {} self.swap_element_size = torch.tensor([], dtype=dtype).element_size() self.swap_folder = os.path.join(base_folder, 'optimizer', f'rank{dist.get_rank()}') 
os.makedirs(self.swap_folder, exist_ok=True) self.optimizer = optimizer # Read/Write alignment for each thread during Intra-request parallelism self.min_aio_bytes = max(MIN_AIO_BYTES, aio_config[AIO_BLOCK_SIZE]) self.aligned_bytes = AIO_ALIGNED_BYTES * aio_config[AIO_THREAD_COUNT] self.numel_alignment = self.aligned_bytes // self.swap_element_size # Swap buffer management self.largest_numel = self._io_aligned_numel(largest_numel) self.dtype = dtype self.swap_buffer_manager = SwapBufferManager(num_elems=self.largest_numel, count=swap_config.buffer_count, dtype=dtype) # Timers self.timers = timers self.timer_names = set() # Print exclusion list self.print_exclude_list = [ 'optimizer', 'swap_buffer_manager', 'swap_params_info', 'timers', 'timer_names', ] def swappable_tensor(self, param=None, numel=None): assert param is not None or numel is not None, "Either param or numel must be provided" if param is not None: return self.min_aio_bytes <= (param.numel() * self.swap_element_size) return self.min_aio_bytes <= (numel * self.swap_element_size) def init_timers(self): self.timer_names = set() def log_timers(self): if self.timer_names: self._log_timers(list(self.timer_names), force=True) def pre_backward(self): self.init_timers() def post_backward(self): pass def _flush_gradient_swapper(self, gradient_swapper): if gradient_swapper.has_buffers(): self._start_timer(SWAP_OUT_GRADIENT_TIMER) pinned_buffers = gradient_swapper.release_buffers() self.swap_buffer_manager.free(pinned_buffers) self._stop_timer(SWAP_OUT_GRADIENT_TIMER) self.timer_names.add(SWAP_OUT_GRADIENT_TIMER) self.timer_names.update(gradient_swapper.get_timer_names()) def _swap_out_gradients(self, parameter, gradient_offsets, gradient_tensors, gradient_swapper): if not id(parameter) in self.swap_params_info.keys(): return swap_info = self.swap_params_info[id(parameter)] swappable_tensors = [] swappable_offsets = [] swappable_lengths = [] aligned_gradients, aligned_offsets = self._adjust_for_misaligned_lengths(tensors=gradient_tensors, offsets=gradient_offsets) self._start_timer(SWAP_OUT_GRADIENT_TIMER) for tensor, offset in zip(aligned_gradients, aligned_offsets): if not self.swappable_tensor(param=tensor): swap_info.unswapped_gradients[offset] = tensor continue swappable_tensors.append(tensor) swappable_offsets.append(offset) swappable_lengths.append(tensor.numel()) if len(swappable_tensors) > 0: if not gradient_swapper.has_buffers(): pinned_buffers = self.swap_buffer_manager.allocate_all(num_elems=self.largest_numel, dtype=self.dtype) gradient_swapper.add_buffers(pinned_buffers) swappable_paths = swap_info.get_or_create_gradient_paths(swappable_offsets, swappable_lengths) gradient_swapper.swap_out_tensors(tensor_list=swappable_tensors, path_list=swappable_paths) self._stop_timer(SWAP_OUT_GRADIENT_TIMER) self.timer_names.add(SWAP_OUT_GRADIENT_TIMER) def _initialize_from_swapped_fp16_params(self, aio_handle, fp16_partitions_info, fp16_num_elems, fp16_pinned_buffers, fp32_parameters): assert len(fp32_parameters) == len(fp16_partitions_info) assert len(fp32_parameters) == len(fp16_num_elems) assert all([buffer.is_pinned() for buffer in fp16_pinned_buffers]) fp32_swap_paths = self._get_swap_paths(parameters=fp32_parameters, num_elems=fp16_num_elems) fp32_pinned_buffers = self.swap_buffer_manager.allocate_all(num_elems=self.largest_numel, dtype=self.dtype) fp16_buffer_numel = [buf.numel() for buf in fp16_pinned_buffers] assert all([numel >= self.largest_numel for numel in fp16_buffer_numel]), \ f"numel of fp16 buffers {fp16_buffer_numel} 
is too small for initializing fp32 params {self.largest_numel}" fp32_swap_buffers = SwapBufferPool(fp32_pinned_buffers) fp16_swap_buffers = SwapBufferPool(fp16_pinned_buffers) curr_index = 0 while curr_index < len(fp32_parameters): fp16_pinned_tensors = self._swap_in_fp16_params(aio_handle=aio_handle, fp16_num_elems=fp16_num_elems[curr_index:], fp16_partitions_info=fp16_partitions_info[curr_index:], fp16_swap_buffers=fp16_swap_buffers) if dist.get_rank() == 0 and SWAPPER_DEBUG_MODE: for i, tensor in enumerate(fp16_pinned_tensors): true_index = curr_index + i logger.info( f'swap_in_fp16_param: fp32_id = {id(fp32_parameters[true_index])} index = {true_index} orig_num_elem = {fp16_num_elems[true_index]}, swap_num_elem = {fp16_pinned_tensors[i].numel()}' ) swap_out_count = self._swap_out_fp16_params(aio_handle=aio_handle, fp32_swap_paths=fp32_swap_paths[curr_index:], fp32_swap_buffers=fp32_swap_buffers, fp16_pinned_tensors=fp16_pinned_tensors) assert swap_out_count == len(fp16_pinned_tensors), \ f"{swap_out_count} does not match {len(fp16_pinned_tensors)}" fp16_swap_buffers.reset() fp32_swap_buffers.reset() curr_index += swap_out_count self.swap_buffer_manager.free(fp32_pinned_buffers) def _swap_in_fp16_params(self, aio_handle, fp16_num_elems, fp16_partitions_info, fp16_swap_buffers): assert len(fp16_num_elems) > 0 swapped_fp16_tensors = [] swap_tensors = [] swap_paths = [] unswapped_srcs = [] unswapped_dsts = [] for i, numel in enumerate(fp16_num_elems): pinned_tensor, _ = fp16_swap_buffers.allocate_tensor(numel, None, numel) if pinned_tensor is None: break swapped_fp16_tensors.append(pinned_tensor) offset = 0 for tensor, partition_numel, partition_path in fp16_partitions_info[i]: dst_tensor = pinned_tensor.narrow(0, offset, partition_numel) if partition_path is None: unswapped_srcs.append(tensor) unswapped_dsts.append(dst_tensor) else: swap_paths.append(partition_path) swap_tensors.append(dst_tensor) offset += partition_numel assert len(swapped_fp16_tensors) + len(unswapped_srcs) > 0 ret = swap_in_tensors(aio_handle, swap_tensors, swap_paths) for src, dst in zip(unswapped_srcs, unswapped_dsts): dst.data.copy_(src.data) assert len(swap_tensors) == aio_handle.wait() return swapped_fp16_tensors def _swap_out_fp16_params(self, aio_handle, fp32_swap_paths, fp32_swap_buffers, fp16_pinned_tensors): assert len(fp16_pinned_tensors) <= len(fp32_swap_paths) swap_out_count = 0 for i, fp16_tensor in enumerate(fp16_pinned_tensors): if not fp32_swap_buffers.has_space(fp16_tensor.numel()): fp32_swap_buffers.swap_out(aio_handle) fp32_swap_buffers.reset() pinned_tensor, _ = fp32_swap_buffers.insert_tensor(fp16_tensor, fp32_swap_paths[i], self._io_aligned_numel(fp16_tensor.numel())) assert pinned_tensor is not None swap_out_count += 1 if len(fp32_swap_buffers.get_swap_tensors()) > 0: fp32_swap_buffers.swap_out(aio_handle) return swap_out_count def _initialize_parameters(self, parameters, src_tensors, aio_handle): assert len(parameters) == len(src_tensors) swap_paths = self._get_swap_paths(parameters=parameters, num_elems=[src.numel() for src in src_tensors]) SWAP_INIT_TIMER = "swap_init_write" self._start_timer(SWAP_INIT_TIMER) pinned_buffers = self.swap_buffer_manager.allocate_all(num_elems=self.largest_numel, dtype=self.dtype) assert pinned_buffers is not None self._swap_out_unpinned_tensors(aio_handle=aio_handle, unpinned_tensors=src_tensors, dest_paths=swap_paths, pinned_buffers=pinned_buffers) if dist.get_rank() == 0 and SWAPPER_DEBUG_MODE: for i, tensor in enumerate(src_tensors): logger.info( 
f'copy_in_fp16_param: fp32_id = {id(parameters[i])} index = {i}, swap_num_elem = {src_tensors[i].numel()}' ) self.swap_buffer_manager.free(pinned_buffers) self._stop_timer(SWAP_INIT_TIMER) self._log_timers([SWAP_INIT_TIMER]) def _get_swap_paths(self, parameters, num_elems): swap_info_list = [ self._create_param_swap_info(parameter=p, numel=numel) \ for p, numel in zip(parameters, num_elems) ] assert len(swap_info_list) == len(num_elems) swap_paths = [info.swap_paths[0] for info in swap_info_list] return swap_paths def _swap_out_unpinned_tensors(self, aio_handle, unpinned_tensors, dest_paths, pinned_buffers): swap_buffer_count = len(pinned_buffers) unpinned_tensor_count = len(unpinned_tensors) for i in range(0, unpinned_tensor_count, swap_buffer_count): swap_tensor_count = min((unpinned_tensor_count - i), swap_buffer_count) src_tensors = unpinned_tensors[i:(i + swap_tensor_count)] compute_lengths = [t.numel() for t in src_tensors] compute_buffers = get_sized_buffers(pinned_buffers, compute_lengths) for dst, src in zip(compute_buffers, src_tensors): dst.data.copy_(src.data) swap_lengths = [self._io_aligned_numel(t.numel()) for t in src_tensors] swap_buffers = get_sized_buffers(pinned_buffers, swap_lengths) swap_paths = dest_paths[i:(i + swap_tensor_count)] swap_out_tensors(aio_handle, swap_buffers, swap_paths) assert aio_handle.wait() == swap_tensor_count def _adjust_for_misaligned_lengths(self, tensors, offsets): new_tensors = [] new_offsets = [] for orig_tensor, orig_offset in zip(tensors, offsets): if not self.swappable_tensor(param=orig_tensor): new_tensors.append(orig_tensor) new_offsets.append(orig_offset) continue remainder = orig_tensor.numel() % self.numel_alignment if remainder == 0: new_tensors.append(orig_tensor) new_offsets.append(orig_offset) continue # Split into two by making remainder a tensor aligned_length = (orig_tensor.numel() // self.numel_alignment) * self.numel_alignment new_tensors.append(orig_tensor.narrow(0, 0, aligned_length)) new_offsets.append(orig_offset) # remainder tensor new_tensors.append(orig_tensor.narrow(0, aligned_length, remainder)) new_offsets.append(orig_offset + aligned_length) return new_tensors, new_offsets def _retrieve_unswapped_grad_partitions(self, swap_info, dest_buffer): UNSWAPPED_READ_GRADIENTS = 'unswapped_read_gradients' self._start_timer(UNSWAPPED_READ_GRADIENTS) tensor_count = len(swap_info.unswapped_gradients) num_elem_count = swap_info.read_unswapped_gradients(dest_buffer) self._stop_timer(UNSWAPPED_READ_GRADIENTS) self._log_timers([UNSWAPPED_READ_GRADIENTS]) # It should be safe to discard unswapped gradient partitions swap_info.release_unswapped_gradients() if SWAPPER_DEBUG_MODE: logger.info( f'optimizer_retrieve_unswapped_gradients: param={swap_info.param_id} tensor_count={tensor_count} elem_count={num_elem_count}' ) def _get_state_tensors(self, parameter): if not parameter in self.optimizer.state: return [] tensor_list = [] for value in self.optimizer.state[parameter].values(): if torch.is_tensor(value): tensor_list.append(value) return tensor_list def _update_param_state_info(self, swap_info, parameter): if not swap_info.has_state_tensors: state_tensors = self._get_state_tensors(parameter) if state_tensors: swap_info.add_state_tensors(state_tensors) def _create_param_swap_info(self, parameter, numel): param_id = id(parameter) assert not param_id in self.swap_params_info self.swap_params_info[param_id] = OptimizerStateSwapInfo(parameter=parameter, numel=numel, base_folder=self.swap_folder) swap_info = 
self.swap_params_info[param_id] self._update_param_state_info(swap_info, parameter) return swap_info def _get_param_swap_info(self, parameter): param_id = id(parameter) swap_info = self.swap_params_info.get(param_id, None) if swap_info is not None: self._update_param_state_info(swap_info, parameter) return swap_info def _start_timer(self, name): if self.timers: self.timers(name).start() def _stop_timer(self, name): if self.timers: self.timers(name).stop() def _log_timers(self, name_list, force=False): if self.timers and (SWAPPER_DEBUG_MODE or force): self.timers.log(name_list) def _io_aligned_numel(self, numel): remainder = numel % self.numel_alignment return numel if remainder == 0 else (numel + self.numel_alignment - remainder)
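# OptimizerSwapper above rounds every swap request up to an I/O-aligned element count:
# numel_alignment = (AIO_ALIGNED_BYTES * aio thread count) // element_size, and
# _io_aligned_numel() pads numel up to the next multiple of that alignment. The helper
# below reproduces just that arithmetic; the 1024-byte alignment and thread count of 1
# are assumptions for this illustration (the real values come from swap_tensor.utils and
# the aio config).
import torch

ASSUMED_AIO_ALIGNED_BYTES = 1024  # assumption for the example
ASSUMED_AIO_THREAD_COUNT = 1      # matches AIO_THREAD_COUNT_DEFAULT

def io_aligned_numel(numel, dtype=torch.float16):
    element_size = torch.tensor([], dtype=dtype).element_size()
    numel_alignment = (ASSUMED_AIO_ALIGNED_BYTES * ASSUMED_AIO_THREAD_COUNT) // element_size
    remainder = numel % numel_alignment
    return numel if remainder == 0 else numel + numel_alignment - remainder

# e.g. a 1000-element fp16 tensor is padded to 1024 elements (512-element alignment).
assert io_aligned_numel(1000) == 1024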
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team """ Functionality of swapping optimizer tensors to/from (NVMe) storage devices. """ from deepspeed.ops.op_builder import AsyncIOBuilder from deepspeed import comm as dist from deepspeed.runtime.swap_tensor.constants import * from deepspeed.runtime.swap_tensor.utils import swap_in_tensors, swap_out_tensors, print_object from deepspeed.runtime.swap_tensor.async_swapper import AsyncTensorSwapper from deepspeed.runtime.swap_tensor.utils import get_sized_buffer from deepspeed.runtime.swap_tensor.optimizer_utils import OptimizerSwapper class OptimizerSwapOp(object): def __init__(self, aio_handle, read_op, param_info, allocated_buffers, state_buffers, num_ops): self.aio_handle = aio_handle self.read_op = read_op self.param_info = param_info self.allocated_buffers = allocated_buffers self.state_buffers = state_buffers self.wait_required = True self.num_ops = num_ops def is_parameter(self, parameter): return id(parameter) == self.param_info.param_id def wait(self): assert self.wait_required assert self.aio_handle.wait() == self.num_ops self.wait_required = False SYNC_SWAP_IN = 'sync_swap_in' ASYNC_SWAP_IN = 'async_swap_in' SYNC_SWAP_OUT = 'sync_swap_out' ASYNC_SWAP_OUT = 'async_swap_out' SWAP_IN_STATE_TIMER = 'swap_in_state' SWAP_OUT_STATE_TIMER = 'swap_out_state' SWAP_OUT_GRADIENT_TIMER = 'swap_out_gradient' ASYNC_SWAP_IN_STATE_TIMER = "async_swap_in_state" ASYNC_SWAP_OUT_STATE_TIMER = 'async_swap_out_state' class PipelinedOptimizerSwapper(OptimizerSwapper): def __init__(self, swap_config, aio_config, base_folder, optimizer, largest_numel, device, dtype, timers): super(PipelinedOptimizerSwapper, self).__init__(swap_config, aio_config, base_folder, optimizer, largest_numel, device, dtype, timers) aio_op = AsyncIOBuilder().load() self.write_aio_handle = aio_op.aio_handle(aio_config[AIO_BLOCK_SIZE], aio_config[AIO_QUEUE_DEPTH], aio_config[AIO_SINGLE_SUBMIT], aio_config[AIO_OVERLAP_EVENTS], aio_config[AIO_THREAD_COUNT]) self.read_aio_handle = aio_op.aio_handle(aio_config[AIO_BLOCK_SIZE], aio_config[AIO_QUEUE_DEPTH], aio_config[AIO_SINGLE_SUBMIT], aio_config[AIO_OVERLAP_EVENTS], aio_config[AIO_THREAD_COUNT]) # Overlap gradient swap out self.gradient_swapper = AsyncTensorSwapper(aio_handle=self.write_aio_handle, numel_alignment=self.numel_alignment, timers=self.timers) self.async_swap_in = swap_config.pipeline_read self.async_swap_out = swap_config.pipeline_write self.swap_ops = {SYNC_SWAP_IN: None, ASYNC_SWAP_IN: None, SYNC_SWAP_OUT: None, ASYNC_SWAP_OUT: None} self.print_exclude_list += [ 'gradient_swapper', 'read_aio_handle', 'write_aio_handle', 'swap_ops', 'print_exclude_list' ] if dist.get_rank() == 0: print_object(obj=self, name='PipelinedOptimizerSwapper', exclude_list=self.print_exclude_list) def initialize_parameters(self, parameters, src_tensors): self._initialize_parameters(parameters=parameters, src_tensors=src_tensors, aio_handle=self.write_aio_handle) def initialize_from_swapped_fp16_params(self, fp16_partitions_info, fp16_num_elems, fp16_pinned_buffers, fp32_parameters): self._initialize_from_swapped_fp16_params(aio_handle=self.write_aio_handle, fp16_partitions_info=fp16_partitions_info, fp16_num_elems=fp16_num_elems, fp16_pinned_buffers=fp16_pinned_buffers, fp32_parameters=fp32_parameters) def flush_gradients(self): self._flush_gradient_swapper(self.gradient_swapper) def swap_in_optimizer_state(self, parameter, async_parameter): assert parameter is not None assert self.swap_ops[SYNC_SWAP_IN] is 
None self._flush_gradient_swapper(self.gradient_swapper) self._start_timer(SWAP_IN_STATE_TIMER) if self.swap_ops[ASYNC_SWAP_IN]: assert self.swap_ops[ASYNC_SWAP_IN].is_parameter(parameter) self.swap_ops[SYNC_SWAP_IN] = self.swap_ops[ASYNC_SWAP_IN] self.swap_ops[ASYNC_SWAP_IN] = None else: self.swap_ops[SYNC_SWAP_IN] = self._swap_in_optimizer_state(aio_handle=self.read_aio_handle, parameter=parameter) if self.swap_ops[SYNC_SWAP_IN]: self.swap_ops[SYNC_SWAP_IN].wait() if self.async_swap_in and async_parameter is not None: assert self.swap_ops[ASYNC_SWAP_IN] is None self.swap_ops[ASYNC_SWAP_IN] = self._swap_in_optimizer_state(aio_handle=self.read_aio_handle, parameter=async_parameter) self._stop_timer(SWAP_IN_STATE_TIMER) self.timer_names.add(SWAP_IN_STATE_TIMER) def swap_out_optimizer_state(self, parameter, async_swap): self._start_timer(SWAP_OUT_STATE_TIMER) if self.swap_ops[ASYNC_SWAP_OUT]: self._start_timer(ASYNC_SWAP_OUT_STATE_TIMER) self._complete_swap_out(ASYNC_SWAP_OUT) self._stop_timer(ASYNC_SWAP_OUT_STATE_TIMER) self.timer_names.add(ASYNC_SWAP_OUT_STATE_TIMER) assert self.swap_ops[SYNC_SWAP_IN] is not None assert not self.swap_ops[SYNC_SWAP_IN].wait_required swap_op = self._swap_out_optimizer_state(aio_handle=self.write_aio_handle, parameter=parameter, swap_in_op=self.swap_ops[SYNC_SWAP_IN]) self.swap_ops[SYNC_SWAP_IN] = None if self.async_swap_out and async_swap: self.swap_ops[ASYNC_SWAP_OUT] = swap_op else: self.swap_ops[SYNC_SWAP_OUT] = swap_op self._complete_swap_out(SYNC_SWAP_OUT) self._stop_timer(SWAP_OUT_STATE_TIMER) self.timer_names.add(SWAP_OUT_STATE_TIMER) def swap_out_gradients(self, parameter, gradient_offsets, gradient_tensors): self._swap_out_gradients(parameter=parameter, gradient_offsets=gradient_offsets, gradient_tensors=gradient_tensors, gradient_swapper=self.gradient_swapper) def _complete_swap_out(self, swap_out_type): self.swap_ops[swap_out_type].wait() self.swap_buffer_manager.free(self.swap_ops[swap_out_type].allocated_buffers) self.swap_ops[swap_out_type] = None def _swap_out_optimizer_state(self, aio_handle, parameter, swap_in_op): assert swap_in_op.is_parameter(parameter) allocated_buffers = swap_in_op.allocated_buffers.copy() swap_buffers = swap_in_op.state_buffers.copy() param_info = swap_in_op.param_info self._update_param_state_info(param_info, parameter) unpinned_tensors = param_info.get_unpinned_state_tensors() if len(unpinned_tensors) > 0: new_alloc_buffers = self.swap_buffer_manager.allocate(num_elems=self._io_aligned_numel(param_info.numel()), count=len(unpinned_tensors), dtype=param_info.dtype()) assert new_alloc_buffers is not None allocated_buffers += new_alloc_buffers swap_buffers += new_alloc_buffers for pinned_dst, unpinned_src in zip(new_alloc_buffers, unpinned_tensors): dst = get_sized_buffer(pinned_dst, unpinned_src.numel()) dst.data.copy_(unpinned_src.data) swap_paths = param_info.swap_paths.copy() assert len(swap_paths) == len(swap_buffers) swap_out_tensors(aio_handle, swap_buffers, swap_paths) swap_out_op = OptimizerSwapOp(aio_handle=aio_handle, param_info=param_info, read_op=False, allocated_buffers=allocated_buffers, state_buffers=swap_buffers, num_ops=len(swap_buffers)) return swap_out_op def _swap_in_optimizer_state(self, aio_handle, parameter): param_info = self._get_param_swap_info(parameter) if param_info is None: return None required_buffer_count = len(param_info.tensors) + (1 if param_info.has_gradients() else 0) aligned_numel = self._io_aligned_numel(param_info.numel()) allocated_buffers = 
self.swap_buffer_manager.allocate(num_elems=aligned_numel, count=required_buffer_count, dtype=parameter.dtype) assert allocated_buffers is not None, \ f"PipelinedOptimizerSwapper ran out of swap buffers, try increasing 'buffer_count'" state_buffers = allocated_buffers[:len(param_info.tensors)] param_info.set_swap_buffers(state_buffers) swap_buffers = state_buffers.copy() swap_paths = param_info.swap_paths.copy() if param_info.has_gradients(): parameter.grad = allocated_buffers[-1].narrow(0, 0, param_info.numel()) if param_info.swapped_gradients: swap_buffers += param_info.get_swap_gradient_buffers(parameter.grad) swap_paths += param_info.get_swap_gradient_paths() swap_in_tensors(aio_handle, swap_buffers, swap_paths) if param_info.unswapped_gradients: self._retrieve_unswapped_grad_partitions(swap_info=param_info, dest_buffer=parameter.grad) swap_in_op = OptimizerSwapOp(aio_handle=aio_handle, param_info=param_info, read_op=True, allocated_buffers=allocated_buffers, state_buffers=state_buffers, num_ops=len(swap_buffers)) return swap_in_op
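# PipelinedOptimizerSwapper overlaps NVMe traffic with computation: while the optimizer
# steps parameter i, the state of parameter i+1 can be prefetched (pipeline_read) and the
# previous write drained lazily (pipeline_write). The toy loop below only illustrates that
# call pattern; `swapper`, `optimizer_step_on`, and `params` are hypothetical stand-ins
# supplied by the caller, not DeepSpeed APIs.
def pipelined_step(swapper, optimizer_step_on, params):
    for i, param in enumerate(params):
        lookahead = params[i + 1] if i + 1 < len(params) else None
        # Blocks until `param`'s state is resident; may start an async read for `lookahead`.
        swapper.swap_in_optimizer_state(parameter=param, async_parameter=lookahead)
        optimizer_step_on(param)
        # Write updated state back; keep the final write synchronous so it is drained.
        swapper.swap_out_optimizer_state(parameter=param, async_swap=(lookahead is not None))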
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team """ AIO """ AIO_FORMAT = ''' "aio": { "block_size": 1048576, "queue_depth": 8, "thread_count": 1, "single_submit": false, "overlap_events": true } ''' AIO = "aio" AIO_BLOCK_SIZE = "block_size" AIO_BLOCK_SIZE_DEFAULT = 1048576 AIO_QUEUE_DEPTH = "queue_depth" AIO_QUEUE_DEPTH_DEFAULT = 8 AIO_THREAD_COUNT = "thread_count" AIO_THREAD_COUNT_DEFAULT = 1 AIO_SINGLE_SUBMIT = "single_submit" AIO_SINGLE_SUBMIT_DEFAULT = False AIO_OVERLAP_EVENTS = "overlap_events" AIO_OVERLAP_EVENTS_DEFAULT = True
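# The AIO_FORMAT string above mirrors the "aio" section of a DeepSpeed config file. As a
# quick sketch, the documented default block can be recovered as a dict by wrapping the
# string in braces and parsing it; the values printed are simply the defaults defined above.
import json

from deepspeed.runtime.swap_tensor.constants import AIO_FORMAT

# json.loads needs a top-level object, so wrap the fragment before parsing.
default_aio_section = json.loads('{' + AIO_FORMAT + '}')['aio']
print(default_aio_section)  # {'block_size': 1048576, 'queue_depth': 8, ...}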
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team """ Functionality of swapping tensors to/from (NVMe) storage devices. """ import os import shutil from enum import Enum import torch from deepspeed import comm as dist from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import AsyncIOBuilder from .constants import * from .utils import swap_in_tensors, swap_out_tensors, MIN_AIO_BYTES, AIO_ALIGNED_BYTES, print_object, SwapBufferPool def print_rank_0(message, debug=False, force=False): if dist.get_rank() == 0 and (debug or force): print(message) class PartitionedParamStatus(Enum): # Partitioned parameters are present and ready for use AVAILABLE = 1 # partitioned params are in some non-memory device NOT_AVAILABLE = 2 # partitioned params are being read from some non-memory device. INFLIGHT = 3 class AsyncPartitionedParameterSwapper(object): def __init__(self, ds_config, model_dtype): aio_op = AsyncIOBuilder().load(verbose=False) self.aio_handle = aio_op.aio_handle self.dtype = model_dtype #set swap buffers, create aio handles self._configure_aio(ds_config) #mapping from param id to path self.id_to_path = {} #mapping from pram_id to buffer id self.param_id_to_buffer_id = {} # mapping from param_id to swap buffer self.param_id_to_swap_buffer = {} #number of elements in the param self.param_id_to_numel = {} self.pending_writes = 0 self.pending_reads = 0 #keep track of async swap in params and buffers self.inflight_params = [] self.inflight_swap_in_buffers = [] self.inflight_numel = 0 #keep track of available params self.available_params = set() self.available_numel = 0 # for swapping out from partitioned fp32 params self.partitioned_swap_buffer = None self.partitioned_swap_pool = None self.invalid_buffer = torch.tensor(1).half() if dist.get_rank() == 0: exclude_list = ['aio_read_handle', 'aio_write_handle', 'buffers'] print_object(obj=self, name='AsyncPartitionedParameterSwapper', exclude_list=exclude_list) def available_swap_in_buffers(self): return len(self.available_buffer_ids) def _configure_aio(self, ds_config): self.swap_config = ds_config.zero_config.offload_param torch_dtype_string = str(self.dtype).split(".")[1] self.swap_folder = os.path.join(self.swap_config.nvme_path, 'zero_stage_3', f'{torch_dtype_string}params', f'rank{dist.get_rank()}') shutil.rmtree(self.swap_folder, ignore_errors=True) os.makedirs(self.swap_folder, exist_ok=True) self.swap_element_size = torch.tensor([], dtype=self.dtype).element_size() self.aio_config = ds_config.aio_config # Read/Write alignment for each thread during Intra-request parallelism self.min_aio_bytes = max(MIN_AIO_BYTES, self.aio_config[AIO_BLOCK_SIZE]) self.aligned_bytes = AIO_ALIGNED_BYTES * self.aio_config[AIO_THREAD_COUNT] self.numel_alignment = self.aligned_bytes // self.swap_element_size self.elements_per_buffer = self.swap_config.buffer_size self.aligned_elements_per_buffer = self._io_aligned_numel(self.elements_per_buffer) self.param_buffer_count = self.swap_config.buffer_count self.available_buffer_ids = [i for i in range(self.param_buffer_count)] self.reserved_buffer_ids = [] self.buffers = get_accelerator().pin_memory( torch.empty(int(self.aligned_elements_per_buffer * self.param_buffer_count), dtype=self.dtype, requires_grad=False)) self.aio_read_handle = self.aio_handle(self.aio_config[AIO_BLOCK_SIZE], self.aio_config[AIO_QUEUE_DEPTH], self.aio_config[AIO_SINGLE_SUBMIT], self.aio_config[AIO_OVERLAP_EVENTS], self.aio_config[AIO_THREAD_COUNT]) self.aio_write_handle = 
self.aio_handle(self.aio_config[AIO_BLOCK_SIZE], self.aio_config[AIO_QUEUE_DEPTH], self.aio_config[AIO_SINGLE_SUBMIT], self.aio_config[AIO_OVERLAP_EVENTS], self.aio_config[AIO_THREAD_COUNT]) self.swap_out_params = [] #Check if partitioned param or numel in a tensor is swappable or not def swappable_tensor(self, param=None, numel=None): if param is not None: assert numel is None, "Both parma and numel cannot be provided" numel = param.ds_tensor.ds_numel if numel is not None: return self.min_aio_bytes <= numel * self.swap_element_size assert False, "Either param or numel must be provided" def get_path(self, param, must_exist=False): paths = self._get_swap_paths([param], must_exist=must_exist) return paths[0] def _get_swap_paths(self, params, must_exist=False): paths = [] for param in params: param_id = param.ds_id if param_id in self.id_to_path.keys(): param_path = self.id_to_path[param_id] else: assert not must_exist, f"Path for param id {param_id} does not exist" param_path = os.path.join(self.swap_folder, f'{param_id}_param.tensor.swp') self.id_to_path[param_id] = param_path paths.append(param_path) return paths def _get_swap_buffers(self, params): buffers = [] for param in params: param_id = param.ds_id assert param_id in self.param_id_to_swap_buffer.keys(), \ f'param {param_id} has not been assigned a swap buffer' buffers.append(self.param_id_to_swap_buffer[param_id]) return buffers def _track_numel(self, params): for param in params: assert param.ds_tensor is not None, "Partitioned tensor is None" self.param_id_to_numel[param.ds_id] = param.ds_tensor.ds_numel def _allocate_and_return_buffers_for_swap_in(self, params): compute_buffers = [] swap_buffers = [] for param in params: param_id = param.ds_id assert param_id in self.param_id_to_numel.keys(), f" Number of elements in param {param_id} is unknown" assert param_id not in self.param_id_to_buffer_id.keys( ), f"param {param_id} already assigned swap buffer id {self.param_id_to_buffer_id[param_id]}" assert param_id not in self.param_id_to_swap_buffer.keys( ), f"param {param_id} has already been assigned a swap buffer" buffer_id = self.available_buffer_ids.pop() print_rank_0(f"param {param.ds_id} is assigned swap in buffer id {buffer_id} ") self.param_id_to_buffer_id[param_id] = buffer_id aligned_swap_numel = self._io_aligned_numel(self.param_id_to_numel[param_id]) swap_buffer = self.buffers.narrow(0, int(buffer_id * self.aligned_elements_per_buffer), aligned_swap_numel) self.param_id_to_swap_buffer[param_id] = swap_buffer compute_buffer = swap_buffer.narrow(0, 0, self.param_id_to_numel[param_id]) compute_buffers.append(compute_buffer) swap_buffers.append(swap_buffer) return compute_buffers, swap_buffers #waits for inflight nvme write to complete def synchronize_writes(self): if self.pending_writes == 0: return assert self.pending_writes == self.aio_write_handle.wait() self.pending_writes = 0 self.remove_partition_and_release_buffers(self.swap_out_params) self.swap_out_params = [] #waits for inflight nvme reads to complete def synchronize_reads(self): if self.pending_reads == 0: return assert self.pending_reads == self.aio_read_handle.wait() self.pending_reads = 0 for param, swap_in_buffer in zip(self.inflight_params, self.inflight_swap_in_buffers): param_id = param.ds_id compute_buffer = swap_in_buffer.narrow(0, 0, self.param_id_to_numel[param_id]) param.ds_tensor.data = compute_buffer.data param.ds_tensor.status = PartitionedParamStatus.AVAILABLE self.available_params.update([param.ds_id for param in self.inflight_params]) 
self.available_numel += self.inflight_numel self.inflight_params = [] self.inflight_swap_in_buffers = [] self.inflight_numel = 0 #Removes the memory assignment and releases the buffers #Should only be executed after swapping out the tensors def remove_partition_and_release_buffers(self, params): for param in params: param_id = param.ds_id if param_id in self.param_id_to_buffer_id.keys(): buffer_id = self.param_id_to_buffer_id[param_id] assert buffer_id is not None, "Missing buffer id for releasing" self.available_buffer_ids.append(buffer_id) del self.param_id_to_buffer_id[param_id] del self.param_id_to_swap_buffer[param_id] print_rank_0(f"param {param.ds_id} releases buffer id {buffer_id} ") if param_id in self.available_params: self.available_params.remove(param_id) self.available_numel -= self.param_id_to_numel[param_id] param.ds_tensor.data = self.invalid_buffer.data param.ds_tensor.status = PartitionedParamStatus.NOT_AVAILABLE #writes from in memory to nvme. Does not release the buffers def _swap_out(self, params, async_op=True): swap_out_paths = self._get_swap_paths(params) swap_out_params = self._get_swap_buffers(params) self._track_numel(params) swap_out_tensors(self.aio_write_handle, swap_out_params, swap_out_paths) self.pending_writes += len(swap_out_params) self.swap_out_params += params if not async_op: self.synchronize_writes() #blocking swap out followed by releasing the memory buffers def swap_out_and_release(self, params, async_op=False, force_buffer_release=False): if async_op: assert force_buffer_release, "Should not release preallocated buffers without completing the swap out. Set force_buffer_release to True to do it anyways" self._swap_out(params, async_op=async_op) # book keeping function for inflight swap in def _update_inflight_swap_in(self, params, swap_in_buffers, inflight_numel): self.inflight_params.extend(params) self.inflight_swap_in_buffers.extend(swap_in_buffers) self.inflight_numel += inflight_numel for param in params: param.ds_tensor.status = PartitionedParamStatus.INFLIGHT self.pending_reads += len(params) #assigns an in memory buffer and swaps in from nvme def swap_in(self, params, async_op=True, swap_in_buffers=None): assert all([param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE for param in params]), "Some params are already available or in flight" swap_in_paths = self._get_swap_paths(params) if swap_in_buffers is None: if len(self.available_buffer_ids) < len(swap_in_paths): ids = [p.ds_id for p in params] print_rank_0( f'Not enough swap in buffers {len(self.available_buffer_ids)} for {len(swap_in_paths)} params, ids = {ids}', force=True) print_rank_0( f'Num inflight: params {len(self.inflight_params)}, buffers {len(self.inflight_swap_in_buffers)}, numel = {self.inflight_numel}', force=True) print_rank_0( f'Num available params: count = {len(self.available_params)}, ids = {self.available_params}, numel = {self.available_numel}', force=True) assert len(swap_in_paths) <= len( self.available_buffer_ids ), f"Not enough buffers {len(self.available_buffer_ids)} for swapping {len(swap_in_paths)}" compute_buffers, swap_in_buffers = self._allocate_and_return_buffers_for_swap_in(params) inflight_numel = sum([t.numel() for t in compute_buffers]) else: inflight_numel = sum([t.numel() for t in swap_in_buffers]) swap_in_tensors(self.aio_read_handle, swap_in_buffers, swap_in_paths) self._update_inflight_swap_in(params, swap_in_buffers, inflight_numel) if not async_op: self.synchronize_reads() # Enables swapping into buffer that is out the control of 
swapper. This is always synchronous def swap_into_buffer(self, param, dest_buffer): assert param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE, f"param {param.ds_id} is already available or inflight" require_swap_buffer = not (dest_buffer.is_pinned() and self._is_io_aligned(dest_buffer.numel())) if require_swap_buffer: assert len(self.available_buffer_ids) > 0, f"No buffer available to swap param {param.ds_id}." compute_buffers, swap_in_buffers = self._allocate_and_return_buffers_for_swap_in([param]) inflight_numel = compute_buffers[0].numel() else: swap_in_buffers = [dest_buffer] inflight_numel = dest_buffer.numel() swap_in_paths = self._get_swap_paths([param]) swap_in_tensors(self.aio_read_handle, swap_in_buffers, swap_in_paths) self._update_inflight_swap_in([param], swap_in_buffers, inflight_numel) self.synchronize_reads() if require_swap_buffer: dest_buffer.data.copy_(param.ds_tensor.data) # Release swap buffer memory assignment. Note, this will mark the parameter not available. self.remove_partition_and_release_buffers([param]) #assign a buffer to a param and return the buffer def get_buffer(self, param, numel): param_id = param.ds_id assert self.available_swap_in_buffers( ) > 0, f"No swap buffers to allocate for fp16 param {param_id} of numel = {numel}" assert numel < self.elements_per_buffer, f"More elements {numel} than buffer size {self.elements_per_buffer}" self.param_id_to_numel[param_id] = numel buffer_id = self.available_buffer_ids.pop() self.param_id_to_buffer_id[param_id] = buffer_id aligned_swap_numel = self._io_aligned_numel(self.param_id_to_numel[param_id]) swap_buffer = self.buffers.narrow(0, int(buffer_id * self.aligned_elements_per_buffer), aligned_swap_numel) self.param_id_to_swap_buffer[param_id] = swap_buffer compute_buffer = swap_buffer.narrow(0, 0, self.param_id_to_numel[param_id]) print_rank_0(f"param {param.ds_id} is assigned swap in buffer id {buffer_id}") return compute_buffer def reserve_available_buffers(self): buffers = [] for id in self.available_buffer_ids: buffers.append( self.buffers.narrow(0, int(id * self.aligned_elements_per_buffer), int(self.aligned_elements_per_buffer))) self.reserved_buffer_ids.append(id) self.available_buffer_ids = [] return buffers def release_reserved_buffers(self): for id in self.reserved_buffer_ids: self.available_buffer_ids.append(id) self.reserved_buffer_ids = [] def _io_aligned_numel(self, numel): remainder = numel % self.numel_alignment return numel if remainder == 0 else (numel + self.numel_alignment - remainder) def _is_io_aligned(self, numel): return (numel % self.numel_alignment) == 0 def reserve_partitioned_swap_space(self, partition_num_elems): aligned_numel = sum([self._io_aligned_numel(numel) for numel in partition_num_elems]) self.partitioned_swap_buffer = get_accelerator().pin_memory( torch.zeros(aligned_numel, device='cpu', dtype=self.dtype)) self.partitioned_swap_pool = SwapBufferPool([self.partitioned_swap_buffer]) def swap_out_partitioned_params(self, dst_fp16_params, src_fp32_params): assert self.partitioned_swap_buffer is not None, f'partitioned swap buffers for fp16 params not initialized' assert self.partitioned_swap_pool is not None, f'partitioned swap pool for fp16 params not initialized' assert len(dst_fp16_params) == len(src_fp32_params), \ f'mismatch in number of fp16 params {len(dst_fp16_params)} and fp32 params {len(src_fp32_params)}' fp16_swap_paths = self._get_swap_paths(dst_fp16_params, must_exist=True) self.synchronize_writes() self.partitioned_swap_pool.reset() for i, fp32_tensor 
in enumerate(src_fp32_params): swap_tensor, _ = self.partitioned_swap_pool.insert_tensor(fp32_tensor, fp16_swap_paths[i], self._io_aligned_numel(fp32_tensor.numel())) assert swap_tensor is not None dst_fp16_params[i].ds_tensor.status = PartitionedParamStatus.AVAILABLE self.partitioned_swap_pool.swap_out(self.aio_write_handle) for param in dst_fp16_params: param.ds_tensor.status = PartitionedParamStatus.NOT_AVAILABLE
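# Hedged, standalone sketch (not part of the swapper above; all sizes are invented) of the
# pointer arithmetic AsyncPartitionedParameterSwapper relies on: each parameter gets an
# io-aligned "swap" view carved out of one flat (normally pinned) buffer, and the model
# computes on a shorter "compute" view over the same storage.
import torch

AIO_ALIGNED_BYTES = 1024                        # assumed to match the alignment constant in utils
element_size = torch.tensor([], dtype=torch.float16).element_size()
numel_alignment = AIO_ALIGNED_BYTES // element_size
aligned_elements_per_buffer = 4096              # made-up buffer_size from the offload_param config
buffer_count = 4                                # made-up buffer_count

flat_buffers = torch.empty(aligned_elements_per_buffer * buffer_count, dtype=torch.float16)

def io_aligned_numel(numel: int) -> int:
    # round up to the alignment the async I/O engine requires (same logic as _io_aligned_numel)
    remainder = numel % numel_alignment
    return numel if remainder == 0 else numel + numel_alignment - remainder

param_numel, buffer_id = 3000, 1
swap_view = flat_buffers.narrow(0, buffer_id * aligned_elements_per_buffer,
                                io_aligned_numel(param_numel))
compute_view = swap_view.narrow(0, 0, param_numel)      # what param.ds_tensor.data points at
assert compute_view.data_ptr() == swap_view.data_ptr()  # both views share the same storage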
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team """ Functionality of swapping tensors to/from (NVMe) storage devices. """ import torch from deepspeed.utils.logging import logger from deepspeed.accelerator import get_accelerator from deepspeed import comm as dist MIN_AIO_BYTES = 1024**2 AIO_ALIGNED_BYTES = 1024 def swap_in_tensors(swap_handle, tensor_buffers, swap_paths): for buffer, path in zip(tensor_buffers, swap_paths): assert (swap_handle.async_pread(buffer, path) == 0) def swap_out_tensors(swap_handle, tensor_buffers, swap_paths): for buffer, path in zip(tensor_buffers, swap_paths): assert (swap_handle.async_pwrite(buffer, path) == 0) def print_object(obj, name, exclude_list=[]): logger.info('{}:'.format(name)) for arg in sorted(vars(obj)): if not arg in exclude_list: dots = '.' * (29 - len(arg)) logger.info(' {} {} {}'.format(arg, dots, getattr(obj, arg))) class SwapBuffer(object): def __init__(self, buffer): self.buffer = buffer self.reset() def reset(self): self.offset = 0 self.swap_tensors = {} self.compute_tensors = {} self.swap_paths = {} self.num_elem = 0 def insert_tensor(self, tensor, swap_path, aligned_numel): swap_tensor, compute_tensor = self.allocate_tensor(swap_path, tensor.numel(), aligned_numel) compute_tensor.data.copy_(tensor.data) return swap_tensor, compute_tensor def allocate_tensor(self, swap_path, numel, aligned_numel): assert self.has_space(aligned_numel) assert not self.offset in self.swap_tensors allocate_offset = self.offset swap_tensor = self.buffer.narrow(0, allocate_offset, aligned_numel) dest_tensor = swap_tensor.narrow(0, 0, numel) self.swap_tensors[allocate_offset] = swap_tensor self.compute_tensors[allocate_offset] = dest_tensor self.swap_paths[allocate_offset] = swap_path self.offset += aligned_numel self.num_elem += numel return self.swap_tensors[allocate_offset], self.compute_tensors[allocate_offset] def has_space(self, numel): return (self.offset + numel) <= self.buffer.numel() def get_swap_tensors(self): return [tensor for tensor in self.swap_tensors.values()] def get_swap_paths(self): return [path for path in self.swap_paths.values()] def get_compute_tensors(self): return [tensor for tensor in self.compute_tensors.values()] def get_num_elem(self): return self.num_elem def get_swap_tensor(self, offset): return self.swap_tensors.get(offset, None) def get_compute_tensor(self, offset): return self.compute_tensors.get(offset, None) def get_swap_path(self, offset): return self.swap_paths(offset, None) class SwapBufferPool(object): def __init__(self, buffers): assert all([buf.is_pinned() for buf in buffers]) self.buffers = [SwapBuffer(buf) for buf in buffers] self.current_index = 0 def reset(self): self.current_index = 0 for buffer in self.buffers: buffer.reset() def allocate_tensor(self, numel, swap_path, aligned_numel): if self.has_space(aligned_numel): swap_tensor, compute_tensor = self._get_current_buffer().allocate_tensor(swap_path, numel, aligned_numel) return swap_tensor, compute_tensor return None, None def insert_tensor(self, tensor, swap_path, aligned_numel): if self.has_space(aligned_numel): swap_tensor, compute_tensor = self._get_current_buffer().insert_tensor(tensor, swap_path, aligned_numel) return swap_tensor, compute_tensor return None, None def get_swap_tensors(self): swap_tensors = [] for buffer in self._get_used_buffers(): swap_tensors += buffer.get_swap_tensors() return swap_tensors def get_swap_paths(self): swap_paths = [] for buffer in self._get_used_buffers(): swap_paths += 
buffer.get_swap_paths() return swap_paths def get_compute_tensors(self): compute_tensors = [] for buffer in self._get_used_buffers(): compute_tensors += buffer.get_compute_tensors() return compute_tensors def has_space(self, numel): if self._get_current_buffer().has_space(numel): return True if self.current_index == len(self.buffers) - 1: return False self.current_index += 1 return self._get_current_buffer().has_space(numel) def swap_out(self, aio_handle, async_op=False): swap_tensors = self.get_swap_tensors() swap_paths = self.get_swap_paths() assert all([p is not None for p in swap_paths]) swap_out_tensors(aio_handle, swap_tensors, swap_paths) if not async_op: assert len(swap_tensors) == aio_handle.wait() def swap_in(self, aio_handle, async_op=False): swap_tensors = self.get_swap_tensors() swap_paths = self.get_swap_paths() assert all([p is not None for p in swap_paths]) swap_in_tensors(aio_handle, swap_tensors, swap_paths) if not async_op: assert len(swap_tensors) == aio_handle.wait() def _get_current_buffer(self): return self.buffers[self.current_index] def _get_used_buffers(self): return self.buffers[:self.current_index + 1] class SwapBufferManager(object): def __init__(self, num_elems, count, dtype): self.num_elems = num_elems self.count = count self.dtype = dtype self.all_buffers = [ get_accelerator().pin_memory(torch.zeros(num_elems, device='cpu', dtype=dtype)) for _ in range(count) ] self.free_buffer_index = [i for i in range(count)] self.used_buffer_index = {} self.gigabytes = (self.all_buffers[0].element_size() * num_elems * count) / (1024**3) if dist.get_rank() == 0: exclude_list = ['all_buffers'] print_object(obj=self, name='SwapBufferManager', exclude_list=exclude_list) def allocate(self, num_elems, count, dtype): assert dtype == self.dtype assert num_elems <= self.num_elems if count > len(self.free_buffer_index): return None used_indices = self.free_buffer_index[-count:] self.free_buffer_index = self.free_buffer_index[:-count] buffers = [] for i in used_indices: tmp_buffer = self.all_buffers[i].narrow(0, 0, num_elems) buffers.append(tmp_buffer) self.used_buffer_index[id(tmp_buffer)] = i return buffers def allocate_all(self, num_elems, dtype): return self.allocate(num_elems=num_elems, count=len(self.free_buffer_index), dtype=dtype) def free(self, buffers): buffer_ids = [] for buf in buffers: buffer_ids.append(id(buf)) assert all([b_id in self.used_buffer_index for b_id in buffer_ids]) for b_id in buffer_ids: self.free_buffer_index.append(self.used_buffer_index[b_id]) del (self.used_buffer_index[b_id]) def get_sized_buffer(buffer, num_elems): assert num_elems <= buffer.numel(), \ f'num_elems {num_elems} > buffer {buffer.numel()}' return buffer.narrow(0, 0, num_elems) if num_elems < buffer.numel() else buffer def get_sized_buffers(buffer_list, num_elems_list): swap_buffers = [ get_sized_buffer(buffer, num_elems) \ for buffer, num_elems in zip(buffer_list, num_elems_list) ] return swap_buffers
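# Hedged usage sketch for the SwapBuffer helper defined above. Only the in-memory
# bookkeeping is exercised: the backing tensor is plain (unpinned) CPU memory and the swap
# path is a made-up file name, whereas real callers pass pinned buffers and NVMe-backed
# paths before handing the buffer to an aio handle.
import torch
from deepspeed.runtime.swap_tensor.utils import SwapBuffer

buf = SwapBuffer(torch.zeros(8192, dtype=torch.float16))
grad = torch.randn(3000).half()
swap_t, compute_t = buf.insert_tensor(grad, '/tmp/param_0.tensor.swp', aligned_numel=3072)

assert torch.equal(compute_t, grad)                        # compute view holds the copied data
assert swap_t.numel() == 3072                              # swap view is padded to the aligned size
assert buf.get_swap_paths() == ['/tmp/param_0.tensor.swp']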
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team """ Functionality of swapping tensors to/from (NVMe) storage devices. """ import torch from deepspeed import comm as dist from deepspeed.utils.logging import logger from deepspeed.runtime.swap_tensor.utils import swap_out_tensors, SwapBuffer INVALID_BUFFER_INDEX = -1 ASYNC_SWAPPER_WAIT_TIMER = 'async_swap_gradient_wait' class AsyncTensorSwapper(object): def __init__(self, aio_handle, numel_alignment, timers): self.free_buffer_index = [] self.swapping_buffer_index = [] self.ready_buffer_index = [] self.current_buffer_index = INVALID_BUFFER_INDEX self.all_buffers = [] self.aio_handle = aio_handle self.numel_alignment = numel_alignment self.max_numel = 0 self.num_pending_swaps = 0 self.timers = timers self.timer_names = set() self.num_elements_swapped = 0 self.dtype = None def has_buffers(self): return len(self.all_buffers) > 0 def add_buffers(self, buffer_list): assert len(self.all_buffers) == 0 assert all([buffer.is_pinned() for buffer in buffer_list]) dtype = buffer_list[0].dtype assert all([buffer.dtype == dtype for buffer in buffer_list]) self.dtype = dtype self.all_buffers = [SwapBuffer(buffer) for buffer in buffer_list] self.free_buffer_index += [i for i in range(len(self.all_buffers))] self.max_numel = max([buffer.numel() for buffer in buffer_list]) self.timer_names = set() def get_timer_names(self): return list(self.timer_names) def release_buffers(self): self._report_statistics('Swapped out[Before flush]') self._flush_buffers_until_complete() self._report_statistics('Swapped out[After flush]') pinned_buffers = [buf.buffer for buf in self.all_buffers] self.all_buffers = [] self.free_buffer_index = [] self.current_buffer_index = INVALID_BUFFER_INDEX self.num_elements_swapped = 0 self.dtype = None return pinned_buffers def swap_out_tensors(self, tensor_list, path_list): for tensor, swap_path in zip(tensor_list, path_list): self._swap_out_tensor(tensor, swap_path) def _report_statistics(self, message): if dist.get_rank() == 0: element_size = torch.tensor([], dtype=self.dtype).element_size() swapped_GB = (self.num_elements_swapped * element_size) / (1024**3) logger.debug(f'{message} num_elems = {self.num_elements_swapped}, {swapped_GB:5.2f} GB') def _swap_out_tensor(self, tensor, swap_path): assert len(self.all_buffers) > 0 aligned_numel = self._io_aligned_numel(tensor.numel()) assert aligned_numel <= self.max_numel self._make_swap_space(aligned_numel) assert self.current_buffer_index != INVALID_BUFFER_INDEX swap_buffer = self._get_current_buffer() swap_buffer.insert_tensor(tensor, swap_path, aligned_numel) def _make_swap_space(self, numel): if self.current_buffer_index == INVALID_BUFFER_INDEX: self._allocate_buffer() return if not self._get_current_buffer().has_space(numel): if len(self.free_buffer_index) > 0: self._flush_ready_buffers() else: self._flush_buffers_until_complete() self._allocate_buffer() def _io_aligned_numel(self, numel): remainder = numel % self.numel_alignment return numel if remainder == 0 else (numel + self.numel_alignment - remainder) def _allocate_buffer(self): assert self.current_buffer_index == INVALID_BUFFER_INDEX assert len(self.all_buffers) > 0 assert len(self.free_buffer_index) > 0 self.current_buffer_index = self.free_buffer_index[-1] self.free_buffer_index = self.free_buffer_index[:-1] def _flush_ready_buffers(self): if self.current_buffer_index != INVALID_BUFFER_INDEX: self.ready_buffer_index.append(self.current_buffer_index) self.current_buffer_index = 
INVALID_BUFFER_INDEX self._swap_out_ready_buffers() def _flush_buffers_until_complete(self): self._flush_ready_buffers() assert len(self.ready_buffer_index) == 0 self._wait_for_swap_complete() assert len(self.swapping_buffer_index) == 0 assert len(self.free_buffer_index) == len(self.all_buffers) def _swap_out_ready_buffers(self): for buffer_index in self.ready_buffer_index: buffer = self._get_buffer(buffer_index) swap_tensors = buffer.get_swap_tensors() swap_paths = buffer.get_swap_paths() self.num_pending_swaps += len(swap_tensors) swap_out_tensors(self.aio_handle, swap_tensors, swap_paths) self.swapping_buffer_index += self.ready_buffer_index self.ready_buffer_index = [] def _wait_for_swap_complete(self): assert len(self.swapping_buffer_index) > 0 self._start_timer(ASYNC_SWAPPER_WAIT_TIMER) assert self.aio_handle.wait() == self.num_pending_swaps self._stop_timer(ASYNC_SWAPPER_WAIT_TIMER) self.timer_names.add(ASYNC_SWAPPER_WAIT_TIMER) self.num_pending_swaps = 0 for buffer_index in self.swapping_buffer_index: buffer = self._get_buffer(buffer_index) self.num_elements_swapped += buffer.get_num_elem() buffer.reset() self.free_buffer_index += self.swapping_buffer_index assert len(self.free_buffer_index) <= len(self.all_buffers) self.swapping_buffer_index = [] def _get_buffer(self, index): assert index != INVALID_BUFFER_INDEX return self.all_buffers[index] def _get_current_buffer(self): return self._get_buffer(self.current_buffer_index) def _start_timer(self, name): if self.timers: self.timers(name).start() def _stop_timer(self, name): if self.timers: self.timers(name).stop() def _log_timers(self, name_list, force=False): if self.timers and force: self.timers.log(name_list)
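# Minimal, self-contained model (a deliberate simplification, no real aio handle) of the
# buffer rotation AsyncTensorSwapper implements above: a full "current" buffer is submitted
# asynchronously, and the caller only blocks for completions when no free buffer is left to
# rotate into.
class ToySwapper:

    def __init__(self, num_buffers, buffer_numel):
        self.buffer_numel = buffer_numel
        self.free = list(range(num_buffers))
        self.swapping = []                    # submitted, not yet completed
        self.completed = []                   # stands in for data safely written out
        self.current, self.used = None, 0

    def swap_out(self, numel):
        assert numel <= self.buffer_numel
        if self.current is None:
            self.current, self.used = self.free.pop(), 0
        elif self.used + numel > self.buffer_numel:
            self.swapping.append(self.current)        # async submit (swap_out_tensors)
            if not self.free:                         # block only when nothing is free
                self.completed += self.swapping       # stands in for aio_handle.wait()
                self.free += self.swapping
                self.swapping = []
            self.current, self.used = self.free.pop(), 0
        self.used += numel


swapper = ToySwapper(num_buffers=2, buffer_numel=100)
for _ in range(5):
    swapper.swap_out(60)                              # forces rotations and one blocking flush
print('completed buffer ids:', swapper.completed)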
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team """ Functionality of swapping optimizer tensors to/from (NVMe) storage devices. """ import torch from deepspeed.utils.logging import logger from deepspeed.ops.op_builder import AsyncIOBuilder from deepspeed import comm as dist from deepspeed.runtime.swap_tensor.constants import * from deepspeed.runtime.swap_tensor.utils import swap_in_tensors, swap_out_tensors, print_object, \ get_sized_buffers from deepspeed.runtime.swap_tensor.async_swapper import AsyncTensorSwapper from deepspeed.runtime.swap_tensor.optimizer_utils import OptimizerSwapper DEBUG_MODE = False SWAP_IN_PARAM_TIMER = 'swap_in_param' SWAP_OUT_PARAM_TIMER = 'swap_out_param' SWAP_IN_GRADIENT_TIMER = 'swap_in_gradient' class PartitionedOptimizerSwapper(OptimizerSwapper): def __init__(self, swap_config, aio_config, base_folder, optimizer, largest_numel, device, dtype, timers): super(PartitionedOptimizerSwapper, self).__init__(swap_config, aio_config, base_folder, optimizer, largest_numel, device, dtype, timers) aio_op = AsyncIOBuilder().load() self.aio_handle = aio_op.aio_handle(aio_config[AIO_BLOCK_SIZE], aio_config[AIO_QUEUE_DEPTH], aio_config[AIO_SINGLE_SUBMIT], aio_config[AIO_OVERLAP_EVENTS], aio_config[AIO_THREAD_COUNT]) # Overlap swapping out self.gradient_swapper = AsyncTensorSwapper(aio_handle=self.aio_handle, numel_alignment=self.numel_alignment, timers=self.timers) self.print_exclude_list += ['aio_handle', 'gradient_swapper', 'print_exclude_list'] if dist.get_rank() == 0: print_object(obj=self, name='PartitionedOptimizerSwapper', exclude_list=self.print_exclude_list) def initialize_parameters(self, parameters, src_tensors): self._initialize_parameters(parameters=parameters, src_tensors=src_tensors, aio_handle=self.aio_handle) def initialize_from_swapped_fp16_params(self, fp16_partitions_info, fp16_num_elems, fp16_pinned_buffers, fp32_parameters): self._initialize_from_swapped_fp16_params(aio_handle=self.aio_handle, fp16_partitions_info=fp16_partitions_info, fp16_num_elems=fp16_num_elems, fp16_pinned_buffers=fp16_pinned_buffers, fp32_parameters=fp32_parameters) def flush_gradients(self): self._flush_gradient_swapper(self.gradient_swapper) def swap_in_optimizer_state(self, parameter, async_parameter=None): swap_info = self._get_param_swap_info(parameter) if swap_info is None: return self._flush_gradient_swapper(self.gradient_swapper) required_buffer_count = len(swap_info.tensors) + (1 if swap_info.has_gradients() else 0) aligned_numel = self._io_aligned_numel(swap_info.numel()) pinned_buffers = self.swap_buffer_manager.allocate(num_elems=aligned_numel, count=required_buffer_count, dtype=parameter.dtype) assert pinned_buffers is not None self.allocated_swap_buffers = pinned_buffers.copy() self._start_timer(SWAP_IN_PARAM_TIMER) self._swap_in_parameter(aio_handle=self.aio_handle, parameter=parameter, dest_buffers=pinned_buffers[:required_buffer_count]) self._stop_timer(SWAP_IN_PARAM_TIMER) self.timer_names.add(SWAP_IN_PARAM_TIMER) self._start_timer(SWAP_IN_GRADIENT_TIMER) self._swap_in_gradients(aio_handle=self.aio_handle, parameter=parameter, dest_buffer=pinned_buffers[-1]) self._stop_timer(SWAP_IN_GRADIENT_TIMER) self.timer_names.add(SWAP_IN_GRADIENT_TIMER) def swap_out_optimizer_state(self, parameter, async_swap=False): swap_info = self._get_param_swap_info(parameter=parameter) if swap_info is None: return self._start_timer(SWAP_OUT_PARAM_TIMER) pinned_tensors, pinned_paths, unpinned_tensors, unpinned_paths = 
self._separate_pinned_tensors(swap_info) swap_bytes = sum([self._io_aligned_numel(t.numel()) * t.element_size() for t in swap_info.tensors]) WRITE_TIMER = 'swap_submit_write' self._start_timer(WRITE_TIMER) swap_out_tensors(self.aio_handle, pinned_tensors, pinned_paths) assert self.aio_handle.wait() == len(pinned_tensors) for t in pinned_tensors: t.data = torch.Tensor() if len(unpinned_tensors) > 0: pinned_buffers = self.swap_buffer_manager.allocate_all(num_elems=self.largest_numel, dtype=self.dtype) self._swap_out_unpinned_tensors(aio_handle=self.aio_handle, unpinned_tensors=unpinned_tensors, dest_paths=unpinned_paths, pinned_buffers=pinned_buffers) self.allocated_swap_buffers += pinned_buffers for t in unpinned_tensors: t.data = torch.Tensor() self._stop_timer(WRITE_TIMER) self.swap_buffer_manager.free(self.allocated_swap_buffers) self.allocated_swap_buffers = [] self._stop_timer(SWAP_OUT_PARAM_TIMER) self.timer_names.add(SWAP_OUT_PARAM_TIMER) self._log_timers([WRITE_TIMER]) if DEBUG_MODE and dist.get_rank() == 0: logger.info(f'optimizer_param_swap_out: {(swap_bytes/(1024**3)):5.2f} GB') def swap_out_gradients(self, parameter, gradient_offsets, gradient_tensors): self._swap_out_gradients(parameter=parameter, gradient_offsets=gradient_offsets, gradient_tensors=gradient_tensors, gradient_swapper=self.gradient_swapper) def _swap_in_parameter(self, aio_handle, parameter, dest_buffers): swap_info = self._get_param_swap_info(parameter) if swap_info is None: return assert len(swap_info.tensors) <= len(dest_buffers) swap_lengths = [self._io_aligned_numel(swap_info.numel())] * len(swap_info.tensors) swap_buffers = get_sized_buffers(dest_buffers, swap_lengths) READ_TIMER = 'swap_submit_read_param' WAIT_TIMER = 'swap_wait_read_param' self._start_timer(READ_TIMER) swap_in_tensors(aio_handle, swap_buffers, swap_info.swap_paths) self._stop_timer(READ_TIMER) swap_bytes = sum([buffer.numel() * buffer.element_size() for buffer in swap_buffers]) self._start_timer(WAIT_TIMER) aio_handle.wait() self._stop_timer(WAIT_TIMER) compute_lengths = [swap_info.numel()] * len(swap_info.tensors) compute_buffers = get_sized_buffers(dest_buffers, compute_lengths) for t, buffer in zip(swap_info.tensors, compute_buffers): t.data = buffer.data self._log_timers([READ_TIMER, WAIT_TIMER]) if DEBUG_MODE and dist.get_rank() == 0: logger.info(f'optimizer_param_swap_in: {(swap_bytes/(1024**3)):5.2f} GB') def _separate_pinned_tensors(self, swap_info): pinned_tensors = [] pinned_paths = [] unpinned_tensors = [] unpinned_paths = [] for tensor, path in zip(swap_info.tensors, swap_info.swap_paths): if tensor.is_pinned(): pinned_tensors.append(tensor) pinned_paths.append(path) else: unpinned_tensors.append(tensor) unpinned_paths.append(path) return pinned_tensors, pinned_paths, unpinned_tensors, unpinned_paths def _swap_in_pinned_gradients(self, aio_handle, parameter, gradient_tensor): swap_info = self.swap_params_info[id(parameter)] param_gradients = swap_info.swapped_gradients.values() swap_buffers = [gradient_tensor.narrow(0, grad.offset, grad.length) for grad in param_gradients] swap_paths = [grad.path for grad in param_gradients] SWAP_READ_GRADIENTS = 'swap_submit_read_gradient' SWAP_WAIT_GRADIENTS = 'swap_submit_wait_gradient' self._start_timer(SWAP_READ_GRADIENTS) swap_in_tensors(aio_handle, swap_buffers, swap_paths) self._stop_timer(SWAP_READ_GRADIENTS) self._start_timer(SWAP_WAIT_GRADIENTS) assert len(swap_buffers) == aio_handle.wait() self._stop_timer(SWAP_WAIT_GRADIENTS) self._log_timers([SWAP_READ_GRADIENTS, 
SWAP_WAIT_GRADIENTS]) def _swap_in_gradients(self, aio_handle, parameter, dest_buffer): swap_info = self.swap_params_info.get(id(parameter), None) if not (swap_info and swap_info.has_gradients()): return assert dest_buffer.is_pinned() assert parameter.numel() <= dest_buffer.numel() parameter.grad = dest_buffer.narrow(0, 0, parameter.numel()) if swap_info.swapped_gradients: self._swap_in_pinned_gradients(aio_handle, parameter, parameter.grad) if swap_info.unswapped_gradients: self._retrieve_unswapped_grad_partitions(swap_info=swap_info, dest_buffer=parameter.grad)
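# Back-of-the-envelope sketch (all sizes are assumptions) of the pinned-buffer accounting
# performed by swap_in_optimizer_state() above: one staging buffer per swapped optimizer
# state tensor plus one for the gradient, each padded to the aio alignment.
def io_aligned(numel, alignment):
    remainder = numel % alignment
    return numel if remainder == 0 else numel + alignment - remainder

param_numel = 1_000_003
numel_alignment = 512                                     # assumed AIO alignment in elements
state_tensors = ['fp32_param', 'exp_avg', 'exp_avg_sq']   # typical Adam swap_info.tensors
has_gradients = True

required_buffer_count = len(state_tensors) + (1 if has_gradients else 0)
staging_bytes = required_buffer_count * io_aligned(param_numel, numel_alignment) * 4  # fp32
print(f'{required_buffer_count} pinned buffers, {staging_bytes / 2**20:.1f} MiB staged per parameter')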
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team from deepspeed.runtime.config_utils import get_scalar_param from deepspeed.runtime.swap_tensor.constants import * AIO_DEFAULT_DICT = { AIO_BLOCK_SIZE: AIO_BLOCK_SIZE_DEFAULT, AIO_QUEUE_DEPTH: AIO_QUEUE_DEPTH_DEFAULT, AIO_THREAD_COUNT: AIO_THREAD_COUNT_DEFAULT, AIO_SINGLE_SUBMIT: AIO_SINGLE_SUBMIT_DEFAULT, AIO_OVERLAP_EVENTS: AIO_OVERLAP_EVENTS_DEFAULT } def get_aio_config(param_dict): if AIO in param_dict.keys() and param_dict[AIO] is not None: aio_dict = param_dict[AIO] return { AIO_BLOCK_SIZE: get_scalar_param(aio_dict, AIO_BLOCK_SIZE, AIO_BLOCK_SIZE_DEFAULT), AIO_QUEUE_DEPTH: get_scalar_param(aio_dict, AIO_QUEUE_DEPTH, AIO_QUEUE_DEPTH_DEFAULT), AIO_THREAD_COUNT: get_scalar_param(aio_dict, AIO_THREAD_COUNT, AIO_THREAD_COUNT_DEFAULT), AIO_SINGLE_SUBMIT: get_scalar_param(aio_dict, AIO_SINGLE_SUBMIT, AIO_SINGLE_SUBMIT_DEFAULT), AIO_OVERLAP_EVENTS: get_scalar_param(aio_dict, AIO_OVERLAP_EVENTS, AIO_OVERLAP_EVENTS_DEFAULT) } return AIO_DEFAULT_DICT
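# Hedged usage sketch for get_aio_config() above: keys present in the user's "aio" section
# override the defaults, and anything omitted falls back to the values defined in
# deepspeed.runtime.swap_tensor.constants. The numbers below are illustrative only.
from deepspeed.runtime.swap_tensor.aio_config import get_aio_config

ds_config = {
    'aio': {
        'block_size': 262144,
        'queue_depth': 32,
        # single_submit, overlap_events and thread_count fall back to their defaults
    }
}
print(get_aio_config(ds_config))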
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import torch from deepspeed import comm as dist import cupy import numpy as np from deepspeed.runtime.compression.cupy import CupyBackend from deepspeed.accelerator import get_accelerator class NcclBackend(object): def __init__(self, mpu=None): if mpu is None: self.world_group = dist.new_group(ranks=range(dist.get_world_size())) else: self.mpu = mpu self.world_group = self.mpu.get_data_parallel_group() self.rank = dist.get_rank(group=self.world_group) self.size = dist.get_world_size(group=self.world_group) self.compression_backend = CupyBackend() self.bool_not_supported = False TORCH_MAJOR = int(torch.__version__.split('.')[0]) TORCH_MINOR = int(torch.__version__.split('.')[1]) if (TORCH_MAJOR == 1 and TORCH_MINOR >= 10) or TORCH_MAJOR == 2: self.bool_not_supported = True def my_igather(self, rank, size, group, sendbuf, recvbuf, root): req = [] if rank == root: for idx in range(size): if idx != rank: req.append(dist.irecv(recvbuf[idx], src=idx, group=group)) else: recvbuf[rank] = sendbuf else: req.append(dist.isend(sendbuf, group=group, dst=root)) return req def my_gather(self, rank, size, group, sendbuf, recvbuf, root): if rank == root: for idx in range(size): if idx != rank: dist.recv(recvbuf[idx], src=idx, group=group) else: recvbuf[rank] = sendbuf else: dist.send(sendbuf, group=group, dst=root) def compressed_allreduce(self, buffer_m: torch.tensor, worker_error, server_error, local_rank): # all_start_time = time.time() original_shape = buffer_m.size() if len(original_shape) > 1: buffer_m = torch.flatten(buffer_m) original_size = buffer_m.numel() worker_error_size = worker_error.numel() cupy.cuda.Device(local_rank).use() if original_size != worker_error_size: empty_tensor = torch.zeros(worker_error_size - original_size, device=buffer_m.device) buffer_m = torch.cat([buffer_m, empty_tensor]) buffer_m.add_(worker_error) worker_scale = torch.norm(buffer_m) / np.sqrt(buffer_m.numel()) worker_error.set_(buffer_m - worker_scale * buffer_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0)) if self.bool_not_supported: cupy_sign_list_packed = self.compression_backend.compress_by_chunk( self.compression_backend.torch2cupy(buffer_m.sign_().add_(1).bool().to(dtype=torch.uint8)), self.size) else: cupy_sign_list_packed = self.compression_backend.compress_by_chunk( self.compression_backend.torch2cupy(buffer_m.sign_().add_(1).bool()), self.size) cupy_worker_scale = self.compression_backend.torch2cupy(worker_scale) cupy_recvbuf_sign = cupy.zeros([self.size, cupy_sign_list_packed[self.rank].size], dtype=cupy_sign_list_packed[0].dtype) # cupy_recvbuf_scale = cupy.zeros([self.size, 1], dtype=cupy_worker_scale.dtype) sign_list_packed = [ self.compression_backend.cupy2torch(cupy_sign_list_packed[idx]) for idx in range(self.size) ] # worker_scale = self.compression_backend.cupy2torch(cupy_worker_scale) recvbuf_sign = self.compression_backend.cupy2torch(cupy_recvbuf_sign) #recvbuf_scale = self.compression_backend.cupy2torch(cupy_recvbuf_scale) recvbuf_scale = [ torch.zeros(1, dtype=worker_scale.dtype, device=torch.device(get_accelerator().device_name(local_rank))) for i in range(self.size) ] # communication phase 1 # gather_start = time.time() # Alltoall for sign dist.all_to_all_single(recvbuf_sign, torch.stack(sign_list_packed), group=self.world_group) # Allgather for scale dist.all_gather(recvbuf_scale, worker_scale, group=self.world_group) # gather_end = time.time() # cupy_sign_list_packed, sign_list_packed, 
cupy_worker_scale, worker_scale = None, None, None, None cupy_sign_list_packed = None cupy_recvbuf_sign = self.compression_backend.torch2cupy(recvbuf_sign) #cupy_recvbuf_scale = self.compression_backend.torch2cupy(torch.stack(recvbuf_scale)) compensated_server_m = self.compression_backend.cupy2torch( (cupy.unpackbits(cupy_recvbuf_sign.flatten())).reshape(self.size, -1)).float().add_(-0.5).mul_(2.0).mul_( torch.stack(recvbuf_scale).mul_(1 / self.size)).sum(0) compensated_server_m.add_(server_error) server_scale = torch.norm(compensated_server_m) / np.sqrt(compensated_server_m.numel()) server_error.set_(compensated_server_m - server_scale * compensated_server_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0)) # cupy_server_scale = self.compression_backend.torch2cupy(server_scale) if self.bool_not_supported: cupy_server_sign_packed = self.compression_backend.compress_by_chunk( self.compression_backend.torch2cupy(compensated_server_m.sign_().add_(1).bool().to(dtype=torch.uint8)), 1) else: cupy_server_sign_packed = self.compression_backend.compress_by_chunk( self.compression_backend.torch2cupy(compensated_server_m.sign_().add_(1).bool()), 1) compensated_server_m = None cupy_recvbuf_sign_server = cupy.zeros([self.size, cupy_server_sign_packed[0].size], dtype=cupy_recvbuf_sign.dtype) # cupy_recvbuf_sign, recvbuf_sign = None, None cupy_recvbuf_sign = None server_sign_packed = [self.compression_backend.cupy2torch(cupy_server_sign_packed[0])] recvbuf_sign_server = [ self.compression_backend.cupy2torch(cupy_recvbuf_sign_server[idx]) for idx in range(self.size) ] # server_scale = self.compression_backend.cupy2torch(cupy_server_scale) cupy_recvbuf_scale_server = cupy.zeros([self.size, 1], dtype=cupy_worker_scale.dtype) # cupy_recvbuf_scale, recvbuf_scale = None, None recvbuf_scale_server = [ self.compression_backend.cupy2torch(cupy_recvbuf_scale_server[idx]) for idx in range(self.size) ] # Communication Phase 2 dist.all_gather(recvbuf_sign_server, server_sign_packed[0], group=self.world_group) dist.all_gather(recvbuf_scale_server, server_scale, group=self.world_group) cupy_server_sign_packed = None # need to convert from a tensor list to a single tensor # dist.all_gather only provides a tensor list as the recv/output buffer recvbuf_sign_server = torch.stack(recvbuf_sign_server) cupy_recvbuf_sign_server = self.compression_backend.torch2cupy(recvbuf_sign_server) buffer_m.data.copy_( self.compression_backend.cupy2torch((cupy.unpackbits(cupy_recvbuf_sign_server.flatten())).reshape( self.size, -1)).float().add_(-0.5).mul_(2.0).mul_( self.compression_backend.cupy2torch(cupy_recvbuf_scale_server)).flatten().data) if original_size != worker_error_size: buffer_m = buffer_m[0:original_size] if len(original_shape) > 1: buffer_m = buffer_m.reshape(original_shape) return buffer_m
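# Standalone, CPU-only sketch (no cupy, no NCCL) of the 1-bit compression with error
# feedback that compressed_allreduce() above implements: each worker ships one scale plus
# packed sign bits, and carries the quantization error into the next step.
import numpy as np
import torch

grad = torch.randn(1024)
worker_error = torch.zeros_like(grad)

for step in range(3):
    buffer_m = grad + worker_error
    scale = buffer_m.norm() / np.sqrt(buffer_m.numel())
    packed = np.packbits(buffer_m.ge(0).numpy())                    # 1 bit per element on the wire
    decoded = scale * (torch.from_numpy(np.unpackbits(packed)).float() * 2.0 - 1.0)
    worker_error = buffer_m - decoded                               # error feedback for next step
    print(f'step {step}: {packed.nbytes} B compressed vs {buffer_m.numel() * 4} B uncompressed')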
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team '''Copyright The Microsoft DeepSpeed Team'''
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import torch import cupy import time import numpy as np from mpi4py import MPI from deepspeed.runtime.compression.cupy import CupyBackend class MpiBackend(object): def __init__(self, cuda_aware): self.comm = MPI.COMM_WORLD self.rank = self.comm.Get_rank() self.size = self.comm.Get_size() self.cuda_aware = cuda_aware self.compression_backend = CupyBackend() def my_igather(self, rank, size, comm, sendbuf, recbuf, root): req = [] if rank == root: for idx in range(size): if idx != rank: req.append(comm.Irecv(recbuf[idx], source=idx)) else: recbuf[rank] = sendbuf else: req.append(comm.Isend(sendbuf, dest=root)) return req def gather_cuda(self, rank, world_size, comm, cupy_sign_list_packed, cupy_recvbuf_sign, cupy_worker_scale, cupy_recvbuf_scale): # We do in-place operations on cupy buffers so we do not return any buffers requests = [] for idx in range(world_size): req_sign = self.my_igather(rank, world_size, comm, cupy_sign_list_packed[idx], cupy_recvbuf_sign, root=idx) requests += req_sign for idx in range(world_size): req_scale = self.my_igather(rank, world_size, comm, cupy_worker_scale, cupy_recvbuf_scale, root=idx) requests += req_scale MPI.Request.Waitall(requests) def gather_host(self, rank, world_size, comm, cupy_sign_list_packed, cupy_recvbuf_sign, cupy_worker_scale, cupy_recvbuf_scale): # In-place operations are not possible for newly created cupy arrays # so we need to return the new buffers numpy_recvbuf_sign = np.zeros([world_size, cupy_sign_list_packed[rank].size], dtype=cupy_sign_list_packed[0].dtype) numpy_recvbuf_scale = np.zeros([world_size, 1], dtype=cupy_worker_scale.dtype) # 1. convert from cupy to numpy numpy_sign_list_packed = cupy_sign_list_packed for idx in range(world_size): numpy_sign_list_packed[idx] = cupy.asnumpy(cupy_sign_list_packed[idx]) numpy_worker_scale = cupy.asnumpy(cupy_worker_scale) numpy_recvbuf_scale = cupy.asnumpy(cupy_recvbuf_scale) cupy.cuda.get_current_stream().synchronize() # 2. use numpy buffers for communication requests = [] for idx in range(world_size): req_sign = self.my_igather(rank, world_size, comm, numpy_sign_list_packed[idx], numpy_recvbuf_sign, root=idx) requests += req_sign for idx in range(world_size): req_scale = self.my_igather(rank, world_size, comm, numpy_worker_scale, numpy_recvbuf_scale, root=idx) requests += req_scale MPI.Request.Waitall(requests) # 3. Convert back from numpy to cupy cupy_recvbuf_sign = cupy.asarray(numpy_recvbuf_sign) for idx in range(world_size): cupy_sign_list_packed[idx] = cupy.asarray(numpy_sign_list_packed[idx]) cupy_worker_scale = cupy.asarray(numpy_worker_scale) cupy_recvbuf_scale = cupy.asarray(numpy_recvbuf_scale) cupy.cuda.get_current_stream().synchronize() return cupy_sign_list_packed, cupy_recvbuf_sign, cupy_worker_scale, cupy_recvbuf_scale def allgather_cuda(self, comm, cupy_server_sign_packed, cupy_recvbuf_sign_server, cupy_server_scale, cupy_recvbuf_scale_server): comm.Allgather(cupy_server_sign_packed, cupy_recvbuf_sign_server) comm.Allgather(cupy_server_scale, cupy_recvbuf_scale_server) def allgather_host(self, comm, cupy_server_sign_packed, cupy_recvbuf_sign_server, cupy_server_scale, cupy_recvbuf_scale_server): # 1. 
Convert cupy to numpy numpy_recvbuf_sign_server = np.zeros([comm.Get_size(), cupy_server_sign_packed.size], dtype=cupy_server_sign_packed.dtype) numpy_recvbuf_scale_server = np.zeros([comm.Get_size(), 1], dtype=cupy_server_scale.dtype) numpy_server_sign_packed = cupy.asnumpy(cupy_server_sign_packed) numpy_recvbuf_sign_server = cupy.asnumpy(cupy_recvbuf_sign_server) numpy_server_scale = cupy.asnumpy(cupy_server_scale) numpy_recvbuf_scale_server = cupy.asnumpy(cupy_recvbuf_scale_server) cupy.cuda.get_current_stream().synchronize() # 2. Communicate numpy buffers comm.Allgather(numpy_server_sign_packed, numpy_recvbuf_sign_server) comm.Allgather(numpy_server_scale, numpy_recvbuf_scale_server) comm.Barrier() # 3. Convert numpy back to cupy cupy_server_sign_packed = cupy.asarray(numpy_server_sign_packed) cupy_recvbuf_sign_server = cupy.asarray(numpy_recvbuf_sign_server) cupy_server_scale = cupy.asarray(numpy_server_scale) cupy_recvbuf_scale_server = cupy.asarray(numpy_recvbuf_scale_server) cupy.cuda.get_current_stream().synchronize() return cupy_server_sign_packed, cupy_recvbuf_sign_server, cupy_server_scale, cupy_recvbuf_scale_server def compressed_allreduce(self, buffer_m: torch.tensor, worker_error, server_error, local_rank): all_start_time = time.time() original_shape = buffer_m.size() if len(original_shape) > 1: buffer_m = torch.flatten(buffer_m) original_size = buffer_m.numel() worker_error_size = worker_error.numel() cupy.cuda.Device(local_rank).use() if original_size != worker_error_size: empty_tensor = torch.zeros(worker_error_size - original_size, device=buffer_m.device) buffer_m = torch.cat([buffer_m, empty_tensor]) buffer_m.add_(worker_error) worker_scale = torch.norm(buffer_m) / np.sqrt(torch.numel(buffer_m)) worker_error.set_(buffer_m - worker_scale * buffer_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0)) cupy_sign_list_packed = self.compression_backend.compress_by_chunk( self.compression_backend.torch2cupy(buffer_m.sign_().add_(1).bool()), self.size) cupy_worker_scale = self.compression_backend.torch2cupy(worker_scale) cupy_recvbuf_sign = cupy.zeros([self.size, cupy_sign_list_packed[self.rank].size], dtype=cupy_sign_list_packed[0].dtype) cupy_recvbuf_scale = cupy.zeros([self.size, 1], dtype=cupy_worker_scale.dtype) # Communication Phase 1 gather_start = time.time() if self.cuda_aware: self.gather_cuda(self.rank, self.size, self.comm, cupy_sign_list_packed, cupy_recvbuf_sign, cupy_worker_scale, cupy_recvbuf_scale) else: _, cupy_recvbuf_sign, _, cupy_recvbuf_scale = self.gather_host(self.rank, self.size, self.comm, cupy_sign_list_packed, cupy_recvbuf_sign, cupy_worker_scale, cupy_recvbuf_scale) gather_end = time.time() # cupy_sign_list_packed, cupy_worker_scale, worker_scale = None, None, None cupy_sign_list_packed = None compensated_server_m = self.compression_backend.cupy2torch( (cupy.unpackbits(cupy_recvbuf_sign.flatten())).reshape(self.size, -1)).float().add_(-0.5).mul_(2.0).mul_( self.compression_backend.cupy2torch(cupy_recvbuf_scale).mul_(1 / self.size)).sum(0) compensated_server_m.add_(server_error) server_scale = torch.norm(compensated_server_m) / np.sqrt(compensated_server_m.numel()) server_error.set_(compensated_server_m - server_scale * compensated_server_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0)) cupy_server_scale = self.compression_backend.torch2cupy(server_scale) cupy_server_sign_packed = self.compression_backend.compress_by_chunk( self.compression_backend.torch2cupy(compensated_server_m.sign_().add_(1).bool()), 1) compensated_server_m = None 
cupy_recvbuf_sign_server = cupy.zeros([self.size, cupy_server_sign_packed[0].size], dtype=cupy_recvbuf_sign.dtype) cupy_recvbuf_scale_server = cupy.zeros([self.size, 1], dtype=cupy_recvbuf_scale.dtype) # cupy_recvbuf_sign, cupy_recvbuf_scale = None, None cupy_recvbuf_sign = None # Communication Phase 2 if self.cuda_aware: self.allgather_cuda(self.comm, cupy_server_sign_packed[0], cupy_recvbuf_sign_server, cupy_server_scale, cupy_recvbuf_scale_server) else: _, cupy_recvbuf_sign_server, _, cupy_recvbuf_scale_server = self.allgather_host( self.comm, cupy_server_sign_packed[0], cupy_recvbuf_sign_server, cupy_server_scale, cupy_recvbuf_scale_server) # cupy_server_sign_packed, cupy_server_scale, server_scale = None, None, None cupy_server_sign_packed = None buffer_m.data.copy_( self.compression_backend.cupy2torch((cupy.unpackbits(cupy_recvbuf_sign_server.flatten())).reshape( self.size, -1)).float().add_(-0.5).mul_(2.0).mul_( self.compression_backend.cupy2torch(cupy_recvbuf_scale_server)).flatten().data) if original_size != worker_error_size: buffer_m = buffer_m[0:original_size] if len(original_shape) > 1: buffer_m = buffer_m.reshape(original_shape) # cupy_recvbuf_sign_server, cupy_recvbuf_scale_server = None, None return buffer_m
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team """ batched collective operations for overhead amortization and better bandwidth utilization """ import math from typing import List import torch from torch import Tensor from deepspeed import comm as dist # NOTE: Use torch.distributed's ProcessGroup class until we have our own. from torch.distributed import ProcessGroup import torch.nn.functional from deepspeed.utils import instrument_w_nvtx def _torch_reduce_scatter_fn(input_tensor: Tensor, output_tensor: Tensor, group=None, async_op=False, prof=False): return instrument_w_nvtx(dist.reduce_scatter_fn)(output_tensor, input_tensor, group=group, async_op=async_op) @instrument_w_nvtx @torch.no_grad() def reduce_scatter_coalesced( tensors: List[Tensor], group: ProcessGroup = None, ) -> List[Tensor]: """simultaneously reduce-scatter a list of tensors - this can be done more efficiently than individual reduce scatter calls TODO. see if PyTorch team wants a c++ version of this for ProcessGroupNCCL """ this_rank = dist.get_rank(group) world_sz = dist.get_world_size(group) partition_lst_for_each_tensor = [None] * len(tensors) for tensor_idx, tensor in enumerate(tensors): flattened_tensor = tensor.view(-1) chunk_sz = math.ceil(tensor.numel() / world_sz) partition_lst_for_each_tensor[tensor_idx] = [ flattened_tensor[rank * chunk_sz:rank * chunk_sz + chunk_sz] for rank in range(0, world_sz) ] padded_partition_sz_for_each_tensor = tuple(math.ceil(t.numel() / world_sz) for t in tensors) if len(tensors) == 1 and tensors[0].numel() % world_sz == 0: # if there's only one tensor being reduced and we don't need to pad # we have an opportunity to avoid a memory allocation tensor_partition_flat_buffer = tensors[0].view(-1) else: # interleave tensor partitions such that the correct reduced partitions of each tensor # end up at each rank tensor_partitions_lst_with_padding = [] for rank in range(world_sz): for tensor_idx in range(len(tensors)): # add tensor content tensor_chunk = partition_lst_for_each_tensor[tensor_idx][rank] tensor_partitions_lst_with_padding.append(tensor_chunk) # add padding if necessary padding_sz = padded_partition_sz_for_each_tensor[tensor_idx] - tensor_chunk.numel() if padding_sz > 0: tensor_partitions_lst_with_padding.append( torch.empty(padding_sz, dtype=tensor_chunk.dtype, device=tensor_chunk.device)) tensor_partition_flat_buffer = instrument_w_nvtx(torch.cat)(tensor_partitions_lst_with_padding) tensor_partition_flat_buffer.div_(world_sz) # pre-divide tensor_partition_buffer_for_each_rank: List[Tensor] = torch.chunk(tensor_partition_flat_buffer, world_sz) # batched reduce-scatter call _torch_reduce_scatter_fn(tensor_partition_flat_buffer, tensor_partition_buffer_for_each_rank[this_rank], group=group) # reverse procedure of the interleaving done previously, done on the # result of the batched reduce-scatter output_lst: List[Tensor] = [None] * len(tensors) offset = 0 for tensor_idx in range(len(tensors)): output_lst[tensor_idx] = tensor_partition_buffer_for_each_rank[this_rank].narrow( 0, offset, partition_lst_for_each_tensor[tensor_idx][this_rank].numel()) offset += padded_partition_sz_for_each_tensor[tensor_idx] return output_lst
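# CPU-only sketch of the interleaving reduce_scatter_coalesced() above performs before the
# single batched reduce-scatter call (communication itself omitted): every tensor is split
# into world_sz partitions, partitions headed for the same rank are packed together, and
# each is padded to a fixed per-tensor size so all ranks' chunks share one layout.
import math
import torch

world_sz, this_rank = 4, 1
tensors = [torch.arange(10, dtype=torch.float32), torch.arange(6, dtype=torch.float32)]
part_sz = [math.ceil(t.numel() / world_sz) for t in tensors]      # padded partition sizes

chunks = []
for rank in range(world_sz):
    for t, sz in zip(tensors, part_sz):
        part = t.view(-1)[rank * sz:rank * sz + sz]
        chunks.append(part)
        if part.numel() < sz:                                     # pad so every chunk lines up
            chunks.append(torch.zeros(sz - part.numel()))

flat = torch.cat(chunks)                         # this is what feeds the reduce-scatter
my_chunk = torch.chunk(flat, world_sz)[this_rank]

offset = 0
for idx, sz in enumerate(part_sz):
    valid = min(sz, tensors[idx].numel() - this_rank * sz)        # unpadded length on this rank
    print(f'tensor {idx} partition for rank {this_rank}:', my_chunk.narrow(0, offset, valid))
    offset += sz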
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team from pydantic import Field, validator import sys from typing import Optional from enum import Enum from deepspeed.runtime.config_utils import get_scalar_param, pp_int, DeepSpeedConfigModel from deepspeed.utils import logger from .offload_config import DeepSpeedZeroOffloadParamConfig, DeepSpeedZeroOffloadOptimizerConfig, OffloadDeviceEnum # ZeRO optimization. By default, this optimization is not enabled. # Users have to configure the desired optimization (0 means disabled) in params.json as below example: ZERO_FORMAT = """ ZeRO optimization should be enabled as: "session_params": { "zero_optimization": { "stage": [0|1|2], "stage3_max_live_parameters" : 1000000000, "stage3_max_reuse_distance" : 1000000000, "allgather_partitions": [true|false], "allgather_bucket_size": 500000000, "reduce_scatter": [true|false], "contiguous_gradients" : [true|false] "overlap_comm": [true|false], "reduce_bucket_size": 500000000, "load_from_fp32_weights": [true|false], "cpu_offload": [true|false] (deprecated), "cpu_offload_params" : [true|false] (deprecated), "cpu_offload_use_pin_memory": [true|false] (deprecated), "sub_group_size" : 1000000000000, "offload_param": {...}, "offload_optimizer": {...}, "ignore_unused_parameters": [true|false], "round_robin_gradients": [true|false], "memory_efficient_linear": [true|false] } } """ ZERO_OPTIMIZATION = "zero_optimization" def read_zero_config_deprecated(param_dict): zero_config_dict = {} zero_config_dict["stage"] = 1 if param_dict[ZERO_OPTIMIZATION] else 0 if zero_config_dict["stage"] > 0: zero_config_dict["allgather_bucket_size"] = get_scalar_param(param_dict, "allgather_size", 5e8) logger.warning( "DeepSpeedConfig: this format of ZeRO optimization setup is deprecated. Please use the following format: {}". format(ZERO_FORMAT)) return zero_config_dict def get_zero_config(param_dict): if ZERO_OPTIMIZATION in param_dict: zero_config_dict = param_dict[ZERO_OPTIMIZATION] if isinstance(zero_config_dict, bool): zero_config_dict = read_zero_config_deprecated(param_dict) else: zero_config_dict = {} return DeepSpeedZeroConfig(**zero_config_dict) class ZeroStageEnum(int, Enum): """ Enum class for possible zero stages """ disabled = 0 optimizer_states = 1 gradients = 2 weights = 3 max_stage = 3 class DeepSpeedZeroConfig(DeepSpeedConfigModel): """ Sets parameters for ZeRO optimizations. """ stage: ZeroStageEnum = 0 """ Chooses different stages of ZeRO Optimizer. Stage 0, 1, 2, and 3 refer to disabled, optimizer state partitioning, and optimizer+gradient state partitioning, and optimizer+gradient+parameter partitioning, respectively. """ contiguous_gradients: bool = True """ Copies the gradients to a contiguous buffer as they are produced. Avoids memory fragmentation during backward pass. """ reduce_scatter: bool = True """ Uses reduce or reduce scatter instead of allreduce to average gradients """ reduce_bucket_size: int = Field(pp_int(5e8), ge=0) """ Number of elements reduced/allreduced at a time. Limits the memory required for the allgather for large model sizes """ allgather_partitions: bool = True """ Chooses between allgather collective or a series of broadcast collectives to gather updated parameters from all the GPUs at the end of each step """ allgather_bucket_size: int = Field(pp_int(5e8), ge=0) """ Number of elements allgathered at a time. 
Limits the memory required for the allgather for large model sizes """ overlap_comm: bool = None # None for dynamic default value (see validator `overlap_comm_valid` below) """ Attempts to overlap the reduction of the gradients with backward computation """ load_from_fp32_weights: bool = True """ Boolean indicating whether to initialize fp32 master weights from fp32 copies in checkpoint (no precision loss) or from model's fp16 copies (with precision loss). This can be used to initialize optimizer state even when checkpoint is missing optimizer state. """ elastic_checkpoint: bool = False """ Enable loading checkpoint that was saved by job with different GPU count. No longer supported. """ offload_param: Optional[DeepSpeedZeroOffloadParamConfig] = None """ Enable offloading of model parameters to CPU or NVMe. This frees up GPU memory for larger models or batch sizes. Valid only with stage 3. Expects a dictionary containing values for :any:`DeepSpeedZeroOffloadParamConfig`. """ offload_optimizer: Optional[DeepSpeedZeroOffloadOptimizerConfig] = None """ Enable offloading of optimizer state to CPU or NVMe, and optimizer computation to CPU. This frees up GPU memory for larger models or batch sizes. Valid for ZeRO stage 1, 2, 3. Expects a dictionary containing values for :any:`DeepSpeedZeroOffloadOptimizerConfig`. """ sub_group_size: int = Field(pp_int(1e9), ge=0) """ Tile size for parameter processing to fit massive models (with trillions of parameters). Used by ZeRO3-Offload and ZeRO-Infinity """ cpu_offload_param: bool = Field( None, deprecated=True, new_param="offload_param", new_param_fn=(lambda val: DeepSpeedZeroOffloadParamConfig(device=OffloadDeviceEnum.cpu) if val else None), ) """ Deprecated, please use ``offload_param`` """ cpu_offload_use_pin_memory: bool = Field( None, deprecated=True, new_param="offload_param or offload_optimizer", set_new_param=False, ) """ Deprecated, please use ``offload_param`` or ``offload_optimizer`` """ cpu_offload: bool = Field( None, deprecated=True, new_param="offload_optimizer", new_param_fn=(lambda val: DeepSpeedZeroOffloadOptimizerConfig(device=OffloadDeviceEnum.cpu) if val else None), ) """ Deprecated, please use ``offload_optimizer`` """ prefetch_bucket_size: int = Field(pp_int(5e7), ge=0, alias="stage3_prefetch_bucket_size") """ Maximum number of parameter elements to fetch ahead of use. Used by ZeRO3, ZeRO3-Offload, ZeRO-Infinity, and ZeRO-Inference. """ param_persistence_threshold: int = Field(pp_int(1e5), ge=0, alias="stage3_param_persistence_threshold") """ Do not partition parameters smaller than this threshold. Smaller values use less memory, but can greatly increase communication (especially latency-bound messages). """ model_persistence_threshold: int = Field(pp_int(sys.maxsize, "sys.maxsize"), ge=0, alias="stage3_model_persistence_threshold") """ Maximum number of parameter elements that can be persisted in GPU and not partitioned. This imposes an upper bound on the number of unpartitioned parameters resulting from param_persistence_threshold setting. Used by ZeRO3-Offload, ZeRO-Infinity and ZeRO-Inference. """ max_live_parameters: int = Field(pp_int(1e9), ge=0, alias="stage3_max_live_parameters") """ The maximum number of parameters resident per GPU before releasing. Smaller values use less memory, but perform more communication. """ max_reuse_distance: int = Field(pp_int(1e9), ge=0, alias="stage3_max_reuse_distance") """ Do not release a parameter if it will be reused within this threshold of parameters. 
Smaller values use less memory, but perform more communication. """ gather_16bit_weights_on_model_save: bool = Field(False, alias="stage3_gather_16bit_weights_on_model_save") """ Consolidate the weights before saving the model by ``save_16bit_model()``. Since the weights are partitioned across GPUs, they aren’t part of ``state_dict``, so this function automatically gathers the weights when this option is enabled and then saves the fp16 model weights. """ stage3_gather_fp16_weights_on_model_save: bool = Field(False, deprecated=True, new_param="gather_16bit_weights_on_model_save") """ Deprecated, please use ``gather_16bit_weights_on_model_save`` """ ignore_unused_parameters: bool = True """ Unused parameters in modules may be unexpected in static networks, but could be normal in dynamic networks. This controls whether or not training should terminate with an error message when unused parameters are detected. This is set to ``True`` by default, which means unused parameters are ignored and training continues. Currently this is only used by ZeRO stage 2. """ legacy_stage1: bool = False """ For backward-compatibility enable old ZeRO stage 1 implementation. Use at your own risk, will be deprecated soon. """ round_robin_gradients: bool = False """ Stage 1 and 2 optimization for CPU offloading that parallelizes gradient copying to CPU memory among ranks by fine-grained gradient partitioning. Performance benefit grows with gradient accumulation steps (more copying between optimizer steps) or GPU count (increased parallelism). """ mics_shard_size: int = Field(-1, new_param="mics_shard_size") mics_hierarchical_params_gather: bool = False memory_efficient_linear: bool = True """ Use memory efficient linear implementation, for Stage 3. """ # Validators @validator("overlap_comm") def overlap_comm_valid(cls, field_value, values): if field_value is None: assert ("stage" in values), "DeepSpeedZeroConfig: 'stage' must be defined before 'overlap_comm'" field_value = values["stage"] == ZeroStageEnum.weights return field_value
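# Hedged usage sketch for the config model above: JSON keys can use their "stage3_"
# aliases, nested offload dicts are parsed into their own config models, and overlap_comm
# is filled in dynamically by the overlap_comm_valid validator (True only for stage 3).
# The nvme_path below is a made-up example value.
from deepspeed.runtime.zero.config import get_zero_config

cfg = get_zero_config({
    'zero_optimization': {
        'stage': 3,
        'stage3_prefetch_bucket_size': 7e7,                      # alias of prefetch_bucket_size
        'offload_param': {'device': 'nvme', 'nvme_path': '/local_nvme'},
    }
})

assert cfg.overlap_comm is True        # dynamic default chosen because stage == 3
print(cfg.prefetch_bucket_size, cfg.offload_param.device)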
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import sys import torch from collections import OrderedDict from deepspeed.runtime.utils import see_memory_usage from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum from deepspeed.runtime.zero.partition_parameters import _init_external_params from deepspeed.runtime.zero.partition_parameters import * from deepspeed.runtime.zero.partitioned_param_coordinator import PartitionedParameterCoordinator, InflightParamRegistry, iter_params from deepspeed import comm as dist from deepspeed.accelerator import get_accelerator FWD_MODULE_STACK = list() def is_builtin_type(obj): # https://stackoverflow.com/a/17795199 return obj.__class__.__module__ == '__builtin__' or obj.__class__.__module__ == "builtins" def isinstance_namedtuple(obj: object) -> bool: """ Is this an instance of namedtuple/NamedTuple? From: https://stackoverflow.com/a/62692640 Args: obj (object): An object. Returns: bool: True if namedtuple/NamedTuple else False. """ return isinstance(obj, tuple) and hasattr(obj, '_asdict') and hasattr(obj, '_fields') # ensure we only warn once, otherwise every iteration will trigger a warning warned = False def _apply_to_tensors_only(module, functional, backward_function, outputs): """ Apply a torch.autograd.Function that calls a `backward_function` to every Tensor in `outputs`. Args: module (torch.nn.Module): A torch module functional (Type[torch.autograd.Function]): The function class to apply. backward_function (Callable[[torch.nn.Module], None]): A backward_function to pass to `functional.apply`. outputs (Any): The output of `module`. Returns: Any: The output of `module`. """ if isinstance(outputs, (tuple, list)): touched_outputs = [] for output in outputs: touched_output = _apply_to_tensors_only(module, functional, backward_function, output) touched_outputs.append(touched_output) if isinstance_namedtuple(outputs): # namedtuples require a slightly different syntax. return outputs.__class__(*touched_outputs) return outputs.__class__(touched_outputs) elif isinstance(outputs, dict): # apply inplace to avoid recreating dict inherited objects for key in outputs.keys(): outputs[key] = _apply_to_tensors_only(module, functional, backward_function, outputs[key]) return outputs elif isinstance(outputs, torch.Tensor): # this also applies to torch.Tensor's subclasses like torch.nn.parameter.Parameter touched_outputs = functional.apply(module, backward_function, outputs) # restore zero param attributes if those get stripped by `backward_function` if not is_zero_param(touched_outputs) and is_zero_param(outputs): touched_outputs.ds_param_alias = outputs return touched_outputs else: if not is_builtin_type(outputs): global warned if not warned and dist.get_rank() == 0: logger.warning( f"A module has unknown inputs or outputs type ({type(outputs)}) and the tensors embedded in it cannot be detected. 
" "The ZeRO-3 hooks designed to trigger before or after backward pass of the module relies on knowing the input and " "output tensors and therefore may not get triggered properly.") warned = True return outputs #for each tensor in outputs run the forward_function and register backward_function as hook def _apply_forward_and_backward_to_tensors_only(module, forward_function, backward_function, outputs): if type(outputs) is tuple: touched_outputs = [] for output in outputs: touched_output = _apply_forward_and_backward_to_tensors_only(module, forward_function, backward_function, output) touched_outputs.append(touched_output) return tuple(touched_outputs) elif type(outputs) is torch.Tensor: forward_function(outputs) if outputs.requires_grad: outputs.register_hook(backward_function) return outputs else: return outputs class ZeROOrderedDict(OrderedDict): def __init__(self, parent_module, *args, **kwargs): """A replacement for ``collections.OrderedDict`` to detect external ZeRO params. Args: parent_module (``collections.OrderedDict``): the collection to replace """ super().__init__(*args, **kwargs) self._parent_module = parent_module self._in_forward = False def __getitem__(self, key): param = super().__getitem__(key) # Params can be registered as None (e.g., bias) if param is None: return param if param.ds_status == ZeroParamStatus.NOT_AVAILABLE: if self._parent_module._parameters._in_forward: register_external_parameter(FWD_MODULE_STACK[-1], param) param.all_gather() print_rank_0(f'Registering external parameter from getter {key} ds_id = {param.ds_id}', force=False) return param def _inject_parameters(module, cls): for module in module.modules(): if cls == ZeROOrderedDict: new_param = cls(parent_module=module) else: new_param = cls() for key, param in module._parameters.items(): new_param[key] = param module._parameters = new_param class PreBackwardFunction(torch.autograd.Function): @staticmethod def forward(ctx, module, pre_backward_function, outputs): ctx.module = module ctx.pre_backward_function = pre_backward_function if not hasattr(module, "applied_pre_backward_ref_cnt"): module.applied_pre_backward_ref_cnt = 0 module.applied_pre_backward_ref_cnt += 1 #print(f"After Forward: {ctx.module.__class__.__name__}") outputs = outputs.detach() return outputs @staticmethod def backward(ctx, *args): #print(f"Before Backward: {ctx.module.__class__.__name__}") ctx.pre_backward_function(ctx.module) return (None, None) + args class PostBackwardFunction(torch.autograd.Function): @staticmethod def forward(ctx, module, pre_backward_function, output): ctx.module = module if output.requires_grad: #TODO SOME TIMES post backward does not seem to be triggered debug in detail #Should only cause increase in memory not correctness issue #if output.grad_fn.__class__.__name__ == 'ViewBackward': # ctx.view=True # print(f"Warning view tensor for input to module : {module.__class__.__name__}. Backward hooks may not trigger properly") #assert len(module.parameters(recurse=False)), "The input tensor to the module is a view, and autograd Function or register_hook is not triggered with view tensors." 
#if module.ds_grads_remaining == 0: # print(f"Before Forward: {ctx.module.__class__.__name__}") module.ds_grads_remaining += 1 ctx.pre_backward_function = pre_backward_function output = output.detach() return output @staticmethod def backward(ctx, *args): ctx.module.ds_grads_remaining = ctx.module.ds_grads_remaining - 1 if ctx.module.ds_grads_remaining == 0: ctx.pre_backward_function(ctx.module) #print(f"After Backward: {ctx.module.__class__.__name__}") return (None, None) + args class DeepSpeedZeRoOffload(object): def __init__(self, module, timers, ds_config, overlap_comm=True, prefetch_bucket_size=50000000, max_reuse_distance=1000000000, max_live_parameters=1000000000, param_persistence_threshold=100000, model_persistence_threshold=sys.maxsize, offload_param_config=None, mpu=None): see_memory_usage("DeepSpeedZeRoOffload initialize [begin]", force=True) print_rank_0(f"initialized {__class__.__name__} with args: {locals()}", force=False) self.module = module self.dtype = list(module.parameters())[0].dtype self.offload_device = None self.offload_param_pin_memory = False if offload_param_config is not None and offload_param_config.device != OffloadDeviceEnum.none: self.offload_device = offload_param_config.device self.offload_param_pin_memory = offload_param_config.pin_memory self._convert_to_zero_parameters(ds_config, module, mpu) for m in module.modules(): _init_external_params(m) _inject_parameters(module, ZeROOrderedDict) self.param_numel_persistence_threshold = int(param_persistence_threshold) self.model_persistence_threshold = int(model_persistence_threshold) self.persistent_parameters = self.mark_persistent_parameters(self.param_numel_persistence_threshold, self.model_persistence_threshold) self.param_coordinators = {} self._prefetch_bucket_sz = int(prefetch_bucket_size) self._max_reuse_distance_in_numel = int(max_reuse_distance) self._max_available_parameters_in_numel = int(max_live_parameters) self.__allgather_stream = get_accelerator().Stream() if overlap_comm else get_accelerator().default_stream() if not hasattr(module, "ds_inflight_param_registry"): module.ds_inflight_param_registry = InflightParamRegistry() self.__inflight_param_registry = module.ds_inflight_param_registry self.forward_hooks = [] self.backward_hooks = [] self.setup_zero_stage3_hooks() print_rank_0( f'Created module hooks: forward = {len(self.forward_hooks)}, backward = {len(self.backward_hooks)}', force=False) see_memory_usage("DeepSpeedZeRoOffload initialize [end]", force=True) @instrument_w_nvtx def partition_all_parameters(self): """Partitioning Parameters that were not partitioned usually if parameters of modules whose input parameters do not require grad computation do not trigger post call and will therefore will remain unpartitioned""" self.get_param_coordinator(training=self.module.training).release_and_reset_all(self.module) for param in iter_params(self.module, recurse=True): if param.ds_status != ZeroParamStatus.NOT_AVAILABLE: raise RuntimeError(f"{param.ds_summary()} expected to be released") def get_param_coordinator(self, training): if not training in self.param_coordinators: self.param_coordinators[training] = PartitionedParameterCoordinator( prefetch_bucket_sz=self._prefetch_bucket_sz, max_reuse_distance_in_numel=self._max_reuse_distance_in_numel, max_available_parameters_in_numel=self._max_available_parameters_in_numel, allgather_stream=self.__allgather_stream, inflight_param_registry=self.__inflight_param_registry, prefetch_nvme=self.offload_device == OffloadDeviceEnum.nvme, ) return 
self.param_coordinators[training] def empty_partition_cache(self): self.partition_all_parameters() def _convert_to_zero_parameters(self, ds_config, module, mpu): non_zero_params = [p for p in module.parameters() if not is_zero_param(p)] if non_zero_params: zero_params = [p for p in module.parameters() if is_zero_param(p)] if zero_params: zero_params[0].convert_to_zero_parameters(param_list=non_zero_params) else: group = None if mpu: group = mpu.get_data_parallel_group() Init(module=module, data_parallel_group=group, dtype=self.dtype, config_dict_or_path=ds_config, remote_device=self.offload_device, pin_memory=self.offload_param_pin_memory, mpu=mpu) def destroy(self): self._remove_module_hooks() def _remove_module_hooks(self): num_forward_hooks = len(self.forward_hooks) num_backward_hooks = len(self.backward_hooks) for hook in self.forward_hooks: hook.remove() for hook in self.backward_hooks: hook.remove() print_rank_0(f'Deleted module hooks: forward = {num_forward_hooks}, backward = {num_backward_hooks}', force=False) def setup_zero_stage3_hooks(self): self.hierarchy = 0 #reset step if in inference mode @instrument_w_nvtx def _end_of_forward_hook(module, *args): if not torch._C.is_grad_enabled(): self.get_param_coordinator(training=False).reset_step() #likely one of them should be enough but just to be safe self._register_hooks_recursively(self.module) self.module.register_forward_hook(_end_of_forward_hook) # Add top module to stack trace global FWD_MODULE_STACK FWD_MODULE_STACK.append(self.module) def mark_persistent_parameters(self, param_threshold, model_threshold): persistent_params = [] total_persistent_parameters = 0 params_count = 0 for _, param in self.module.named_parameters(recurse=True): if param.ds_numel + total_persistent_parameters > model_threshold: continue if param.ds_numel <= param_threshold: params_count += 1 param.ds_persist = True persistent_params.append(param) total_persistent_parameters += param.ds_numel print_rank_0( f"Parameter Offload: Total persistent parameters: {total_persistent_parameters} in {params_count} params", force=True) return persistent_params def _register_hooks_recursively(self, module, count=[0]): my_count = count[0] module.id = my_count #print(f"{module.__class__} : {module.id}") for child in module.children(): count[0] = count[0] + 1 self._register_hooks_recursively(child, count=count) @instrument_w_nvtx def _pre_forward_module_hook(module, *args): self.pre_sub_module_forward_function(module) @instrument_w_nvtx def _post_forward_module_hook(module, input, output): global FWD_MODULE_STACK FWD_MODULE_STACK.pop() if output is None: output = [] elif not isinstance(output, (list, tuple)): if torch.is_tensor(output): output = [output] else: #print(f'got UNKNOWN type {type(output)}') outputs = [] output = output if isinstance(output, dict) else vars(output) for name, val in output.items(): if not name.startswith('__') and torch.is_tensor(val): outputs.append(val) output = outputs for item in filter(lambda item: is_zero_param(item) or hasattr(item, 'ds_param_alias'), output): key = id(item) if hasattr(item, 'ds_id') else id(item.ds_param_alias) actual_external_param = item if hasattr(item, 'ds_id') else item.ds_param_alias if not any(key in m._external_params for m in FWD_MODULE_STACK): actual_external_param.is_external_param = True module_to_register = FWD_MODULE_STACK[-1] register_external_parameter(module_to_register, actual_external_param) print_rank_0( f'Registering dangling parameter for module {module_to_register.__class__.__name__}, ds_id = 
{actual_external_param.ds_id}.', force=False) # It's possible that the parameter was already external to the completed module. If so, remove it the # registration as it will be covered by the outer module instead. if key in module._external_params: print_rank_0( f' Unregistering nested dangling parameter from module {module.__class__.__name__}, ds_id = {actual_external_param.ds_id}', force=False) unregister_external_parameter(module, actual_external_param) actual_external_param.all_gather() self.post_sub_module_forward_function(module) def _pre_backward_module_hook(module, inputs, output): @instrument_w_nvtx def _run_before_backward_function(sub_module): # some models (e.g. Albert) may run multiple forwards on the same layer in a loop # before doing backwards, so each backward will need a pre-fetch - using reference # counting to support this scenario #print(f"COUNTER before: {sub_module.applied_pre_backward_ref_cnt}") if sub_module.applied_pre_backward_ref_cnt > 0: self.pre_sub_module_backward_function(sub_module) sub_module.applied_pre_backward_ref_cnt -= 1 #print(f"COUNTER after: {sub_module.applied_pre_backward_ref_cnt}") return _apply_to_tensors_only(module, PreBackwardFunction, _run_before_backward_function, output) #This is an alternate to doing _post_backward_module_hook #it uses tensor.register_hook instead of using torch.autograd.Function def _alternate_post_backward_module_hook(module, inputs): module.ds_grads_remaining = 0 #print(f"Before Forward {module.__class__.__name__}") def _run_after_backward_hook(*unused): module.ds_grads_remaining = module.ds_grads_remaining - 1 if module.ds_grads_remaining == 0: #print(f"After backward {module.__class__.__name__}") self.post_sub_module_backward_function(module) def _run_before_forward_function(input): if input.requires_grad: module.ds_grads_remaining += 1 return _apply_forward_and_backward_to_tensors_only(module, _run_before_forward_function, _run_after_backward_hook, inputs) def _post_backward_module_hook(module, inputs): module.ds_grads_remaining = 0 @instrument_w_nvtx def _run_after_backward_function(sub_module): if sub_module.ds_grads_remaining == 0: self.post_sub_module_backward_function(sub_module) return _apply_to_tensors_only(module, PostBackwardFunction, _run_after_backward_function, inputs) # Pre forward hook self.forward_hooks.append(module.register_forward_pre_hook(_pre_forward_module_hook)) # Post forward hook self.forward_hooks.append(module.register_forward_hook(_post_forward_module_hook)) # Pre backward hook self.backward_hooks.append(module.register_forward_hook(_pre_backward_module_hook)) # post backward hook self.backward_hooks.append(module.register_forward_pre_hook(_post_backward_module_hook)) @torch.no_grad() def pre_sub_module_forward_function(self, sub_module): see_memory_usage(f"Before sub module function {sub_module.__class__.__name__}", force=False) global FWD_MODULE_STACK FWD_MODULE_STACK.append(sub_module) param_coordinator = self.get_param_coordinator(training=sub_module.training) param_coordinator.trace_prologue(sub_module) if param_coordinator.is_record_trace(): param_coordinator.record_module(sub_module) param_coordinator.fetch_sub_module(sub_module) see_memory_usage(f"Before sub module function {sub_module.__class__.__name__} after fetch", force=False) @torch.no_grad() def post_sub_module_forward_function(self, sub_module): see_memory_usage(f"After sub module function {sub_module.__class__.__name__} {sub_module.id} before release", force=False) param_coordinator = 
self.get_param_coordinator(training=sub_module.training) param_coordinator.release_sub_module(sub_module) see_memory_usage(f"After sub module function {sub_module.__class__.__name__} {sub_module.id} after release", force=False) @torch.no_grad() def pre_sub_module_backward_function(self, sub_module): assert sub_module.training, "backward pass is invalid for module in evaluation mode" param_coordinator = self.get_param_coordinator(training=True) param_coordinator.trace_prologue(sub_module) if param_coordinator.is_record_trace(): param_coordinator.record_module(sub_module) param_coordinator.fetch_sub_module(sub_module) @torch.no_grad() def post_sub_module_backward_function(self, sub_module): assert sub_module.training, "backward pass is invalid for module in evaluation mode" see_memory_usage( f"After sub module backward function {sub_module.__class__.__name__} {sub_module.id} before release", force=False) self.get_param_coordinator(training=True).release_sub_module(sub_module) see_memory_usage( f"After sub module backward function {sub_module.__class__.__name__} {sub_module.id} after release", force=False)
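# ---------------------------------------------------------------------------
# Illustrative sketch (not taken from DeepSpeed itself): the fetch/release
# pattern that the pre/post sub-module hooks above implement, reduced to a toy
# "coordinator" that only records events. The names _ToyCoordinator,
# _attach_toy_hooks and _demo_toy_hooks are hypothetical stand-ins for the
# real PartitionedParameterCoordinator machinery.
import torch


class _ToyCoordinator:

    def __init__(self):
        self.events = []

    def fetch(self, module):  # stands in for fetch_sub_module()
        self.events.append(("fetch", module.__class__.__name__))

    def release(self, module):  # stands in for release_sub_module()
        self.events.append(("release", module.__class__.__name__))


def _attach_toy_hooks(module, coordinator):
    # same hook points used by setup_zero_stage3_hooks(): fetch right before a
    # sub-module runs, release right after it finishes
    module.register_forward_pre_hook(lambda m, inp: coordinator.fetch(m))
    module.register_forward_hook(lambda m, inp, out: coordinator.release(m))


def _demo_toy_hooks():
    coord = _ToyCoordinator()
    net = torch.nn.Sequential(torch.nn.Linear(4, 4), torch.nn.ReLU())
    for m in net:
        _attach_toy_hooks(m, coord)
    net(torch.randn(1, 4))
    # coord.events == [('fetch', 'Linear'), ('release', 'Linear'),
    #                  ('fetch', 'ReLU'), ('release', 'ReLU')]
    return coord.events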
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import torch import os from deepspeed import comm as dist from packaging import version as pkg_version from collections import OrderedDict from deepspeed.runtime import ZeROOptimizer from deepspeed.runtime.fp16.loss_scaler import CreateLossScaler from deepspeed.runtime.utils import (bwc_tensor_model_parallel_rank, get_global_norm, empty_cache, see_memory_usage, inf, is_model_parallel_parameter, align_dense_tensors, all_gather_dp_groups) from deepspeed.runtime.zero.config import ZeroStageEnum from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum from deepspeed.ops.adam import DeepSpeedCPUAdam from deepspeed.utils import logger from deepspeed.moe.utils import is_moe_param from deepspeed.git_version_info import version from deepspeed.runtime.constants import PIPE_REPLICATED from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import UtilsBuilder from deepspeed.checkpoint.constants import (DS_VERSION, GROUP_PADDINGS, PARTITION_COUNT, SINGLE_PARTITION_OF_FP32_GROUPS, BASE_OPTIMIZER_STATE, CLIP_GRAD, ZERO_STAGE, PARAM_SLICE_MAPPINGS) from deepspeed.utils import link_hp_params from deepspeed.checkpoint import enable_universal_checkpoint # Toggle this to true to enable correctness test # with gradient partitioning and without pg_correctness_test = False def input(msg): return def split_half_float_double(tensors): device_type = get_accelerator().device_name() dtypes = [ "torch.{}.HalfTensor".format(device_type), "torch.{}.FloatTensor".format(device_type), "torch.{}.DoubleTensor".format(device_type), "torch.{}.BFloat16Tensor".format(device_type) ] buckets = [] for i, dtype in enumerate(dtypes): bucket = [t for t in tensors if t.type() == dtype] if bucket: buckets.append(bucket) return buckets def isclose(a, b, rtol=1e-09, atol=0.0): return abs(a - b) <= max(rtol * max(abs(a), abs(b)), atol) def lcm(x, y): from math import gcd return x * y // gcd(x, y) def get_alignment_padding(tensor_list, alignment): num_elements = sum([tensor.numel() for tensor in tensor_list]) remainder = num_elements % alignment return (alignment - remainder) if remainder else remainder def move_to_cpu(tensor_list): for tensor in tensor_list: tensor.data = tensor.data.cpu() def print_rank_msg(msg): print(f"rank {dist.get_rank()} - {msg}") def _get_padded_tensor(src_tensor, size): if src_tensor.numel() >= size: return src_tensor padded_tensor = torch.zeros(size, dtype=src_tensor.dtype, device=src_tensor.device) slice_tensor = torch.narrow(padded_tensor, 0, 0, src_tensor.numel()) slice_tensor.data.copy_(src_tensor.data) return padded_tensor class DeepSpeedZeroOptimizer(ZeROOptimizer): """ DeepSpeedZeroOptimizer designed to reduce the memory footprint required for training large deep learning models.
For more details please see ZeRO: Memory Optimization Towards Training A Trillion Parameter Models https://arxiv.org/abs/1910.02054 For usage examples, refer to TODO: DeepSpeed Tutorial """ def __init__(self, init_optimizer, param_names, timers, static_loss_scale=1.0, dynamic_loss_scale=False, dynamic_loss_args=None, verbose=True, contiguous_gradients=True, reduce_bucket_size=500000000, allgather_bucket_size=5000000000, dp_process_group=None, expert_parallel_group=None, expert_data_parallel_group=None, reduce_scatter=True, overlap_comm=False, cpu_offload=False, mpu=None, clip_grad=0.0, communication_data_type=torch.float16, postscale_gradients=True, gradient_predivide_factor=1.0, gradient_accumulation_steps=1, ignore_unused_parameters=True, partition_grads=True, round_robin_gradients=False, has_moe_layers=False, fp16_master_weights_and_gradients=False, elastic_checkpoint=False): if dist.get_rank() == 0: logger.info(f"Reduce bucket size {reduce_bucket_size}") logger.info(f"Allgather bucket size {allgather_bucket_size}") logger.info(f"CPU Offload: {cpu_offload}") logger.info(f'Round robin gradient partitioning: {round_robin_gradients}') # The fused optimizer does all the work. We need this layer for two reasons: # 1. maintain same user API from apex.fp16_utils # 2. keep common stuff here in case we need to add new fused optimizer later self.elastic_checkpoint = elastic_checkpoint self.param_names = param_names self.mpu = mpu # differences from apex.fp16_utils: # - assume all model params in fp16 # - assume all params require grad # - flat by groups, not keeping state. TODO: remove state explicitly? # - master grad and unflat master weight never exist. TODO: a way to save out unflat master? if not get_accelerator().is_available(): raise SystemError("Cannot use fp16 without accelerator.") self.optimizer = init_optimizer # Load pre-built or JIT compile (un)flatten ops util_ops = UtilsBuilder().load() self.flatten = util_ops.flatten self.unflatten = util_ops.unflatten # ZeRO stage 1 (False) or 2 (True) self.partition_gradients = partition_grads self.zero_stage_string = "ZeRO-2" if partition_grads else "ZeRO-1" self.timers = timers self.reduce_scatter = reduce_scatter self.overlap_comm = overlap_comm self.cpu_offload = cpu_offload self.deepspeed_adam_offload = cpu_offload self.device = get_accelerator().current_device_name() if not self.cpu_offload else 'cpu' self.dp_process_group = dp_process_group #expert parallel group self.ep_process_group = expert_parallel_group #data parallel group for experts self.expert_dp_process_group = expert_data_parallel_group #data parallel size for non-experts dp_size = dist.get_world_size(group=self.dp_process_group) #For MoE models this may be different for different param group #It will be modified during MoE setup later in the init self.real_dp_process_group = [dp_process_group for i in range(len(self.optimizer.param_groups))] self.partition_count = [dp_size for i in range(len(self.optimizer.param_groups))] self.is_gradient_accumulation_boundary = True # CPU-Offload requires contiguous gradients self.contiguous_gradients = contiguous_gradients or cpu_offload self.has_moe_layers = has_moe_layers if self.has_moe_layers: self._configure_moe_settings() self._global_grad_norm = 0.
if mpu is None: self.model_parallel_group = None self.model_parallel_world_size = 1 self.model_parallel_rank = 0 else: self.model_parallel_group = mpu.get_model_parallel_group() self.model_parallel_world_size = mpu.get_model_parallel_world_size() self.model_parallel_rank = bwc_tensor_model_parallel_rank(mpu) self.overflow = False self.clip_grad = clip_grad self.communication_data_type = communication_data_type self.gradient_predivide_factor = gradient_predivide_factor self.postscale_gradients = postscale_gradients self.gradient_accumulation_steps = gradient_accumulation_steps self.micro_step_id = 0 self.ignore_unused_parameters = ignore_unused_parameters self.round_robin_gradients = round_robin_gradients self.extra_large_param_to_reduce = None self.fp16_master_weights_and_gradients = fp16_master_weights_and_gradients if self.fp16_master_weights_and_gradients: assert self.cpu_offload and type(self.optimizer) in [DeepSpeedCPUAdam], \ f"fp16_master_and_gradients requires optimizer to support keeping fp16 master and gradients while keeping the optimizer states in fp32."\ f"Currently only supported using ZeRO-Offload with DeepSpeedCPUAdam. But current setting is ZeRO-Offload:{self.cpu_offload} and optimizer type {type(self.optimizer)}." \ f"Either disable fp16_master_weights_and_gradients or enable {self.zero_stage_string} Offload with DeepSpeedCPUAdam." if self.reduce_scatter: valid_reduce_scatter_dtypes = (torch.float16, torch.bfloat16, torch.float32) assert self.communication_data_type in valid_reduce_scatter_dtypes, f"{self.zero_stage_string} supports {valid_reduce_scatter_dtypes} communication_data_type with reduce scatter enabled. Got: '{self.communication_data_type}'" assert self.gradient_predivide_factor == 1.0, "gradient_predivide_factor != 1.0 is not yet supported with {self.zero_stage_string} with reduce scatter enabled" assert self.postscale_gradients, "pre-scale gradients is not yet supported with {self.zero_stage_string} with reduce scatter enabled" # param flattened by groups self.bit16_groups = [] self.bit16_groups_flat = [] # param partitioned by data parallel degree # this will contain a list of equal sized tensors # each of which will be updated by a different process self.parallel_partitioned_bit16_groups = [] # a single 32-bit partition of the parallel partitioned parameters # that this process will update self.single_partition_of_fp32_groups = [] # param partition info # These are the parameters in each group that will not be updated by this process directly self.params_not_in_partition = [] # These are the parameters that will be updated by this process directly self.params_in_partition = [] # Offset from the first parameter in the the self.params_in_partition # the parameter boundaries may not align with partition boundaries # so we need to keep track of the offset self.first_offset = [] # number of elements per partition in each group self.partition_size = [] # align nccl all-gather send buffers to 4-byte boundary self.nccl_start_alignment_factor = 2 # 4-byte alignment/sizeof(fp16) = 2 assert ( allgather_bucket_size % self.nccl_start_alignment_factor == 0 ), f"allgather_bucket_size must be a multiple of nccl_start_alignment_factor, {self.nccl_start_alignment_factor} " self.all_reduce_print = False self.dtype = self.optimizer.param_groups[0]['params'][0].dtype self.round_robin_bit16_groups = [] self.round_robin_bit16_indices = [] # Use different parallel to do all_to_all_reduce related things # padding on each partition for alignment purposes self.groups_padding = [] # 
loop to deal with groups for i, param_group in enumerate(self.optimizer.param_groups): partition_id = dist.get_rank(group=self.real_dp_process_group[i]) # push this group to list before modify # TODO: Explore simplification that avoids the extra book-keeping by pushing the reordered group trainable_parameters = [param for param in param_group['params'] if param.requires_grad] self.bit16_groups.append(trainable_parameters) # not sure why apex was cloning the weights before flattening # removing cloning here see_memory_usage(f"Before moving param group {i} to CPU") # move all the parameters to cpu to free up GPU space for creating flat buffer move_to_cpu(self.bit16_groups[i]) empty_cache() see_memory_usage(f"After moving param group {i} to CPU", force=False) # Reorder group parameters for load balancing of gradient partitioning during backward among ranks. # This ensures that gradients are reduced in a fashion such that ownership round robins among the ranks. # For example, rather than 3 gradients (g_n+2, g_n+1, g_n) that are reduced consecutively belonging # to the same rank, instead they will belong to 3 ranks (r_m+2, r_m+1, r_m). if self.round_robin_gradients: round_robin_tensors, round_robin_indices = self._round_robin_reorder( self.bit16_groups[i], dist.get_world_size(group=self.real_dp_process_group[i])) else: round_robin_tensors = self.bit16_groups[i] round_robin_indices = list(range(len(self.bit16_groups[i]))) self.round_robin_bit16_groups.append(round_robin_tensors) self.round_robin_bit16_indices.append(round_robin_indices) # create flat buffer in CPU and move to GPU self.bit16_groups_flat.append( self.flatten_dense_tensors_aligned( self.round_robin_bit16_groups[i], self.nccl_start_alignment_factor * dist.get_world_size(group=self.real_dp_process_group[i])).to( get_accelerator().current_device_name())) see_memory_usage(f"After flattening and moving param group {i} to GPU", force=False) # Record padding required for alignment if partition_id == dist.get_world_size(group=self.real_dp_process_group[i]) - 1: padding = self.bit16_groups_flat[i].numel() - sum( [t.numel() for t in self.round_robin_bit16_groups[i]]) else: padding = 0 self.groups_padding.append(padding) if dist.get_rank(group=self.real_dp_process_group[i]) == 0: see_memory_usage(f"After Flattening and after emptying param group {i} cache", force=False) # set model bit16 weight to slices of flattened buffer self._update_model_bit16_weights(i) # divide the flat weights into near equal partition equal to the data parallel degree # each process will compute on a different part of the partition data_parallel_partitions = self.get_data_parallel_partitions(self.bit16_groups_flat[i], i) self.parallel_partitioned_bit16_groups.append(data_parallel_partitions) # verify that data partition start locations are 4-byte aligned for partitioned_data in data_parallel_partitions: assert (partitioned_data.data_ptr() % (2 * self.nccl_start_alignment_factor) == 0) # A partition of the fp32 master weights that will be updated by this process. # Note that the params in single_partition_of_fp32_groups is cloned and detached # from the origin params of the model. 
if not fp16_master_weights_and_gradients: self.single_partition_of_fp32_groups.append(self.parallel_partitioned_bit16_groups[i][partition_id].to( self.device).clone().float().detach()) else: self.single_partition_of_fp32_groups.append(self.parallel_partitioned_bit16_groups[i][partition_id].to( self.device).clone().half().detach()) # Set local optimizer to have flat params of its own partition. # After this, the local optimizer will only contain its own partition of params. # In that case, the local optimizer only saves the states(momentum, variance, etc.) related to its partition's params(zero stage1). self.single_partition_of_fp32_groups[ i].requires_grad = True # keep this in case internal optimizer uses it param_group['params'] = [self.single_partition_of_fp32_groups[i]] partition_size = len(self.bit16_groups_flat[i]) / dist.get_world_size(group=self.real_dp_process_group[i]) params_in_partition, params_not_in_partition, first_offset = self.get_partition_info( self.round_robin_bit16_groups[i], partition_size, partition_id) self.partition_size.append(partition_size) self.params_in_partition.append(params_in_partition) self.params_not_in_partition.append(params_not_in_partition) self.first_offset.append(first_offset) for rank in range(dist.get_world_size()): if dist.get_rank() == rank: print( f"Rank: {rank} partition count {self.partition_count} and sizes{[(p.numel(), self.is_moe_param_group[i] if hasattr(self, 'is_moe_param_group') else False) for i,p in enumerate(self.single_partition_of_fp32_groups)]} " ) dist.barrier() self.reduce_bucket_size = int(reduce_bucket_size) self.allgather_bucket_size = int(allgather_bucket_size) self.reduction_event = get_accelerator().Event(enable_timing=False, blocking=False) self.reduction_stream = get_accelerator().Stream() self.cpu_computation_stream = get_accelerator().Stream() self.copy_grad_stream = get_accelerator().Stream() self.callback_queued = False self.param_dict = {} # map between param_id and bool to specify if a param is in this partition self.is_param_in_current_partition = {} self.grads_in_ipg_bucket = [] self.params_in_ipg_bucket = [] self.elements_in_ipg_bucket = 0 self.params_already_reduced = [] self._release_ipg_buffers() self.previous_reduced_grads = None self.ipg_bucket_has_moe_params = False # simplified param id self.param_id = {} #interesting code: unique ids being assigned to individual parameters largest_param_numel = 0 count = 0 for i, params_group in enumerate(self.bit16_groups): for param in params_group: unique_id = id(param) self.param_id[unique_id] = count self.param_dict[count] = param self.params_already_reduced.append(False) if param.numel() > largest_param_numel: largest_param_numel = param.numel() count = count + 1 for param_group in self.params_in_partition: for param in param_group: self.is_param_in_current_partition[self.get_param_id(param)] = True for param_group in self.params_not_in_partition: for param in param_group: self.is_param_in_current_partition[self.get_param_id(param)] = False if self.cpu_offload: self.accumulated_grads_in_cpu = {} self.norm_for_param_grads = {} self.local_overflow = False self.grad_position = {} self.temp_grad_buffer_for_cpu_offload = get_accelerator().pin_memory( torch.zeros(largest_param_numel, device=self.device, dtype=self.dtype)) self.temp_grad_buffer_for_gpu_offload = torch.zeros(largest_param_numel, device=get_accelerator().current_device_name(), dtype=self.dtype) for i, params_group in enumerate(self.bit16_groups): self.get_grad_position(i, self.params_in_partition[i], 
self.first_offset[i], self.partition_size[i]) # mapping from parameter to partition that it belongs to self.param_to_partition_ids = {} # stores if a partition has been reduced in this step self.is_partition_reduced = {} # number of grads in partition that still need to be computed self.remaining_grads_in_partition = {} # total number of grads in partition self.total_grads_in_partition = {} # stores if a grad in a partition has been computed or not self.is_grad_computed = {} # stores the offset at which a parameter gradient needs to be inserted in a partition self.grad_partition_insertion_offset = {} # the offset in the gradient at which it must be inserted at the beginning of the partition self.grad_start_offset = {} # will store the averaged gradients required by this partition self.averaged_gradients = {} # For cpu_offload, will store the averaged gradients required by this partition self.offload_gradient_dict = {} # store index of first parameter in each partition self.first_param_index_in_partition = {} # initializes all data structures for implementing gradient partitioning self.initialize_gradient_partitioning_data_structures() # resets the data structure value for the next backward propagation self.reset_partition_gradient_structures() # creates backward hooks for gradient partitioning if self.partition_gradients or self.overlap_comm: self.create_reduce_and_remove_grad_hooks() self.custom_loss_scaler = False self.external_loss_scale = None # we may have a way of fusing dynamic scale. Do not support for now self.loss_scaler = CreateLossScaler(dtype=self.dtype, static_loss_scale=static_loss_scale, dynamic_scaling=dynamic_loss_scale, dynamic_loss_args=dynamic_loss_args) self.dynamic_loss_scale = self.loss_scaler.dynamic see_memory_usage("Before initializing optimizer states", force=True) self.initialize_optimizer_states() see_memory_usage("After initializing optimizer states", force=True) if dist.get_rank() == 0: logger.info(f"optimizer state initialized") if dist.get_rank(group=self.dp_process_group) == 0: see_memory_usage(f"After initializing ZeRO optimizer", force=True) self._link_all_hp_params() self._enable_universal_checkpoint() self._param_slice_mappings = self._create_param_mapping() def _enable_universal_checkpoint(self): for lp_param_group in self.bit16_groups: enable_universal_checkpoint(param_list=lp_param_group) def _create_param_mapping(self): param_mapping = [] for i, _ in enumerate(self.optimizer.param_groups): param_mapping_per_group = OrderedDict() for lp in self.bit16_groups[i]: if lp._hp_mapping is not None: lp_name = self.param_names[lp] param_mapping_per_group[lp_name] = lp._hp_mapping.get_hp_fragment_address() param_mapping.append(param_mapping_per_group) return param_mapping def _link_all_hp_params(self): dp_world_size = dist.get_world_size(group=self.dp_process_group) if self.cpu_offload: self._get_offload_gradient_dict() for i, _ in enumerate(self.optimizer.param_groups): # Link bit16 and fp32 params in partition partition_id = dist.get_rank(group=self.real_dp_process_group[i]) partition_size = self.bit16_groups_flat[i].numel() // dp_world_size flat_hp_partition = self.single_partition_of_fp32_groups[i] link_hp_params(lp_param_list=self.bit16_groups[i], flat_hp_partition=flat_hp_partition, gradient_dict=self.averaged_gradients, offload_gradient_dict=self.offload_gradient_dict, use_offload=self.cpu_offload, param_group_index=i, partition_start=partition_id * partition_size, partition_size=partition_size, 
partition_optimizer_state=self.optimizer.state[flat_hp_partition], dp_group=self.real_dp_process_group[i]) def is_moe_group(self, group): return 'moe' in group and group['moe'] def _configure_moe_settings(self): # if we're using ZeRO stage 2, ensure contiguous gradients are used if self.partition_gradients: assert self.contiguous_gradients, "Contiguous Gradients in ZeRO Stage 2 must be set to True for MoE. Other code paths are not tested with MoE" # NOTE: To run ZeRO stage 1 with MoE, we need to set self.contiguous_gradients to True or ignore the assertion if not self.partition_gradients and not self.contiguous_gradients: logger.warn( "ZeRO Stage 1 has not been thoroughly tested with MoE. This configuration is still experimental.") assert self.reduce_scatter, "Reduce Scatter in ZeRO Stage 2 must be set to True for MoE. Other code paths are not tested with MoE" assert any( [self.is_moe_group(group) for group in self.optimizer.param_groups] ), "The model has moe layers, but None of the param groups are marked as MoE. Create a param group with 'moe' key set to True before creating optimizer" self.is_moe_param_group = [] for i, group in enumerate(self.optimizer.param_groups): if self.is_moe_group(group): assert all([is_moe_param(param) for param in group['params']]), "All params in MoE group must be MoE params" self.real_dp_process_group[i] = self.expert_dp_process_group[group['name']] self.partition_count[i] = dist.get_world_size(group=self.expert_dp_process_group[group['name']]) self.is_moe_param_group.append(True) else: self.is_moe_param_group.append(False) assert self.expert_dp_process_group is not None, "Expert data parallel group should be configured with MoE" assert self.ep_process_group is not None, "Expert parallel group should be configured with MoE" def _update_model_bit16_weights(self, group_index): updated_params = self.unflatten(self.bit16_groups_flat[group_index], self.round_robin_bit16_groups[group_index]) for p, q in zip(self.round_robin_bit16_groups[group_index], updated_params): p.data = q.data # set model fp16 weight to slices of reordered flattened buffer for param_index, param in enumerate(self.bit16_groups[group_index]): new_index = self.round_robin_bit16_indices[group_index][param_index] param.data = self.round_robin_bit16_groups[group_index][new_index].data def _round_robin_reorder(self, tensor_list, num_partitions): # disable round robin if need to debug something # return tensor_list, list(range(len(tensor_list))) partition_tensors = {} for i, tensor in enumerate(tensor_list): j = i % num_partitions if not j in partition_tensors: partition_tensors[j] = [] partition_tensors[j].append((i, tensor)) reordered_tensors = [] reordered_indices = {} for partition_index in partition_tensors.keys(): for i, (original_index, tensor) in enumerate(partition_tensors[partition_index]): reordered_indices[original_index] = len(reordered_tensors) reordered_tensors.append(tensor) return reordered_tensors, reordered_indices def _release_ipg_buffers(self): if self.contiguous_gradients: self.ipg_buffer = None self.grads_in_partition = None self.grads_in_partition_offset = 0 def initialize_optimizer_states(self): for i, group in enumerate(self.bit16_groups): single_grad_partition = torch.zeros(int(self.partition_size[i]), dtype=self.single_partition_of_fp32_groups[i].dtype, device=self.device) self.single_partition_of_fp32_groups[i].grad = get_accelerator().pin_memory( single_grad_partition) if self.cpu_offload else single_grad_partition # Initialize the optimizer states with the flattended 
fp32 partition. # State initialization for the Adagrad optimizer occurs at construction as opposed to other optimizers # which do lazy initialization of the state at the first call to step. if isinstance(self.optimizer, torch.optim.Adagrad): self.optimizer = torch.optim.Adagrad(self.single_partition_of_fp32_groups, **self.optimizer.defaults) else: self.optimizer.step() if not self.cpu_offload: for group in self.single_partition_of_fp32_groups: group.grad = None #class init return ######################################################################### #################### ZeRO Stage 1 - reduce gradients #################### ######################################################################### def reduce_gradients(self, pipeline_parallel=False): world_size = dist.get_world_size(self.dp_process_group) my_rank = dist.get_rank(self.dp_process_group) # with PP we must create ipg buffer, since backward is handled outside zero if pipeline_parallel and self.contiguous_gradients: self.ipg_buffer = [] buf_0 = torch.empty(int(self.reduce_bucket_size), dtype=self.dtype, device=get_accelerator().current_device_name()) self.ipg_buffer.append(buf_0) self.ipg_index = 0 if not self.overlap_comm: for i, group in enumerate(self.bit16_groups): for param in group: if param.grad is not None: self.reduce_ready_partitions_and_remove_grads(param, i) # reduce any pending grads in either hook/non-hook case self.overlapping_partition_gradients_reduce_epilogue() ######################################################################### #########################ZeRO Partition Gradients######################## ######################################################################### def get_first_param_index(self, group_id, param_group, partition_id): for index, param in enumerate(param_group): param_id = self.get_param_id(param) if partition_id in self.param_to_partition_ids[group_id][param_id]: return index return None def initialize_gradient_partitioning_data_structures(self): for i, param_group in enumerate(self.round_robin_bit16_groups): total_partitions = dist.get_world_size(group=self.real_dp_process_group[i]) self.param_to_partition_ids[i] = {} self.is_partition_reduced[i] = {} self.total_grads_in_partition[i] = {} self.remaining_grads_in_partition[i] = {} self.is_grad_computed[i] = {} self.grad_partition_insertion_offset[i] = {} self.grad_start_offset[i] = {} self.first_param_index_in_partition[i] = {} for partition_id in range(total_partitions): self.is_grad_computed[i][partition_id] = {} self.grad_partition_insertion_offset[i][partition_id] = {} self.grad_start_offset[i][partition_id] = {} self.total_grads_in_partition[i][partition_id] = 0 self.initialize_gradient_partition(i, param_group, partition_id) self.is_partition_reduced[i][partition_id] = False self.first_param_index_in_partition[i][partition_id] = self.get_first_param_index( i, param_group, partition_id) def independent_gradient_partition_epilogue(self): self.report_ipg_memory_usage(f"In ipg_epilogue before reduce_ipg_grads", 0) self.reduce_ipg_grads() self.report_ipg_memory_usage(f"In ipg_epilogue after reduce_ipg_grads", 0) # if dist.get_rank() == 0: # logger.info("Params already reduced %s", self.params_already_reduced) for i in range(len(self.params_already_reduced)): self.params_already_reduced[i] = False if self.overlap_comm: get_accelerator().synchronize() # It is safe to clear previously reduced grads of other partitions self._clear_previous_reduced_grads() if self.cpu_offload is False: for i, _ in enumerate(self.bit16_groups): if not i 
in self.averaged_gradients or self.averaged_gradients[i] is None: self.averaged_gradients[i] = self.get_flat_partition( self.params_in_partition[i], self.first_offset[i], self.partition_size[i], dtype=self.dtype, device=get_accelerator().current_device_name(), return_tensor_list=True) else: avg_new = self.get_flat_partition(self.params_in_partition[i], self.first_offset[i], self.partition_size[i], dtype=self.dtype, device=get_accelerator().current_device_name(), return_tensor_list=True) for accumulated_grad, new_avg_grad in zip(self.averaged_gradients[i], avg_new): accumulated_grad.add_(new_avg_grad) self._release_ipg_buffers() # No need to keep the gradients anymore. # All gradients required by the step # are in self.averaged_gradients self.zero_grad(set_to_none=True) see_memory_usage(f"End ipg_epilogue") # resets all partition to no reduced # sets remaining grads to the total number of grads in each partition # set is grad computed to false for all grads in partition def reset_partition_gradient_structures(self): for i, _ in enumerate(self.bit16_groups): total_partitions = dist.get_world_size(group=self.real_dp_process_group[i]) for partition_id in range(total_partitions): self.is_partition_reduced[i][partition_id] = False self.remaining_grads_in_partition[i][partition_id] = self.total_grads_in_partition[i][partition_id] for param_id in self.is_grad_computed[i][partition_id]: self.is_grad_computed[i][partition_id][param_id] = False def initialize_gradient_partition(self, i, param_group, partition_id): def set_key_value_list(dictionary, key, value): if key in dictionary: dictionary[key].append(value) else: dictionary[key] = [value] def increment_value(dictionary, key): if key in dictionary: dictionary[key] += 1 else: dictionary[key] = 1 partition_size = self.partition_size[i] start_index = partition_size * partition_id end_index = partition_size * (partition_id + 1) current_index = 0 first_offset = 0 for param in param_group: param_size = param.numel() param_id = self.get_param_id(param) if (current_index >= start_index and current_index < end_index): set_key_value_list(self.param_to_partition_ids[i], param_id, partition_id) increment_value(self.total_grads_in_partition[i], partition_id) self.is_grad_computed[i][partition_id][param_id] = False self.grad_partition_insertion_offset[i][partition_id][param_id] = current_index - start_index self.grad_start_offset[i][partition_id][param_id] = 0 elif start_index > current_index and start_index < (current_index + param_size): assert (first_offset == 0 ), "This can happen either zero or only once as this must be the first tensor in the partition" first_offset = start_index - current_index set_key_value_list(self.param_to_partition_ids[i], param_id, partition_id) increment_value(self.total_grads_in_partition[i], partition_id) self.is_grad_computed[i][partition_id][param_id] = False self.grad_partition_insertion_offset[i][partition_id][param_id] = 0 self.grad_start_offset[i][partition_id][param_id] = first_offset current_index = current_index + param_size def overlapping_partition_gradients_reduce_epilogue(self): self.independent_gradient_partition_epilogue() def create_reduce_and_remove_grad_hooks(self): self.grad_accs = [] for i, param_group in enumerate(self.bit16_groups): for param in param_group: if param.requires_grad: def wrapper(param, i): param_tmp = param.expand_as(param) grad_acc = param_tmp.grad_fn.next_functions[0][0] def reduce_partition_and_remove_grads(*notneeded): self.reduce_ready_partitions_and_remove_grads(param, i) 
grad_acc.register_hook(reduce_partition_and_remove_grads) self.grad_accs.append(grad_acc) wrapper(param, i) def get_param_id(self, param): unique_id = id(param) return self.param_id[unique_id] def report_ipg_memory_usage(self, tag, param_elems): elem_count = self.elements_in_ipg_bucket + param_elems percent_of_bucket_size = (100.0 * elem_count) // self.reduce_bucket_size see_memory_usage( f"{tag}: elems in_bucket {self.elements_in_ipg_bucket} param {param_elems} max_percent {percent_of_bucket_size}" ) # create a flat tensor aligned at the alignment boundary def flatten_dense_tensors_aligned(self, tensor_list, alignment): return self.flatten(align_dense_tensors(tensor_list, alignment)) ############### Independent Partition Gradient ######################## def reduce_independent_p_g_buckets_and_remove_grads(self, param, i): if self.elements_in_ipg_bucket + param.numel() > self.reduce_bucket_size: self.report_ipg_memory_usage("In ipg_remove_grads before reduce_ipg_grads", param.numel()) self.reduce_ipg_grads() if self.contiguous_gradients and self.overlap_comm: # Swap ipg_index between 0 and 1 self.ipg_index = 1 - self.ipg_index self.report_ipg_memory_usage("In ipg_remove_grads after reduce_ipg_grads", param.numel()) param_id = self.get_param_id(param) assert self.params_already_reduced[param_id] == False, \ f"The parameter {param_id} has already been reduced. \ Gradient computed twice for this partition. \ Multiple gradient reduction is currently not supported" if self.contiguous_gradients: if param.numel() > self.reduce_bucket_size: self.extra_large_param_to_reduce = param else: # keeping the gradients contiguous to prevent memory fragmentation, and avoid flattening new_grad_tensor = self.ipg_buffer[self.ipg_index].narrow(0, self.elements_in_ipg_bucket, param.numel()) new_grad_tensor.copy_(param.grad.view(-1)) param.grad.data = new_grad_tensor.data.view_as(param.grad) self.elements_in_ipg_bucket += param.numel() assert param.grad is not None, f"rank {dist.get_rank()} - Invalid to reduce Param {param_id} with None gradient" self.grads_in_ipg_bucket.append(param.grad) self.params_in_ipg_bucket.append((i, param, param_id)) #make sure the average tensor function knows how to average the gradients if is_moe_param(param): self.ipg_bucket_has_moe_params = True self.report_ipg_memory_usage("End ipg_remove_grads", 0) def print_rank_0(self, message): if dist.get_rank() == 0: logger.info(message) def gradient_reduction_w_predivide(self, tensor): dp_world_size = dist.get_world_size(group=self.dp_process_group) tensor_to_allreduce = tensor if self.communication_data_type != tensor.dtype: tensor_to_allreduce = tensor.to(self.communication_data_type) if self.postscale_gradients: if self.gradient_predivide_factor != 1.0: tensor_to_allreduce.mul_(1. 
/ self.gradient_predivide_factor) dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group) if self.gradient_predivide_factor != dp_world_size: tensor_to_allreduce.mul_(self.gradient_predivide_factor / dp_world_size) else: tensor_to_allreduce.div_(dp_world_size) dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group) if self.communication_data_type != tensor.dtype and tensor is not tensor_to_allreduce: tensor.copy_(tensor_to_allreduce) return tensor def average_tensor(self, tensor): if self.overlap_comm: stream = self.reduction_stream stream.wait_stream(get_accelerator().current_stream()) else: stream = get_accelerator().current_stream() with get_accelerator().stream(stream): if not self.reduce_scatter: self.gradient_reduction_w_predivide(tensor) return # Accumulate destination ranks and bucket offsets for each gradient slice. # Note: potential future optimization, record access pattern of parameters # in backward pass and partition gradients w.r.t. access pattern so that our # bucket is guaranteed to be contiguous w.r.t. ranks rank_and_offsets = [] real_dp_process_group = [] curr_size = 0 prev_id = -1 process_group = self.dp_process_group # count = 0 for i, param, param_id in self.params_in_ipg_bucket: process_group = self.dp_process_group #Averages gradients at parameter level if ipg has a moe param #Otherwise averaging is done at the entire buffer level at the end of the loop # MoE param have different groups if self.ipg_bucket_has_moe_params: process_group = self.expert_dp_process_group[param.group_name] if is_moe_param( param) else self.dp_process_group param.grad.data.div_(dist.get_world_size(group=process_group)) partition_ids = self.param_to_partition_ids[i][param_id] assert all([p_id < dist.get_world_size(group=process_group) for p_id in partition_ids ]), f"world size {dist.get_world_size(group=process_group)} and p_ids: {partition_ids}" partition_size = self.partition_size[i] # Get all partition ids + their offsets partition_ids_w_offsets = [] for partition_id in partition_ids: offset = self.grad_start_offset[i][partition_id][param_id] partition_ids_w_offsets.append((partition_id, offset)) partition_ids_w_offsets.sort(key=lambda t: t[1]) # Calculate rank and offsets for grad slices for idx in range(len(partition_ids_w_offsets)): partition_id, offset = partition_ids_w_offsets[idx] # if dist.get_rank() == 0 and count < 100: # print(f"Rank {dist.get_rank()} rank offset id {idx} calculated dp size {dist.get_world_size(group=process_group)} real dp size {dist.get_world_size(self.real_dp_process_group[i])} and dst: {partition_id}") # count += 1 # Calculate numel for grad slice depending on partition location if idx == len(partition_ids_w_offsets) - 1: # Last partition_id uses its own offset numel = param.numel() - offset else: # Set numel to next partition's offset numel = partition_ids_w_offsets[idx + 1][1] - offset # Merge bucket ranges if they belong to the same rank if partition_id == prev_id: prev_pid, prev_size, prev_numel = rank_and_offsets[-1] rank_and_offsets[-1] = (prev_pid, prev_size, prev_numel + numel) else: rank_and_offsets.append((partition_id, curr_size, numel)) real_dp_process_group.append(process_group) curr_size += numel prev_id = partition_id if not self.ipg_bucket_has_moe_params: tensor.div_(dist.get_world_size(group=self.dp_process_group)) tensor_to_reduce = tensor if self.communication_data_type != tensor.dtype: tensor_to_reduce = tensor.to(self.communication_data_type) async_handles = [] for i, (dst, bucket_offset, numel) in 
enumerate(rank_and_offsets): grad_slice = tensor_to_reduce.narrow(0, int(bucket_offset), int(numel)) # if dist.get_rank() == 0: # print(f"Rank {dist.get_rank()} rank offset id {i} real dp size {dist.get_world_size(group=real_dp_process_group[i])} and dst: {dst}") # dist.barrier() #dist.barrier() dst_rank = dist.get_global_rank(real_dp_process_group[i], dst) async_handle = dist.reduce(grad_slice, dst=dst_rank, group=real_dp_process_group[i], async_op=True) async_handles.append(async_handle) for handle in async_handles: handle.wait() if self.communication_data_type != tensor.dtype: tensor.copy_(tensor_to_reduce) ############################################################################## ############################# CPU Offload Methods############################# ############################################################################## def get_grad_position(self, group_id, tensor_list, first_offset, partition_size): current_offset = 0 for i, tensor in enumerate(tensor_list): param_id = self.get_param_id(tensor) param_start_offset = 0 num_elements = tensor.numel() # we need to offset to get to the right element if i == 0 and first_offset > 0: tensor_offset = first_offset num_elements = num_elements - tensor_offset param_start_offset = first_offset # we dont need all elements of the tensor if num_elements > (partition_size - current_offset): num_elements = partition_size - current_offset self.grad_position[param_id] = [ int(group_id), int(param_start_offset), int(current_offset), int(num_elements) ] current_offset += num_elements def update_overflow_tracker_for_param_grad(self, param): if param.grad is not None and self._has_inf_or_nan(param.grad.data): self.local_overflow = True def _get_offload_gradient_dict(self): for param_group_index, _ in enumerate(self.optimizer.param_groups): self.offload_gradient_dict[param_group_index] = [] for lp_param in self.params_in_partition[param_group_index]: param_id = self.get_param_id(lp_param) [_, _, dest_offset, num_elements] = self.grad_position[param_id] dest_tensor = self.single_partition_of_fp32_groups[param_group_index].grad.view(-1).narrow( 0, dest_offset, num_elements) self.offload_gradient_dict[param_group_index].append(dest_tensor) def async_accumulate_grad_in_cpu_via_gpu(self, param): param_id = self.get_param_id(param) [i, source_offset, dest_offset, num_elements] = self.grad_position[param_id] # copy to a preexisiting buffer to avoid memory allocation penalty dest_buffer = self.temp_grad_buffer_for_gpu_offload.view(-1).narrow(0, 0, param.numel()) #buffer for storing gradients for this parameter in CPU def buffer_to_accumulate_to_in_cpu(): if not self.fp16_master_weights_and_gradients: return get_accelerator().pin_memory(torch.zeros(param.numel(), dtype=param.dtype, device=self.device)) else: return self.single_partition_of_fp32_groups[i].grad.view(-1).narrow(0, dest_offset, num_elements) #accumulate gradients into param.grad or parts of it that belongs to this partition def accumulate_gradients(): if not self.fp16_master_weights_and_gradients: dest_buffer.copy_(self.accumulated_grads_in_cpu[param_id].view(-1), non_blocking=True) param.grad.data.view(-1).add_(dest_buffer) else: dest_buffer.narrow(0, source_offset, num_elements).copy_(self.accumulated_grads_in_cpu[param_id].view(-1), non_blocking=True) param.grad.data.view(-1).narrow(0, source_offset, num_elements).add_(dest_buffer.narrow(0, source_offset, num_elements)) #move accumulated gradients back to CPU def copy_gradients_to_cpu(): if not self.fp16_master_weights_and_gradients: 
self.accumulated_grads_in_cpu[param_id].data.copy_(param.grad.data.view(-1), non_blocking=True) else: self.accumulated_grads_in_cpu[param_id].data.copy_(param.grad.data.view(-1).narrow( 0, source_offset, num_elements), non_blocking=True) if param_id not in self.accumulated_grads_in_cpu: self.accumulated_grads_in_cpu[param_id] = buffer_to_accumulate_to_in_cpu() if self.micro_step_id > 0: accumulate_gradients() # at the boundary we will send 32bit directly if not self.is_gradient_accumulation_boundary: copy_gradients_to_cpu() def set_norm_for_param_grad(self, param): param_id = self.get_param_id(param) accumulated_grad = self.accumulated_grads_in_cpu[ param_id] if self.gradient_accumulation_steps > 1 else param.grad [i, source_offset, dest_offset, num_elements] = self.grad_position[param_id] start = source_offset accumulated_grad = accumulated_grad.view(-1).narrow(0, start, num_elements) self.norm_for_param_grads[param_id] = accumulated_grad.data.double().norm(2) def set_norm_for_param_grad_in_gpu(self, param): param_id = self.get_param_id(param) accumulated_grad = param.grad [i, source_offset, dest_offset, num_elements] = self.grad_position[param_id] start = source_offset accumulated_grad = accumulated_grad.view(-1).narrow(0, start, num_elements) self.norm_for_param_grads[param_id] = accumulated_grad.data.double().norm(2) def async_inplace_copy_grad_to_fp32_buffer_from_gpu(self, param): param_id = self.get_param_id(param) [i, source_offset, dest_offset, num_elements] = self.grad_position[param_id] dest_tensor = self.single_partition_of_fp32_groups[i].grad.view(-1).narrow(0, dest_offset, num_elements) src_tensor = param.grad.view(-1).narrow(0, source_offset, num_elements) if not self.fp16_master_weights_and_gradients: src_tensor = src_tensor.float() dest_tensor.copy_(src_tensor, non_blocking=True) param.grad = None #offload only def complete_grad_norm_calculation_for_cpu_offload(self, params): total_norm = 0.0 norm_type = 2.0 for p in params: # Pipeline parallelism may replicate parameters. Avoid multi-counting. if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated: continue if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0): param_id = self.get_param_id(p) # as some model have trainable parameters but skipped in training, # their backward hooks in self.create_reduce_and_remove_grad_hooks() will not run, # so they have no norm_for_param_grads if param_id in self.norm_for_param_grads: param_norm = self.norm_for_param_grads[param_id] total_norm += param_norm.item()**2 else: # As unused parameters in modules may not be expected sometimes, # add an explicit error msg when it occurred and an option to # avoid the error assert self.ignore_unused_parameters, """ This assert indicates that your module has parameters that were not used in producing loss. You can avoid this assert by (1) enable ignore_unused_parameters option in zero_optimization config; (2) making sure all trainable parameters and `forward` function outputs participate in calculating loss. """ # Sum across all model parallel GPUs. total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=self.dp_process_group) self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.SUM) total_norm = total_norm_cuda[0].item()**(1. 
/ norm_type) if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm: total_norm = -1 return total_norm ############################################################################################ def copy_grads_in_partition(self, param): if self.cpu_offload: if self.gradient_accumulation_steps > 1: self.async_accumulate_grad_in_cpu_via_gpu(param) if self.is_gradient_accumulation_boundary: self.set_norm_for_param_grad_in_gpu(param) self.update_overflow_tracker_for_param_grad(param) self.async_inplace_copy_grad_to_fp32_buffer_from_gpu(param) return #print(f"ID {self.get_param_id(param)} grad norm {param.grad.norm()}") if self.grads_in_partition is None: self.grads_in_partition_offset = 0 total_size = 0 for group in self.params_in_partition: for param_in_partition in group: total_size += param_in_partition.numel() see_memory_usage(f"before copying {total_size} gradients into partition") self.grads_in_partition = torch.empty(int(total_size), dtype=self.dtype, device=get_accelerator().current_device_name()) see_memory_usage(f"after copying {total_size} gradients into partition") # The allreduce buffer will be rewritten. Copy the gradients in partition to a new buffer new_grad_tensor = self.grads_in_partition.view(-1).narrow(0, self.grads_in_partition_offset, param.numel()) new_grad_tensor.copy_(param.grad.view(-1)) param.grad.data = new_grad_tensor.data.view_as(param.grad) #print(f"Grad norm after copy to contiguous_buffer {param.grad.data.norm()}") self.grads_in_partition_offset += param.numel() def reduce_ipg_grads(self): if self.contiguous_gradients: if self.extra_large_param_to_reduce is not None: assert len(self.params_in_ipg_bucket) == 1, "more than 1 param in ipg bucket, this shouldn't happen" _, _, param_id = self.params_in_ipg_bucket[0] assert self.get_param_id(self.extra_large_param_to_reduce ) == param_id, "param in ipg bucket does not match extra-large param" self.average_tensor(self.extra_large_param_to_reduce.grad.view(-1)) self.extra_large_param_to_reduce = None else: self.average_tensor(self.ipg_buffer[self.ipg_index]) else: self.buffered_reduce_fallback(None, self.grads_in_ipg_bucket, elements_per_buffer=self.elements_in_ipg_bucket) if self.overlap_comm: stream = self.reduction_stream elif self.cpu_offload: # TODO: copy_grad_stream is disabled because of race with reduce. This hurts perf and should be fixed. # get_accelerator().synchronize() # stream = self.copy_grad_stream stream = get_accelerator().current_stream() else: stream = get_accelerator().current_stream() with get_accelerator().stream(stream): for _, param, param_id in self.params_in_ipg_bucket: assert self.params_already_reduced[param_id] == False, \ f"The parameter {param_id} has already been reduced. \ Gradient computed twice for this partition. \ Multiple gradient reduction is currently not supported" self.params_already_reduced[param_id] = True if self.partition_gradients: if not self.is_param_in_current_partition[param_id]: if self.overlap_comm and self.contiguous_gradients is False: # Clear grads of other partitions during the next reduction # to avoid clearing them before the reduction is complete. 
                            if self.previous_reduced_grads is None:
                                self.previous_reduced_grads = []
                            self.previous_reduced_grads.append(param)
                        else:
                            param.grad = None  #only if self.partition_gradients
                    elif self.contiguous_gradients:
                        self.copy_grads_in_partition(param)
                else:  # zero stage 1 - partition only optimizer state
                    if self.contiguous_gradients and self.is_param_in_current_partition[param_id]:
                        self.copy_grads_in_partition(param)

        self.grads_in_ipg_bucket = []
        self.params_in_ipg_bucket = []
        self.ipg_bucket_has_moe_params = False
        self.elements_in_ipg_bucket = 0
    #####################################################################

    def reduce_ready_partitions_and_remove_grads(self, param, i):
        if self.partition_gradients or self.is_gradient_accumulation_boundary:
            self.reduce_independent_p_g_buckets_and_remove_grads(param, i)

    def zero_reduced_gradients(self, partition_id, i):

        def are_all_related_partitions_reduced(params_id):
            for partition_id in self.param_to_partition_ids[i][params_id]:
                if not self.is_partition_reduced[i][partition_id]:
                    return False
            return True

        for params_id in self.is_grad_computed[i][partition_id]:
            if are_all_related_partitions_reduced(params_id):
                self.param_dict[params_id].grad = None  # dead code

    def flatten_and_print(self, message, tensors, start=0, n=5):
        flatten_tensor = self.flatten(tensors)

        def print_func():
            logger.info(flatten_tensor.contiguous().view(-1).narrow(0, start, n))

        self.sequential_execution(print_func, message)

    def get_grads_to_reduce(self, i, partition_id):

        def get_reducible_portion(key):
            grad = self.param_dict[key].grad
            total_elements = grad.numel()
            start = self.grad_start_offset[i][partition_id][key]
            num_elements = min(total_elements - start,
                               self.partition_size[i] - self.grad_partition_insertion_offset[i][partition_id][key])
            if not pg_correctness_test:
                if num_elements == total_elements:
                    return grad
                else:
                    return grad.contiguous().view(-1).narrow(0, int(start), int(num_elements))
            else:
                if num_elements == total_elements:
                    return grad.clone()
                else:
                    return grad.clone().contiguous().view(-1).narrow(0, int(start), int(num_elements))

        grads_to_reduce = []
        for key in self.is_grad_computed[i][partition_id]:
            grad = get_reducible_portion(key)
            grads_to_reduce.append(grad)
        return grads_to_reduce

    def sequential_execution(self, function, message, group=None):
        if group is None:
            group = self.dp_process_group
        if dist.get_rank(group=group) == 0:
            logger.info(message)
        for id in range(dist.get_world_size(group=group)):
            if id == dist.get_rank(group=group):
                function()
            dist.barrier(group=group)

    def set_none_gradients_to_zero(self, i, partition_id):
        for param_id in self.is_grad_computed[i][partition_id]:
            param = self.param_dict[param_id]
            if param.grad is None:
                # materialize a zero-filled gradient for params that never received one
                param.grad = torch.zeros_like(param)

    ######################Reduction Related Methods##############################

    def allreduce_bucket(self, bucket, rank=None, log=None):
        rank = None
        tensor = self.flatten(bucket)

        tensor_to_allreduce = tensor

        if pg_correctness_test:
            communication_data_type = torch.float32
        else:
            communication_data_type = self.communication_data_type

        if communication_data_type != tensor.dtype:
            tensor_to_allreduce = tensor.to(communication_data_type)

        tensor_to_allreduce.div_(dist.get_world_size(group=self.dp_process_group))

        if rank is None:
            #  "All Reducing"
            dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group)
        else:
            global_rank = dist.get_global_rank(self.dp_process_group, rank)
            dist.reduce(tensor_to_allreduce, global_rank, group=self.dp_process_group)

        if communication_data_type != tensor.dtype and tensor is not
tensor_to_allreduce: if rank is None or rank == dist.get_rank(group=self.dp_process_group): tensor.copy_(tensor_to_allreduce) return tensor def _clear_previous_reduced_grads(self): if self.previous_reduced_grads is not None: for param in self.previous_reduced_grads: param.grad = None # overlap enabled self.previous_reduced_grads = None # if rank is specified do a reduction instead of an allreduce def allreduce_and_copy(self, small_bucket, rank=None, log=None): if self.overlap_comm: get_accelerator().synchronize() # It is safe to clear the previously reduced grads of other partitions self._clear_previous_reduced_grads() stream = self.reduction_stream else: stream = get_accelerator().current_stream() with get_accelerator().stream(stream): allreduced = self.allreduce_bucket(small_bucket, rank=rank, log=log) if rank is None or rank == dist.get_rank(group=self.dp_process_group): for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)): buf.copy_(synced) def allreduce_no_retain(self, bucket, numel_per_bucket=500000000, rank=None, log=None): small_bucket = [] numel = 0 for tensor in bucket: small_bucket.append(tensor) numel = numel + tensor.numel() if numel > numel_per_bucket: self.allreduce_and_copy(small_bucket, rank=rank, log=None) small_bucket = [] if len(small_bucket) > 0: self.allreduce_and_copy(small_bucket, rank=rank, log=log) # allows using reduction of gradients instead of using all_reduce def buffered_reduce_fallback(self, rank, grads, elements_per_buffer=500000000, log=None): split_buckets = split_half_float_double(grads) for i, bucket in enumerate(split_buckets): self.allreduce_no_retain(bucket, numel_per_bucket=elements_per_buffer, rank=rank, log=log) ############################################################################# ############################################################################# ############################################################################# # views the tensor as multiple partitions and returns # those partitions def get_data_parallel_partitions(self, tensor, group_id): partitions = [] dp = dist.get_world_size(group=self.real_dp_process_group[group_id]) # dp_id = dist.get_rank(group=self.real_dp_process_group[group_id]) total_num_elements = tensor.numel() base_size = total_num_elements // dp remaining = total_num_elements % dp start = 0 for id in range(dp): partition_size = base_size if id < remaining: partition_size = partition_size + 1 partitions.append(tensor.narrow(0, start, partition_size)) start = start + partition_size return partitions def get_partition_info(self, tensor_list, partition_size, partition_id): params_in_partition = [] params_not_in_partition = [] start_index = partition_size * partition_id end_index = partition_size * (partition_id + 1) current_index = 0 first_offset = 0 for tensor in tensor_list: tensor_size = tensor.numel() if (current_index >= start_index and current_index < end_index): params_in_partition.append(tensor) elif start_index > current_index and start_index < (current_index + tensor_size): params_in_partition.append(tensor) assert (first_offset == 0 ), "This can happen either zero or only once as this must be the first tensor in the partition" first_offset = start_index - current_index else: params_not_in_partition.append(tensor) current_index = current_index + tensor_size return params_in_partition, params_not_in_partition, first_offset def zero_grad(self, set_to_none=False): """ Zero FP16 parameter grads. """ # FP32 grad should never exist. 
# For speed, set model fp16 grad to None by default for group in self.bit16_groups: for p in group: if set_to_none: p.grad = None # epilogue and in step else: if p.grad is not None: p.grad.detach_() p.grad.zero_() def _model_parallel_all_reduce(self, tensor, op): """ Perform all reduce within model parallel group, if any. """ if self.model_parallel_group is None or self.model_parallel_world_size == 1: pass else: dist.all_reduce(tensor=tensor, op=op, group=self.model_parallel_group) def get_grad_norm_direct(self, gradients, params, norm_type=2): """Clips gradient norm of an iterable of parameters. This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and added functionality to handle model parallel parameters. Note that the gradients are modified in place. Arguments: parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a single Tensor that will have gradients normalized max_norm (float or int): max norm of the gradients norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm. Returns: Total norm of the parameters (viewed as a single vector). """ norm_type = float(norm_type) if norm_type == inf: total_norm = max(g.data.abs().max() for g in gradients) total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=self.dp_process_group) # Take max across all GPUs. self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.MAX) total_norm = total_norm_cuda[0].item() else: total_norm = 0.0 # if dist.get_rank() == 0: # logger.info(f"Total Norm beginning {total_norm}") for g, p in zip(gradients, params): # Pipeline parallelism may replicate parameters. Avoid multi-counting. if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated: continue if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0): param_norm = g.data.double().norm(2) total_norm += param_norm.item()**2 # Sum across all model parallel GPUs. total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=self.dp_process_group) self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.SUM) total_norm = total_norm_cuda[0].item()**(1. / norm_type) if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm: total_norm = -1 return total_norm # creates a flat fused tensor from the tensor list starting at the first_offset # in the first tensor of the list. 
If there are not enough elements in the tensor # list then the flat tensor will be padded with zeros def get_flat_partition(self, tensor_list, first_offset, partition_size, dtype, device, return_tensor_list=False): flat_tensor_list = [] current_size = 0 for i, tensor in enumerate(tensor_list): if tensor.grad is None: tensor.grad = torch.zeros_like(tensor) tensor = tensor.grad num_elements = tensor.numel() tensor_offset = 0 # we need to offset to get to the right element if i == 0 and first_offset > 0: tensor_offset = first_offset num_elements = num_elements - tensor_offset # we dont need all elements of the tensor if num_elements > (partition_size - current_size): num_elements = partition_size - current_size # we need a narrow view of the tensor based on the tensor offset and number of elements that # we need from this tensor if tensor_offset > 0 or num_elements < tensor.numel(): flat_tensor_list.append(tensor.contiguous().view(-1).narrow(0, int(tensor_offset), int(num_elements))) else: flat_tensor_list.append(tensor) current_size = current_size + num_elements # this means its the last partition and does not align with the dp boundary. We need to pad before flattening if current_size < partition_size: flat_tensor_list.append(torch.zeros(int(partition_size - current_size), dtype=dtype, device=device)) if return_tensor_list: return flat_tensor_list return self.flatten(flat_tensor_list) def free_grad_in_param_list(self, param_list): for p in param_list: p.grad = None # in step def reset_cpu_buffers(self): self.norm_for_param_grads = {} self.local_overflow = False def log_timers(self, timer_names): if self.timers is None: return self.timers.log(names=list(timer_names)) def start_timers(self, timer_names): if self.timers is None: return for name in timer_names: self.timers(name).start() def stop_timers(self, timer_names): if self.timers is None: return for name in timer_names: self.timers(name).stop() def set_lr(self, lr): """Set the learning rate.""" for param_group in self.optimizer.param_groups: param_group["lr"] = lr def get_lr(self): """Return the current learning rate.""" return self.optimizer.param_groups[0]["lr"] def override_loss_scale(self, loss_scale): if loss_scale != self.external_loss_scale: logger.info(f'[deepspeed] setting loss scale from {self.external_loss_scale} -> {loss_scale}') self.custom_loss_scaler = True self.external_loss_scale = loss_scale def scaled_global_norm(self, norm_type=2): assert norm_type == 2, "only L2 norm supported" norm_groups = [] for i, group in enumerate(self.bit16_groups): partition_id = dist.get_rank(group=self.real_dp_process_group[i]) if self.cpu_offload: norm_groups.append(self.complete_grad_norm_calculation_for_cpu_offload(self.params_in_partition[i])) single_grad_partition = self.single_partition_of_fp32_groups[i].grad else: norm_groups.append(self.get_grad_norm_direct(self.averaged_gradients[i], self.params_in_partition[i])) if self.has_moe_layers: self._average_expert_grad_norms(norm_groups) # note that the get_global_norm function only supports l2 norm return get_global_norm(norm_list=norm_groups) def get_bit16_param_group(self, group_no): bit16_partitions = self.parallel_partitioned_bit16_groups[group_no] partition_id = dist.get_rank(group=self.real_dp_process_group[group_no]) return [bit16_partitions[dist.get_rank(group=self.real_dp_process_group[group_no])]] def _optimizer_step(self, group_no): original_param_groups = self.optimizer.param_groups self.optimizer.param_groups = [original_param_groups[group_no]] # Disabling this as the C++ 
side copy & synchronize is not working correctly #from deepspeed.ops.adam import DeepSpeedCPUAdam #if type(self.optimizer) == DeepSpeedCPUAdam and self.dtype == torch.half: # self.optimizer.step(fp16_param_groups=[self.get_bit16_param_group(group_no)]) #else: # self.optimizer.step() self.optimizer.step() self.optimizer.param_groups = original_param_groups def step(self, closure=None): """ Not supporting closure. """ self.micro_step_id = -1 see_memory_usage(f"In step before checking overflow") # First compute norm for all group so we know if there is overflow self.check_overflow() OPTIMIZER_ALLGATHER = 'optimizer_allgather' OPTIMIZER_GRADIENTS = 'optimizer_gradients' OPTIMIZER_STEP = 'optimizer_step' timer_names = [OPTIMIZER_ALLGATHER, OPTIMIZER_GRADIENTS, OPTIMIZER_STEP] prev_scale = self.loss_scale self._update_scale(self.overflow) if self.overflow: see_memory_usage('After overflow before clearing gradients') self.zero_grad(set_to_none=True) if self.cpu_offload: self.reset_cpu_buffers() else: self.averaged_gradients = {} see_memory_usage('After overflow after clearing gradients') self.start_timers(timer_names) self.stop_timers(timer_names) return # Step 1:- Calculate gradient norm using fp-16 grads if self.dtype == torch.float16: see_memory_usage('Before norm calculation') scaled_global_grad_norm = self.scaled_global_norm() self._global_grad_norm = scaled_global_grad_norm / prev_scale see_memory_usage('After norm before optimizer') # Step 2:- run optimizer and upscaling simultaneously for i, group in enumerate(self.bit16_groups): self.start_timers([OPTIMIZER_GRADIENTS]) partition_id = dist.get_rank(group=self.real_dp_process_group[i]) if self.cpu_offload: single_grad_partition = self.single_partition_of_fp32_groups[i].grad if self.dtype == torch.float16: self.unscale_and_clip_grads([single_grad_partition], scaled_global_grad_norm) self.stop_timers([OPTIMIZER_GRADIENTS]) self.start_timers([OPTIMIZER_STEP]) self._optimizer_step(i) # Disabled, this is not currently working #from deepspeed.ops.adam import DeepSpeedCPUAdam #if not (type(self.optimizer) == DeepSpeedCPUAdam and self.dtype == torch.half): # bit16_partitions = self.parallel_partitioned_bit16_groups[i] # fp32_partition = self.single_partition_of_fp32_groups[i] # bit16_partitions[partition_id].data.copy_(fp32_partition.data) bit16_partitions = self.parallel_partitioned_bit16_groups[i] fp32_partition = self.single_partition_of_fp32_groups[i] bit16_partitions[partition_id].data.copy_(fp32_partition.data) self.stop_timers([OPTIMIZER_STEP]) else: # free gradients for all the parameters that are not updated by this process(ZeRO stage2) self.free_grad_in_param_list(self.params_not_in_partition[i]) # create a flat gradients for parameters updated by this process # If we are last partition, ensure we have same size grads and partition size, if not pad with zero tensors if partition_id == dist.get_world_size(group=self.real_dp_process_group[i]) - 1: single_grad_partition = self.flatten_dense_tensors_aligned( self.averaged_gradients[i], int(self.partition_size[i])).to(self.single_partition_of_fp32_groups[i].dtype) else: single_grad_partition = self.flatten(self.averaged_gradients[i]).to( self.single_partition_of_fp32_groups[i].dtype) assert single_grad_partition.numel() == self.partition_size[i], \ "averaged gradients have different number of elements that partition size {} {} {} {}".format( single_grad_partition.numel(), self.partition_size[i], i, partition_id) self.single_partition_of_fp32_groups[i].grad = single_grad_partition # release 
all the gradient since we have already created a necessary copy in dp_grad_partition(ZeRO stage2) self.free_grad_in_param_list(self.params_in_partition[i]) self.averaged_gradients[i] = None if self.dtype == torch.float16: self.unscale_and_clip_grads([single_grad_partition], scaled_global_grad_norm) self.stop_timers([OPTIMIZER_GRADIENTS]) # Step 3:- run the optimizer if no offloading self.start_timers([OPTIMIZER_STEP]) self._optimizer_step(i) # Step 4:- get rid of the fp32 gradients. Not needed anymore self.single_partition_of_fp32_groups[i].grad = None del single_grad_partition bit16_partitions = self.parallel_partitioned_bit16_groups[i] fp32_partition = self.single_partition_of_fp32_groups[i] bit16_partitions[partition_id].data.copy_(fp32_partition.data) self.stop_timers([OPTIMIZER_STEP]) see_memory_usage('After optimizer before all-gather') if self.cpu_offload: self.reset_cpu_buffers() self.start_timers([OPTIMIZER_ALLGATHER]) # Gather the updated weights from everyone. # Then all partitions of the model parameters are updated and ready for next round forward. all_gather_dp_groups(partitioned_param_groups=self.parallel_partitioned_bit16_groups, dp_process_group=self.real_dp_process_group, start_alignment_factor=self.nccl_start_alignment_factor, allgather_bucket_size=self.allgather_bucket_size) self.stop_timers([OPTIMIZER_ALLGATHER]) # TODO: we probably don't need this? just to be safe for i in range(len(self.bit16_groups)): self._update_model_bit16_weights(i) self.log_timers(timer_names) see_memory_usage('After zero_optimizer step') return @torch.no_grad() def update_lp_params(self): for i, (bit16_partitions, fp32_partition) in enumerate( zip(self.parallel_partitioned_bit16_groups, self.single_partition_of_fp32_groups)): partition_id = dist.get_rank(group=self.real_dp_process_group[i]) bit16_partitions[partition_id].data.copy_(fp32_partition.data) # print_rank_0(f'update_lp_params {i=} {partition_id=}', force=True) # if i == 0: # print_rank_0(f'{fp32_partition[:10]=}', force=True) all_gather_dp_groups(partitioned_param_groups=self.parallel_partitioned_bit16_groups, dp_process_group=self.real_dp_process_group, start_alignment_factor=self.nccl_start_alignment_factor, allgather_bucket_size=self.allgather_bucket_size) def _average_expert_grad_norms(self, norm_groups): for i, norm in enumerate(norm_groups): if self.is_moe_param_group[i]: scaled_norm = norm * 1.0 / float(dist.get_world_size(group=self.real_dp_process_group[i])) scaled_norm_tensor = torch.tensor(scaled_norm, device=get_accelerator().device_name(), dtype=torch.float) dist.all_reduce(scaled_norm_tensor, group=self.real_dp_process_group[i]) norm_groups[i] = scaled_norm_tensor.item() def unscale_and_clip_grads(self, grad_groups_flat, total_norm): # compute combined scale factor for this group combined_scale = self.loss_scale if self.clip_grad > 0.: # norm is in fact norm*scale clip = ((total_norm / self.loss_scale) + 1e-6) / self.clip_grad if clip > 1: combined_scale = clip * self.loss_scale for grad in grad_groups_flat: if isinstance(grad, list): sub_partitions = grad for g in sub_partitions: g.data.mul_(1. / combined_scale) else: grad.data.mul_(1. 
/ combined_scale) def _check_overflow(self, partition_gradients=True): self.overflow = self.has_overflow(partition_gradients) # `params` is a list / generator of torch.Variable def has_overflow_serial(self, params, is_grad_list=False): for p in params: if p.grad is not None and self._has_inf_or_nan(p.grad.data): return True return False def has_overflow_partitioned_grads_serial(self): for i in range(len(self.bit16_groups)): for j, grad in enumerate(self.averaged_gradients[i]): if grad is not None and self._has_inf_or_nan(grad.data, j): return True return False def has_overflow(self, partition_gradients=True): if partition_gradients: overflow = self.local_overflow if self.cpu_offload else self.has_overflow_partitioned_grads_serial() overflow_gpu = get_accelerator().ByteTensor([overflow]) '''This will capture overflow across all data parallel and expert parallel process Since expert parallel process are a subset of data parallel process''' dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=self.dp_process_group) else: params = [] for group in self.bit16_groups: for param in group: params.append(param) overflow = self.has_overflow_serial(params, is_grad_list=partition_gradients) overflow_gpu = get_accelerator().ByteTensor([overflow]) # Since each model parallel GPU carries only part of the model, # make sure overflow flag is synced across all the model parallel GPUs self._model_parallel_all_reduce(tensor=overflow_gpu, op=dist.ReduceOp.MAX) overflow = overflow_gpu[0].item() return bool(overflow) # `x` is a torch.Tensor @staticmethod def _has_inf_or_nan(x, j=None): try: # if x is half, the .float() incurs an additional deep copy, but it's necessary if # Pytorch's .sum() creates a one-element tensor of the same type as x # (which is true for some recent version of pytorch). cpu_sum = float(x.float().sum()) # More efficient version that can be used if .sum() returns a Python scalar # cpu_sum = float(x.sum()) except RuntimeError as instance: # We want to check if inst is actually an overflow exception. # RuntimeError could come from a different error. # If so, we still want the exception to propagate. if "value cannot be converted" not in instance.args[0]: raise return True else: if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum: return True return False def backward(self, loss, retain_graph=False): """ :attr:`backward` performs the following steps: 1. fp32_loss = loss.float() 2. scaled_loss = fp32_loss*loss_scale 3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves """ self.micro_step_id += 1 if self.contiguous_gradients: self.ipg_buffer = [] buf_0 = torch.empty(int(self.reduce_bucket_size), dtype=self.dtype, device=get_accelerator().current_device_name()) self.ipg_buffer.append(buf_0) # Use double buffers to avoid data access conflict when overlap_comm is enabled. 
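            # The second buffer lets gradient copies for the next bucket proceed while the
            # first buffer is still being reduced on the communication stream; self.ipg_index
            # selects which of the two buffers is currently accepting gradient copies
            # (see self.ipg_buffer[self.ipg_index] in reduce_ipg_grads above).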
if self.overlap_comm: buf_1 = torch.empty(int(self.reduce_bucket_size), dtype=self.dtype, device=get_accelerator().current_device_name()) self.ipg_buffer.append(buf_1) self.ipg_index = 0 if self.custom_loss_scaler: scaled_loss = self.external_loss_scale * loss scaled_loss.backward() else: self.loss_scaler.backward(loss.float(), retain_graph=retain_graph) def check_overflow(self, partition_gradients=True): self._check_overflow(partition_gradients) def _update_scale(self, has_overflow=False): self.loss_scaler.update_scale(has_overflow) # Promote state so it can be retrieved or set via "fp16_optimizer_instance.state" def _get_state(self): return self.optimizer.state def _set_state(self, value): self.optimizer.state = value state = property(_get_state, _set_state) # Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups" # (for example, to adjust the learning rate) def _get_param_groups(self): return self.optimizer.param_groups def _set_param_groups(self, value): self.optimizer.param_groups = value param_groups = property(_get_param_groups, _set_param_groups) # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale" def _get_loss_scale(self): if self.custom_loss_scaler: return self.external_loss_scale else: return self.loss_scaler.cur_scale def _set_loss_scale(self, value): self.loss_scaler.cur_scale = value loss_scale = property(_get_loss_scale, _set_loss_scale) cur_scale = property(_get_loss_scale, _set_loss_scale) # Return group tensor after removing paddings that are added for alignment to DP world size. # This method works on the assumption that each group contains a single flattened tensor. def _get_groups_without_padding(self, groups_with_padding): groups_without_padding = [] for i, group in enumerate(groups_with_padding): lean_length = group.numel() - self.groups_padding[i] groups_without_padding.append(group[:lean_length]) return groups_without_padding # Return optimizer state after removing paddings that are added for alignment. def _get_state_without_padding(self, state_with_padding, padding): lean_state = {} for key, value in state_with_padding.items(): if torch.is_tensor(value): lean_length = value.numel() - padding lean_state[key] = value[:lean_length] else: lean_state[key] = value return lean_state # Return base optimizer states. # This method assumes that each param group contains a single flattened tensor. def _get_base_optimizer_state(self): optimizer_groups_state = [] for i, group in enumerate(self.optimizer.param_groups): p = group['params'][0] lean_optimizer_state = self._get_state_without_padding(self.optimizer.state[p], self.groups_padding[i]) optimizer_groups_state.append(lean_optimizer_state) return optimizer_groups_state def state_dict(self): """ Returns a dict containing the current state of this :class:`FP16_Optimizer` instance. This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict of the contained Pytorch optimizer. 
Example:: checkpoint = {} checkpoint['model'] = model.state_dict() checkpoint['optimizer'] = optimizer.state_dict() torch.save(checkpoint, "saved.pth") """ state_dict = {} state_dict['loss_scaler'] = self.loss_scaler state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale state_dict['overflow'] = self.overflow state_dict[CLIP_GRAD] = self.clip_grad if self.elastic_checkpoint: state_dict[BASE_OPTIMIZER_STATE] = self._get_base_optimizer_state() else: state_dict[BASE_OPTIMIZER_STATE] = self.optimizer.state_dict() # Remove paddings for DP alignment to enable loading for other alignment values fp32_groups_without_padding = self._get_groups_without_padding(self.single_partition_of_fp32_groups) state_dict[SINGLE_PARTITION_OF_FP32_GROUPS] = fp32_groups_without_padding state_dict[ ZERO_STAGE] = ZeroStageEnum.gradients if self.partition_gradients else ZeroStageEnum.optimizer_states state_dict[GROUP_PADDINGS] = self.groups_padding state_dict[PARTITION_COUNT] = self.partition_count state_dict[DS_VERSION] = version state_dict[PARAM_SLICE_MAPPINGS] = self._param_slice_mappings return state_dict # Restore base optimizer fp32 weights from elastic checkpoint by: # 1) Merging fp32 weights from checkpoints of all partitions # 2) Extracting fp32 weights for current partition from merged weights # 3) Using extracted weights to update base optimizer weights directly. def _restore_from_elastic_fp32_weights(self, all_state_dict): merged_single_partition_of_fp32_groups = [] for i in range(len(self.single_partition_of_fp32_groups)): partition_id = dist.get_rank(group=self.real_dp_process_group[i]) merged_partitions = [sd[SINGLE_PARTITION_OF_FP32_GROUPS][i] for sd in all_state_dict] if self.is_moe_group(self.optimizer.param_groups[i]): ranks = self.get_ep_ranks(group_name=self.optimizer.param_groups[i]['name']) merged_partitions = [merged_partitions[i] for i in ranks] flat_merged_partitions = self.flatten_dense_tensors_aligned( merged_partitions, self.nccl_start_alignment_factor * dist.get_world_size(group=self.real_dp_process_group[i])) dp_partitions = self.get_data_parallel_partitions(flat_merged_partitions, i) merged_single_partition_of_fp32_groups.append(dp_partitions[partition_id]) for current, saved in zip(self.single_partition_of_fp32_groups, merged_single_partition_of_fp32_groups): current.data.copy_(saved.data) # Restore base optimizer fp32 weights from ZeRO fp16 or bfloat16 weights def _restore_from_bit16_weights(self): for group_id, (bit16_partitions, fp32_partition) in enumerate( zip(self.parallel_partitioned_bit16_groups, self.single_partition_of_fp32_groups)): partition_id = dist.get_rank(group=self.real_dp_process_group[group_id]) fp32_partition.data.copy_(bit16_partitions[partition_id].data) # Refresh the fp32 master params from the fp16 or bfloat16 copies. 
def refresh_fp32_params(self): self._restore_from_bit16_weights() # Extract optimizer state for current partition from merged states of all partitions def _partition_base_optimizer_state(self, state_key, all_partition_states, group_id): partition_id = dist.get_rank(group=self.real_dp_process_group[group_id]) alignment = dist.get_world_size(group=self.real_dp_process_group[group_id]) if torch.is_tensor(all_partition_states[0]): flat_merged_partitions = self.flatten_dense_tensors_aligned(all_partition_states, alignment) dp_partitions = self.get_data_parallel_partitions(flat_merged_partitions, group_id) return dp_partitions[partition_id] else: # Assume non-tensor states are not partitioned and equal across ranks, so return first one return all_partition_states[0] def _restore_base_optimizer_state(self, base_optimizer_group_states): if type(base_optimizer_group_states) == dict: base_optimizer_group_states = base_optimizer_group_states['state'] for i, group in enumerate(self.optimizer.param_groups): p = group['params'][0] for key, saved in base_optimizer_group_states[i].items(): if torch.is_tensor(self.optimizer.state[p][key]): dst_tensor = self.optimizer.state[p][key] src_tensor = _get_padded_tensor(saved, dst_tensor.numel()) self.optimizer.state[p][key].data.copy_(src_tensor.data) else: self.optimizer.state[p][key] = saved def get_ep_ranks(self, rank=0, group_name=None): from deepspeed.utils import groups expert_parallel_size_ = groups._get_expert_parallel_world_size(group_name) world_size = groups._get_data_parallel_world_size() rank = groups._get_expert_parallel_rank(group_name) ranks = range(rank, world_size, expert_parallel_size_) return list(ranks) # Restore base optimizer state from elastic checkpoint by # 1) Merging optimizer state from checkpoints of all partitions # 2) Extracting optimizer state for current partition from the merged state # 3) Using the extracted value to directly update the base optimizer. 
def _restore_elastic_base_optimizer_state(self, all_state_dict): base_optimizer_group_states = [] for i in range(len(self.optimizer.param_groups)): partition_states = {} all_partition_group_states = [sd[BASE_OPTIMIZER_STATE][i] for sd in all_state_dict] if self.is_moe_group(self.optimizer.param_groups[i]): ranks = self.get_ep_ranks(group_name=self.optimizer.param_groups[i]['name']) all_partition_group_states = [all_partition_group_states[i] for i in ranks] for key in all_partition_group_states[0].keys(): all_partition_states = [all_states[key] for all_states in all_partition_group_states] partition_states[key] = self._partition_base_optimizer_state(key, all_partition_states, i) base_optimizer_group_states.append(partition_states) self._restore_base_optimizer_state(base_optimizer_group_states) def load_state_dict(self, state_dict_list, load_optimizer_states=True, load_from_fp32_weights=False, checkpoint_folder=None): if checkpoint_folder: self._load_universal_checkpoint(checkpoint_folder, load_optimizer_states, load_from_fp32_weights) else: self._load_legacy_checkpoint(state_dict_list, load_optimizer_states, load_from_fp32_weights) def _load_universal_checkpoint(self, checkpoint_folder, load_optimizer_states, load_from_fp32_weights): self._load_hp_checkpoint_state(checkpoint_folder) @property def param_groups(self): """Forward the wrapped optimizer's parameters.""" return self.optimizer.param_groups def _load_hp_checkpoint_state(self, checkpoint_dir): checkpoint_dir = os.path.join(checkpoint_dir, "zero") tp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu) tp_world_size = self.mpu.get_slice_parallel_world_size() for i, _ in enumerate(self.optimizer.param_groups): for lp in self.bit16_groups[i]: if lp._hp_mapping is not None: #print(f"Loading {self.param_names[lp]} {tp_rank=} {tp_world_size=}") lp.load_hp_checkpoint_state(os.path.join(checkpoint_dir, self.param_names[lp]), tp_rank, tp_world_size) def _load_legacy_checkpoint(self, state_dict_list, load_optimizer_states=True, load_from_fp32_weights=False): r"""Loading ZeRO checkpoint Arguments: state_dict_list: List of all saved ZeRO checkpoints, one for each saved partition. Note that the number of saved partitions may differ from number of loading partitions to support changing GPU count, specifically DP world size, between saving and loading checkpoints. load_optimizer_states: Boolean indicating whether or not to load base optimizer states load_from_fp32_weights: Boolean indicating whether to initialize fp32 master weights from fp32 copies in checkpoints (no precision loss) or from model's fp16 copies (with precision loss). """ """ Loads a state_dict created by an earlier call to state_dict(). If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``, whose parameters in turn came from ``model``, it is expected that the user will call ``model.load_state_dict()`` before ``fp16_optimizer_instance.load_state_dict()`` is called. Example:: model = torch.nn.Linear(D_in, D_out).to(get_accelerator().device_name()).half() optimizer = torch.optim.SGD(model.parameters(), lr=1e-3) optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0) ... checkpoint = torch.load("saved.pth") model.load_state_dict(checkpoint['model']) optimizer.load_state_dict(checkpoint['optimizer']) """ # I think it should actually be ok to reload the optimizer before the model. 
dp_rank = dist.get_rank(group=self.dp_process_group) current_rank_sd = state_dict_list[dp_rank] self.loss_scaler = current_rank_sd.get('loss_scaler', self.loss_scaler) self.dynamic_loss_scale = current_rank_sd.get('dynamic_loss_scale', self.dynamic_loss_scale) self.overflow = current_rank_sd.get('overflow', self.overflow) self.clip_grad = current_rank_sd.get(CLIP_GRAD, self.clip_grad) ckpt_version = current_rank_sd.get(DS_VERSION, False) assert ckpt_version, f"Empty ds_version in checkpoint, not clear how to proceed" ckpt_version = pkg_version.parse(ckpt_version) # zero stage 1 mode if not self.partition_gradients: required_version = pkg_version.parse("0.3.17") error_str = f"ZeRO stage 1 changed in {required_version} and is not backwards compatible " \ "with older stage 1 checkpoints. If you'd like to load an old ZeRO-1 checkpoint " \ "please use an older version of DeepSpeed (<= 0.5.8) and set 'legacy_stage1': true in your zero config json." assert required_version <= ckpt_version, f"Old version: {ckpt_version} {error_str}" ckpt_is_rigid = isinstance(current_rank_sd[BASE_OPTIMIZER_STATE], dict) # padding is always at the last rank/partition # if DP=1024 and param-group elems=16 -> padding will be 1024-16 across all but one rank # scenario-1 (shrink): saving w. 4 gpus -> loading w. 2 gpus # scenario-2 (expand): saving w. 2 gpus -> loading w. 4 gpus # if load_optimizer_states: # if new_dp_size: # self.strip_padding() # self.add_padding_w_new_dp_size() # self.optimizer.load_state_dict(current_rank_sd[BASE_OPTIMIZER_STATE]) if load_optimizer_states: if ckpt_is_rigid: # loading rigid ckpt into either rigid or elastic exec self.optimizer.load_state_dict(current_rank_sd[BASE_OPTIMIZER_STATE]) else: if self.elastic_checkpoint: # loading elastic into elastic exec self._restore_elastic_base_optimizer_state(state_dict_list) else: # loading an elastic checkpoint into rigid exec self._restore_base_optimizer_state(current_rank_sd[BASE_OPTIMIZER_STATE]) # At this point, the optimizer's references to the model's fp32 parameters are up to date. # The optimizer's hyperparameters and internal buffers are also up to date. # However, the fp32 master copies of the model's fp16 params stored by the optimizer are still # out of date. There are two options. # 1: Refresh the master params from the model's fp16 params. # This requires less storage but incurs precision loss. # 2: Save and restore the fp32 master copies separately. # We choose option 1 if changing DP degree and option 2 otherwise. # # Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device # of their associated parameters, because it's possible those buffers might not exist yet in # the current optimizer instance. In our case, as long as the current FP16_Optimizer has been # constructed in the same way as the one whose state_dict we are loading, the same master params # are guaranteed to exist, so we can just copy_() from the saved master params. if load_from_fp32_weights: # option 2 from above if self.elastic_checkpoint and not ckpt_is_rigid: self._restore_from_elastic_fp32_weights(state_dict_list) else: # For non-elastic checkpoint, simply copying from saved weights of current rank is sufficient. 
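                # The saved fp32 partitions were stored with their alignment padding stripped
                # (see _get_groups_without_padding in state_dict), so each saved slice is
                # re-padded to the size of the current partition before the copy below.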
for current, saved in zip(self.single_partition_of_fp32_groups, current_rank_sd[SINGLE_PARTITION_OF_FP32_GROUPS]): src_tensor = _get_padded_tensor(saved, current.numel()) current.data.copy_(src_tensor.data) else: # option 1 from above self._restore_from_bit16_weights() if load_optimizer_states: self._link_all_hp_params() def _handle_overflow(cpu_sum, x, i): import math rank = dist.get_rank() if rank == 0: t_i = -1 for v_i, v in enumerate(x.data.contiguous().view(-1)): if not math.isfinite(float(v)): t_i = v_i break logger.info(f"rank {rank} detected overflow {cpu_sum} in tensor {i}:{t_i} shape {x.shape}") def estimate_zero2_model_states_mem_needs(total_params, num_gpus_per_node=1, num_nodes=1, cpu_offload=True, additional_buffer_factor=1.5): total_gpus = num_nodes * num_gpus_per_node if cpu_offload: gpu_mem = 2 * total_params cpu_mem = total_params * max(4 * total_gpus, 16) * additional_buffer_factor else: gpu_mem = 4 * total_params + int(16 * total_params / total_gpus) cpu_mem = total_params * 4 * num_gpus_per_node * additional_buffer_factor return int(cpu_mem), int(gpu_mem) def model_to_params(model): # shared params calculated only once total_params = sum(dict((p.data_ptr(), p.numel()) for p in model.parameters()).values()) return total_params def estimate_zero2_model_states_mem_needs_all_live(model, num_gpus_per_node=1, num_nodes=1, additional_buffer_factor=1.5): """ Print out estimates on memory usage requirements for ZeRO 2 params, optim states and gradients for a given ``model`` and hardware setup. If you have an actual model object, use this function and everything will be derived automatically. If it's a hypothetical model, use ``estimate_zero2_model_states_mem_needs_all_cold`` where you have to pass the ``total_params`` explicitly. Args: - ``model``: ``nn.Module`` object - ``num_gpus_per_node``: how many gpus per node (defaults to 1) - ``num_nodes``: how many nodes (defaults to 1), - ``additional_buffer_factor``: estimation factor (defaults to 1.5): """ total_params = model_to_params(model) estimate_zero2_model_states_mem_needs_all_cold(total_params=total_params, num_gpus_per_node=num_gpus_per_node, num_nodes=num_nodes, additional_buffer_factor=additional_buffer_factor) def estimate_zero2_model_states_mem_needs_all_cold(total_params, num_gpus_per_node=1, num_nodes=1, additional_buffer_factor=1.5): """ Print out estimates on memory usage requirements for ZeRO 2 params, optim states and gradients for a given ``model`` and hardware setup. If it's a hypothetical model, use this function where you have to pass the ``total_params`` and ``largest_layer_params`` explicitly. If you have an actual model object, use ``estimate_zero2_model_states_mem_needs_all_live`` and everything will be derived automatically. 
Args: - ``total_params``: total model params - ``num_gpus_per_node``: how many gpus per node (defaults to 1) - ``num_nodes``: how many nodes (defaults to 1), - ``additional_buffer_factor``: estimation factor (defaults to 1.5): """ def format_options(cpu_offload): enabled = [] device = f'{OffloadDeviceEnum.cpu:4}' if cpu_offload else "none" enabled.append(f"offload_optimizer={device}") return ", ".join(enabled) nodes_str = "nodes" if num_nodes > 1 else "node" gpus_str = "GPUs" if num_gpus_per_node > 1 else "GPU" print("Estimated memory needed for params, optim states and gradients for a:\n" f"HW: Setup with {num_nodes} {nodes_str}, {num_gpus_per_node} {gpus_str} per node.\n" f"SW: Model with {int(total_params/1e6)}M total params.") print(" per CPU | per GPU | Options") for cpu_offload in [True, False]: cpu_mem, gpu_mem = estimate_zero2_model_states_mem_needs(total_params=total_params, num_gpus_per_node=num_gpus_per_node, num_nodes=num_nodes, cpu_offload=cpu_offload, additional_buffer_factor=additional_buffer_factor) options_str = format_options(cpu_offload=cpu_offload) print(f" {cpu_mem/2**30:7.2f}GB | {gpu_mem/2**30:6.2f}GB | {options_str}")
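# A minimal usage sketch of the estimator above, assuming a hypothetical 1.3B-parameter
# model on 2 nodes with 8 GPUs each; the parameter count and node/GPU topology are
# made-up example values, not defaults of the estimator.
if __name__ == "__main__":
    hypothetical_total_params = int(1.3e9)  # assumed model size for this example
    for offload in (True, False):
        cpu_mem, gpu_mem = estimate_zero2_model_states_mem_needs(total_params=hypothetical_total_params,
                                                                 num_gpus_per_node=8,
                                                                 num_nodes=2,
                                                                 cpu_offload=offload)
        # estimates are returned in bytes; convert to GiB for readability
        print(f"cpu_offload={offload}: ~{cpu_mem / 2**30:.2f}GB per CPU and ~{gpu_mem / 2**30:.2f}GB per GPU")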
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import math import os import types from typing import Callable, Iterable from enum import Enum import functools import itertools from typing import List import torch from torch import Tensor from deepspeed import comm as dist from torch.nn import Module from torch.nn import Parameter from .linear import zero3_linear_wrap import deepspeed from ..utils import get_only_unique_item, see_memory_usage from deepspeed.runtime.zero.config import DeepSpeedZeroConfig from deepspeed.runtime.zero.utils import assert_ints_same_as_other_ranks from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum from deepspeed.runtime.config_utils import get_config_default from deepspeed.utils import instrument_w_nvtx, logger from deepspeed.comm.comm import init_distributed from deepspeed.utils.debug import (debug_param2name_id_shape, debug_param2name_id_shape_device, debug_module2name, debug_param2name_id, debug_param2name_id_shape_status) from deepspeed.accelerator import get_accelerator from ..swap_tensor.partitioned_param_swapper import AsyncPartitionedParameterSwapper, PartitionedParamStatus param_count = 0 partitioned_param_data_shape = [0] zero_init_context = [] all_wrapped_classes = set() class NoGatherHandle: def __init__(self, param: Parameter) -> None: if param.ds_status != ZeroParamStatus.INFLIGHT: raise RuntimeError(f"expected param {param.ds_summary()} to be available") param.data = param.ds_tensor.data.to(device=get_accelerator().current_device_name(), non_blocking=True).view(param.ds_shape) self.__param = param def wait(self) -> None: get_accelerator().current_stream().synchronize() self.__param.ds_status = ZeroParamStatus.AVAILABLE class NoGatherCoalescedHandle: def __init__(self, params: List[Parameter]) -> None: self.__params = params self.__complete = False for param in self.__params: if param.ds_status != ZeroParamStatus.INFLIGHT: raise RuntimeError(f"expected param {param.ds_summary()} to not be available") param.data = param.ds_tensor.data.to(device=get_accelerator().current_device_name(), non_blocking=True).view(param.ds_shape) @instrument_w_nvtx def wait(self) -> None: if self.__complete: return get_accelerator().current_stream().synchronize() for param in self.__params: assert param.ds_status == ZeroParamStatus.INFLIGHT, f"expected param {param.ds_summary()} to be inflight" param.ds_status = ZeroParamStatus.AVAILABLE self.__complete = True def _dist_allgather_fn(input_tensor: Tensor, output_tensor: Tensor, group=None): return instrument_w_nvtx(dist.allgather_fn)(output_tensor, input_tensor, group=group, async_op=True) def print_rank_0(message, debug=False, force=False): rank = dist.get_rank() if rank == 0 and (debug or force): print(message) # other variations # - print for all ranks w/o interleaving # printflock(f"[{rank}] {message}") # - print to log file per rank # log_rank_file(rank, message) def debug_rank0(msg: str) -> None: if dist.get_rank() == 0: logger.debug(msg) def is_zero_param(parameter): if not torch.is_tensor(parameter): return False return hasattr(parameter, 'ds_id') def _init_external_params(module): if not hasattr(module, '_external_params'): module._external_params = {} def external_parameters(self): return self._external_params.items() def all_parameters(self): return itertools.chain(self.named_parameters(self, recurse=False), external_parameters(self)) module.ds_external_parameters = types.MethodType(external_parameters, module) module.all_parameters = 
types.MethodType(all_parameters, module) def register_external_parameter(module, parameter): """Instruct DeepSpeed to coordinate ``parameter``'s collection and partitioning in the forward and backward passes of ``module``. This is used when a parameter is accessed outside of its owning module's ``forward()``. DeepSpeed must know to collect it from its partitioned state and when to release the memory. .. note:: This is only applicable to training with ZeRO stage 3. Args: module (``torch.nn.Module``): The module that requires ``parameter`` in its forward pass. parameter (``torch.nn.Parameter``): The parameter to register. Raises: RuntimeError: If ``parameter`` is not of type ``torch.nn.Parameter``. Examples ======== #. Register a weight that is used in another module's forward pass (line 6). Parameter ``layer1.weight`` is used by ``layer2`` (line 11). .. code-block:: python :linenos: :emphasize-lines: 6,11 class ModuleZ3(torch.nn.Module): def __init__(self, *args): super().__init__(self, *args) self.layer1 = SomeLayer() self.layer2 = OtherLayer() deepspeed.zero.register_external_parameter(self, self.layer1.weight) def forward(self, input): x = self.layer1(input) # self.layer1.weight is required by self.layer2.forward y = self.layer2(x, self.layer1.weight) return y """ if not isinstance(parameter, torch.nn.Parameter): raise RuntimeError('Parameter is not a torch.nn.Parameter') if not hasattr(module, '_external_params'): _init_external_params(module) key = id(parameter) module._external_params[key] = parameter def unregister_external_parameter(module, parameter): """Reverses the effects of :meth:`register_external_parameter`. Args: module (``torch.nn.Module``): The module to affect. parameter (``torch.nn.Parameter``): The parameter to unregister. Raises: RuntimeError: If ``parameter`` is not of type ``torch.nn.Parameter``. RuntimeError: If ``parameter`` is not a registered external parameter of ``module``. """ if not isinstance(parameter, torch.nn.Parameter): raise RuntimeError('Parameter is not a torch.nn.Parameter') if not hasattr(module, '_external_params') or id(parameter) not in module._external_params: raise RuntimeError('Parameter is not a registered external parameter of module.') key = id(parameter) del module._external_params[key] class ZeroParamType(Enum): # same as regular pytorch parameters NORMAL = 1 # parameters are partitioned across data parallel process PARTITIONED = 2 # the parameter is held with a unique process rank # and is not available on all other process REMOTE = 3 class ZeroParamStatus(Enum): # parameters are fully present and ready for use on all processes AVAILABLE = 1 # parameters are either partitioned or remote in some or all process NOT_AVAILABLE = 2 # parameters are being gathered. 
INFLIGHT = 3 _orig_torch_empty = torch.empty _orig_torch_zeros = torch.zeros _orig_torch_ones = torch.ones _orig_torch_full = torch.full def zero_wrapper_for_fp_tensor_constructor(fn: Callable, target_fp_dtype: torch.dtype) -> Callable: def wrapped_fn(*args, **kwargs) -> Tensor: if kwargs.get("device", None) is None: kwargs['device'] = torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"])) tensor: Tensor = fn(*args, **kwargs) if tensor.is_floating_point(): tensor = tensor.to(target_fp_dtype) return tensor return wrapped_fn def get_new_tensor_fn_for_dtype(dtype: torch.dtype) -> Callable: def new_tensor(cls, *args) -> Tensor: device = torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"])) tensor = _orig_torch_empty(0, device=device).new_empty(*args) if tensor.is_floating_point(): tensor = tensor.to(dtype) return tensor return new_tensor # https://stackoverflow.com/a/63851681/9201239 def get_all_subclasses(cls): subclass_list = [] def recurse(cl): for subclass in cl.__subclasses__(): subclass_list.append(subclass) recurse(subclass) recurse(cls) return set(subclass_list) @instrument_w_nvtx def free_param(param: Parameter) -> None: """Free underlying storage of a parameter.""" assert not param.ds_active_sub_modules, param.ds_summary() if get_accelerator().on_accelerator(param.data): # need to make sure that we don't free the parameter while it is still # being used for computation param.data.record_stream(get_accelerator().current_stream()) # param.data doesn't store anything meaningful in partitioned state param.data = torch.empty(0, dtype=param.dtype, device=param.device) param.ds_status = ZeroParamStatus.NOT_AVAILABLE reuse_buffers = False temp_contiguous_tensor = None empty_buffers = {} # Inserts _post_init_method at the end of init method # for all sub classes of torch.nn.Module class InsertPostInitMethodToModuleSubClasses(object): def __init__(self, enabled=True, mem_efficient_linear=True, ds_config=None, dtype=None): self.mem_efficient_linear = mem_efficient_linear self.enabled = enabled self._set_dtype(ds_config, dtype) assert self.dtype in [ torch.half, torch.bfloat16, torch.float ], f"Invalid data type {self.dtype}, allowed values are [torch.half, torch.bfloat16, torch.float]" self.wrapped_cls = set() def __enter__(self): if not self.enabled: return def apply_with_gather(orig_module_apply_fn: Callable) -> Callable: """many models make use of child modules like Linear or Embedding which perform their own weight initialization in their __init__ methods, but will then have more weight initialization in a parent module's __init__ method that modifies weights of child modules, which is typically done using the Module.apply method. since the Init context manager partitions child modules immediately after they are initialized, without modifying apply we would entirely skip any initialization done by parent modules. to get around this issue, we wrap the function passed to Module.apply so that the applied function is applied to child modules correctly. """ def get_wrapped_fn_to_apply(fn_to_apply: Callable) -> Callable: if hasattr(fn_to_apply, "wrapped"): return fn_to_apply @functools.wraps(fn_to_apply) def wrapped_fn_to_apply(module_to_apply_fn_to: Module) -> None: """gathers parameters before calling apply function. afterwards parameters are broadcasted to ensure consistency across all ranks then re-partitioned. takes the following steps: 1. allgathers parameters for the current module being worked on 2. calls the original function 3. 
broadcasts root rank's parameters to the other ranks 4. re-partitions the parameters """ if not all(is_zero_param(p) for p in module_to_apply_fn_to.parameters(recurse=False)): raise RuntimeError(f"not all parameters for {module_to_apply_fn_to.__class__.__name__}, " f"were zero params, is it possible that the parameters were " f"overwritten after they were initialized? " f"params: {[p for p in module_to_apply_fn_to.parameters(recurse=False)]} ") params_to_apply_fn_to: Iterable[Parameter] = list( sorted(module_to_apply_fn_to.parameters(recurse=False), key=lambda p: p.ds_id)) for param in params_to_apply_fn_to: param.all_gather() fn_to_apply(module_to_apply_fn_to) for param in params_to_apply_fn_to: dist.broadcast(param.data, 0, group=param.ds_process_group) for param in params_to_apply_fn_to: param.partition(has_been_updated=True) wrapped_fn_to_apply.wrapped = True return wrapped_fn_to_apply @functools.wraps(orig_module_apply_fn) def wrapped_apply(module: Module, fn_to_apply: Callable) -> None: orig_module_apply_fn(module, get_wrapped_fn_to_apply(fn_to_apply)) return wrapped_apply def partition_after(f): @functools.wraps(f) def wrapper(module, *args, **kwargs): # important logic: We want to run post_init only after child's __init__ is # completed, and do nothing after __init__ of any of its parents and grandparents in # the inheritance ancestry. This way the partitioning will need to happen only once # when the whole object is ready to be partitioned and not before. This is because # often the child module will need to tweak the weights - for example running a # custom weights init function. So if a parent created the weights param, the child # won't need to gather it in order to tweak it print_rank_0(f'Before initializing {module.__class__.__name__}', force=False) is_child_module = False if not hasattr(module, "_ds_child_entered"): # child's __init__ was called, since parents all see the same object they can now skip post_init is_child_module = True setattr(module, "_ds_child_entered", True) f(module, *args, **kwargs) if is_child_module: # child's __init__ is done, now we can run a single post_init on the child object delattr(module, "_ds_child_entered") print_rank_0(f'Running post_init for {module.__class__.__name__}', force=False) self._post_init_method(module) print_rank_0(f'After initializing followed by post init for {module.__class__.__name__}', force=False) return wrapper def _enable_class(cls): cls._old_init = cls.__init__ cls.__init__ = partition_after(cls.__init__) def _init_subclass(cls, **kwargs): cls.__init__ = partition_after(cls.__init__) # Replace .__init__() for all existing subclasses of torch.nn.Module recursively global zero_init_context self.nest_level = len(zero_init_context) global all_wrapped_classes for subclass in get_all_subclasses(torch.nn.modules.module.Module): # Only wrap classes that haven't been wrapped yet if subclass not in all_wrapped_classes: _enable_class(subclass) self.wrapped_cls.add(subclass) all_wrapped_classes = all_wrapped_classes.union(self.wrapped_cls) # Wrap some functions only at top level call of Init if self.nest_level == 0: # holding onto some methods so we can put them back the way they were in __exit__ torch.nn.modules.module.Module._old_init_subclass = torch.nn.modules.module.Module.__init_subclass__ torch.nn.modules.module.Module._old_apply = torch.nn.modules.module.Module.apply torch.Tensor.__old_new__ = torch.Tensor.__new__ # Replace .__init__() for future subclasses of torch.nn.Module torch.nn.modules.module.Module.__init_subclass__ 
= classmethod(_init_subclass) torch.nn.modules.module.Module.apply = apply_with_gather(torch.nn.modules.module.Module._old_apply) torch.Tensor.__new__ = get_new_tensor_fn_for_dtype(self.dtype) torch.empty = zero_wrapper_for_fp_tensor_constructor(_orig_torch_empty, self.dtype) torch.zeros = zero_wrapper_for_fp_tensor_constructor(_orig_torch_zeros, self.dtype) torch.ones = zero_wrapper_for_fp_tensor_constructor(_orig_torch_ones, self.dtype) torch.full = zero_wrapper_for_fp_tensor_constructor(_orig_torch_full, self.dtype) if self.mem_efficient_linear: print_rank_0( "nn.functional.linear has been overridden with a more memory efficient version. This will persist unless manually reset.", force=False) self.linear_bk = torch.nn.functional.linear torch.nn.functional.linear = zero3_linear_wrap self.torch_func_wrapped = True zero_init_context.append(self) def __exit__(self, exc_type, exc_value, traceback): if not self.enabled: return self.remove_wrappers() # Exiting the top level context global zero_init_context zero_init_context.pop() if self.nest_level == 0: if dist.get_rank() == 0: logger.info("finished initializing model with %.2fB parameters", param_count / 1e9) # Now that we cleaned up the metaclass injection, raise the exception. if exc_type is not None: return False # To be implemented by inheriting classes def _post_init_method(self, module): pass def _set_dtype(self, ds_config, dtype): if ds_config is not None and dtype is None: if ds_config.bfloat16_enabled and ds_config.fp16_enabled: raise RuntimeError("bfloat16 and fp16 cannot be enabled at once") if ds_config.bfloat16_enabled: self.dtype = torch.bfloat16 elif ds_config.fp16_enabled: self.dtype = torch.half else: self.dtype = torch.float else: self.dtype = dtype or torch.half def remove_wrappers(self): def _disable_class(cls): cls.__init__ = cls._old_init for subclass in self.wrapped_cls: _disable_class(subclass) self.wrapped_cls.clear() # This context is the top level of nested Init if self.nest_level == 0 and self.torch_func_wrapped: # putting methods back the way we found them torch.nn.modules.module.Module.__init_subclass__ = torch.nn.modules.module.Module._old_init_subclass torch.nn.modules.module.Module.apply = torch.nn.modules.module.Module._old_apply torch.Tensor.__new__ = torch.Tensor.__old_new__ torch.empty = _orig_torch_empty torch.zeros = _orig_torch_zeros torch.ones = _orig_torch_ones torch.full = _orig_torch_full # un doing it here will undo it during training # if self.mem_efficient_linear: # torch.nn.functional.linear = self.linear_bk # if self.mem_efficient_linear: # torch.nn.functional.linear = self.linear_bk self.torch_func_wrapped = False global all_wrapped_classes for subclass in get_all_subclasses(torch.nn.modules.module.Module): if subclass not in all_wrapped_classes: msg = f"`{subclass}' was not properly set up for sharding by zero.Init(). A subclass of torch.nn.Module must be defined before zero.Init() where an instance of the class is created." raise RuntimeError(msg) all_wrapped_classes.clear() def shutdown_init_context(): """ This function is used to initialize deepspeed engine inside the context of Init. We need to remove the wrappers but keep the list of contexts. 
""" global zero_init_context for ctx in zero_init_context: ctx.remove_wrappers() class AllGatherHandle: def __init__(self, handle, param: Parameter) -> None: if param.ds_status != ZeroParamStatus.INFLIGHT: raise RuntimeError(f"expected param {param.ds_summary()} to be available") self.handle = handle self.param = param def wait(self) -> None: instrument_w_nvtx(self.handle.wait)() self.param.ds_status = ZeroParamStatus.AVAILABLE class AllGatherCoalescedHandle: def __init__( self, allgather_handle, params: List[Parameter], partitions: List[Tensor], world_size: int, ) -> None: # renaming the fields without double underscore to ease # the class inheritance self.allgather_handle = allgather_handle self.params = params self.partitions = partitions self.world_size = world_size self.complete = False for param in self.params: if param.ds_status != ZeroParamStatus.INFLIGHT: raise RuntimeError(f"expected param {param.ds_summary()} to not be available") @instrument_w_nvtx def wait(self) -> None: if self.complete: return instrument_w_nvtx(self.allgather_handle.wait)() # split the single tensor out into individual tensors param_offset = 0 for param in self.params: assert param.ds_status == ZeroParamStatus.INFLIGHT, f"expected param {param.ds_summary()} to be inflight" partitions: List[Tensor] = [] for rank in range(self.world_size): param_start = rank * param.ds_tensor.ds_numel if param_start < param.ds_numel: part_to_copy = self.partitions[rank].narrow( 0, param_offset, min(param.ds_numel - param_start, param.ds_tensor.ds_numel)) partitions.append(part_to_copy) param.data = instrument_w_nvtx(torch.cat)(partitions).view(param.ds_shape) param.ds_status = ZeroParamStatus.AVAILABLE for part_to_copy in partitions: part_to_copy.record_stream(get_accelerator().current_stream()) param_offset += param.ds_tensor.ds_numel self.complete = True def _no_gather_coalesced(params: Iterable[Parameter]) -> AllGatherCoalescedHandle: for param in params: if param.ds_status != ZeroParamStatus.NOT_AVAILABLE: raise RuntimeError(param.ds_summary()) param.ds_status = ZeroParamStatus.INFLIGHT params = sorted(params, key=lambda p: p.ds_id) if len(params) == 1: param, = params return NoGatherHandle(param) return NoGatherCoalescedHandle(params) # Replaces all parameters in module with Scattered Parameters class Init(InsertPostInitMethodToModuleSubClasses): param_id = 0 param_persistence_threshold = get_config_default(DeepSpeedZeroConfig, "param_persistence_threshold") model_persistence_threshold = get_config_default(DeepSpeedZeroConfig, "model_persistence_threshold") num_persisted_parameters = 0 num_persisted_elements = 0 apply_param_persistence = False def __init__(self, module=None, data_parallel_group=None, mem_efficient_linear=True, remote_device=None, pin_memory=False, config_dict_or_path=None, config=None, enabled=True, dtype=None, mpu=None): """A context to enable massive model construction for training with ZeRO-3. Models are automatically partitioned (or, sharded) across the system and converted to half precision. Args: module (``torch.nn.Module``, optional): If provided, partition the model as if it was constructed in the context. data_parallel_group (``deepspeed.comm`` process group, optional): The group of processes to partition among. Defaults to all processes. mem_efficient_linear (bool, optional): Replace torch.nn.functional.linear with an implementation that allows DeepSpeed to partition parameters. Defaults to ``True``. 
remote_device (string, optional): The initial device to store model weights e.g., ``cpu``, ``nvme``. Passing ``"cpu"`` will create the model in CPU memory. The model may still be moved to GPU based on the offload settings for training. Defaults to param offload device if a config is defined, otherwise GPU. pin_memory (bool, optional): Potentially increase performance by using pinned memory for model weights. ``remote_device`` must be ``"cpu"``. Defaults to pin_memory value in config, otherwise ``False``. config_dict_or_path (dict or ``json file``, optional): If provided, provides configuration for swapping fp16 params to NVMe. config (dict or ``json file``, optional): Deprecated, use config_dict_or_path instead. enabled (bool, optional): If ``False``, this context has no effect. Defaults to ``True``. dtype (``dtype``, optional): Can be used to change the data type of the parameters. Supported options are ``torch.half`` and ``torch.float``. Defaults to ``None`` mpu (``object``, optional): A model parallelism unit object that implements get_{model,data}_parallel_{rank,group,world_size}. This context accelerates model initialization and enables models that are too large to allocate in their entirety in CPU memory. It has the following effects: #. allocates tensors to either GPU or CPU memory or NVMe #. converts floating point tensors to half precision #. immediately partitions tensors among the group of data-parallel devices #. (*optional*) replaces ``torch.nn.functional.linear`` with a more memory-efficient implementation These modifications allow for models that exceed the size of local CPU/GPU memory/NVMe, but fit within the total NVMe capacity (*i.e.*, aggregate CPU or GPU memory or NVMe) across all nodes. Consider initializing a model with one trillion parameters, whose weights occupy two terabytes (TB) in half precision. The initial CPU allocation in full precision requires 4TB of memory *per process*, and so a system with 8 GPUs per node would need 32TB of CPU memory due to data-parallel redundancies. Instead, by immediately partitioning tensors we remove the redundancies. The result is that regardless of the number of GPUs, we still only require the original 4TB. This allows for a linear increase in model size with the aggregate system memory. For example, if a node has 1TB of memory and 8 GPUs, we could fit a trillion parameter model with 4 nodes and 32 GPUs. Important: If the fp16 weights of the model can't fit onto a single GPU memory this feature must be used. .. note:: Initializes ``deepspeed.comm`` if it has not already been done so. See :meth:`deepspeed.init_distributed` for more information. .. note:: Can also be used as a decorator: .. code-block:: python @deepspeed.zero.Init() def get_model(): return MyLargeModel() .. note:: Only applicable to training with ZeRO-3. Examples -------- #. Allocate a model and partition it among all processes: .. code-block:: python with deepspeed.zero.Init(): model = MyLargeModel() #. Allocate a model in pinned CPU memory and partition it among a subgroup of processes: .. code-block:: python with deepspeed.zero.Init(data_parallel_group=mpu.get_data_parallel_group(), remote_device="cpu", pin_memory=True): model = MyLargeModel() #. Partition an already-allocated model in CPU memory: .. code-block:: python model = deepspeed.zero.Init(module=model) """ if config is not None: config_dict_or_path = config logger.warning( f'zero.Init: the `config` argument is deprecated. 
Please use `config_dict_or_path` instead.') _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path, mpu) if config_dict_or_path is not None else None if _ds_config is not None: mem_efficient_linear = _ds_config.zero_config.memory_efficient_linear super().__init__(enabled=enabled, mem_efficient_linear=mem_efficient_linear, ds_config=_ds_config, dtype=dtype) if not dist.is_initialized(): init_distributed() assert dist.is_initialized(), "Parameters cannot be scattered without initializing deepspeed.comm" if data_parallel_group is None: self.ds_process_group = dist.get_world_group() else: self.ds_process_group = data_parallel_group self.rank = dist.get_rank(group=self.ds_process_group) self.dp_world_size = dist.get_world_size(group=self.ds_process_group) # Local device is the device where the parameters are consumed, must be default device. # It is the device where parameters are fully instantiated using allgather self.local_device = torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"])) get_accelerator().set_device(self.local_device) if _ds_config is not None: self._update_persist_config(_ds_config) if _ds_config.zero_config.offload_param is not None: remote_device = _ds_config.zero_config.offload_param.device pin_memory = _ds_config.zero_config.offload_param.pin_memory self._validate_remote_device(remote_device, _ds_config) # Remote device is the device where parameter partitions are stored # It can be same as local_device or it could be CPU or NVMe. self.remote_device = self.local_device if remote_device in [None, OffloadDeviceEnum.none] else remote_device self.pin_memory = pin_memory if (self.remote_device in [OffloadDeviceEnum.cpu, OffloadDeviceEnum.nvme ]) else False # Enable fp16 param swapping to NVMe if self.remote_device == OffloadDeviceEnum.nvme: self.param_swapper = AsyncPartitionedParameterSwapper(_ds_config, self.dtype) else: self.param_swapper = None # If we are provided an already-allocated module to prepare. if module is not None: assert isinstance(module, torch.nn.Module) self._convert_to_zero_parameters(module.parameters(recurse=True)) self.use_all_gather_into_tensor = dist.has_all_gather_into_tensor() if not self.use_all_gather_into_tensor: logger.info(f"all_gather_into_tensor API is not available in torch {torch.__version__}") def _update_persist_config(self, ds_config): Init.apply_param_persistence = True Init.param_persistence_threshold = ds_config.zero_config.param_persistence_threshold Init.model_persistence_threshold = ds_config.zero_config.model_persistence_threshold // self.num_partitions def _convert_to_zero_parameters(self, param_list): for param in param_list: if is_zero_param(param): continue self._convert_to_deepspeed_param(param) param.partition() def _validate_remote_device(self, remote_device, ds_config): if ds_config is not None: if remote_device in [None, OffloadDeviceEnum.cpu]: if ds_config.zero_config.offload_param is not None: offload_param_device = ds_config.zero_config.offload_param.device assert offload_param_device != OffloadDeviceEnum.nvme, \ f"'device' in DeepSpeed Config cannot be {offload_param_device} if remote device is {remote_device}." if remote_device == OffloadDeviceEnum.nvme: assert ds_config.zero_config.offload_param is not None, \ f'"offload_param" must be defined in DeepSpeed Config if remote device is {OffloadDeviceEnum.nvme}.' 
assert ds_config.zero_config.offload_param.nvme_path is not None, \ f'"nvme_path" in DeepSpeed Config cannot be None if remote device is {OffloadDeviceEnum.nvme}' def _post_init_method(self, module): #see_memory_usage(f"Before converting parmas in {module.__class__.__name__}", force=False) print_rank_0(f'Converting Params in {module.__class__.__name__}', force=False) see_memory_usage(f"Before converting and partitioning parmas in {module.__class__.__name__}", force=False) global param_count for name, param in module.named_parameters(recurse=False): param_count += param.numel() if not is_zero_param(param): self._convert_to_deepspeed_param(param) print_rank_0( f"Partitioning param {debug_param2name_id_shape(param)} module={debug_module2name(module)}") if get_accelerator().on_accelerator(param): if dist.get_world_group() == self.get_dp_process_group(): dist.broadcast(param, 0, self.get_dp_process_group()) else: dist.broadcast(param, dist.get_global_rank(self.get_dp_process_group(), 0), self.get_dp_process_group()) else: if dist.get_rank() == 0: logger.warn(f"param `{name}` in {module.__class__.__name__} " f"not on GPU so was not broadcasted from rank 0") param.partition() see_memory_usage( f"Param count {param_count}. After converting and partitioning parmas in {module.__class__.__name__}", force=False) def _convert_to_deepspeed_param(self, param): # Partitioned, Normal, Remote param.ds_param_type = ZeroParamType.PARTITIONED # Replicated vs Partitioned vs Inflight param.ds_status = ZeroParamStatus.AVAILABLE # Stores the shape of the original tensor param.ds_shape = param.shape # Stores the number of elements in the original parameter without padding param.ds_numel = param.numel() # Stores the partitioned copy of the tensor param.ds_tensor = None # Keeps track of how many active sub-modules need this param at any given point in time param.ds_active_sub_modules = set() # If this flag is true, then the parameters are replicated throughput training # And only partitioned before the step if Init.apply_param_persistence and param.ds_numel <= Init.param_persistence_threshold and Init.num_persisted_elements + param.ds_numel <= Init.model_persistence_threshold: param.ds_persist = True Init.num_persisted_parameters += 1 Init.num_persisted_elements += param.ds_numel else: param.ds_persist = False param.is_external_param = False # The group that the parameter is scattered across. param.ds_process_group = self.ds_process_group # This is set to the Async Param swapper if remote device is nvme # else this is set to None param.nvme_swapper = self.param_swapper # DeepSpeed Param ID param.ds_id = Init.param_id Init.param_id += 1 def all_gather(param_list=None, async_op=False, hierarchy=0): cls = param if param_list is None: param_list = [cls] return self._all_gather(param_list, async_op=async_op, hierarchy=hierarchy) @instrument_w_nvtx def all_gather_coalesced(params: Iterable[Parameter], safe_mode: bool = False) -> AllGatherCoalescedHandle: # fetches from nvme if the partition is not available and in nvme self._ensure_availability_of_partitioned_params(params) if self.num_partitions == 1: return _no_gather_coalesced(params) for param in params: if param.ds_status != ZeroParamStatus.NOT_AVAILABLE: raise RuntimeError(param.ds_summary()) param.ds_status = ZeroParamStatus.INFLIGHT # ensure that each rank has params in same order. 
the allgather # is done by flattening the parameter list into a single tensor that # can be allgathered in a single call - this means that if each rank # gives a list of the same parameters in a different order we will # silently get incorrect parameter values, and have very difficult # to debug correctness issues. params = sorted(params, key=lambda p: p.ds_id) debug_rank0(f"-allgather_coalesced: {[p.ds_id for p in params]}") if safe_mode: # ensure that same list (with same ordering) of parameters are # being allgathered across all ranks, otherwise could mix # data between tensors. assert_ints_same_as_other_ranks([p.ds_id for p in params]) # ensure that tensors from each rank agree on the same ds_numel # otherwise could mix data between tensors. assert_ints_same_as_other_ranks([p.ds_tensor.ds_numel for p in params]) if len(params) == 1: # have an opportunity to avoid some intermediate memory allocations param, = params param_buffer = torch.empty( math.ceil(param.ds_numel / self.num_partitions) * self.num_partitions, dtype=param.dtype, device=get_accelerator().current_device_name(), requires_grad=False, ) handle = _dist_allgather_fn(param.ds_tensor.to(get_accelerator().current_device_name()), param_buffer, self.get_partition_dp_group(param)) param.data = param_buffer.narrow(0, 0, param.ds_numel).view(param.ds_shape).to(param.device) return AllGatherHandle(handle, param) else: partition_sz = sum(p.ds_tensor.ds_numel for p in params) flat_tensor = torch.empty(partition_sz * self.num_partitions, dtype=get_only_unique_item(p.dtype for p in params), device=get_accelerator().current_device_name(), requires_grad=False) partitions: List[Parameter] = [] for i in range(self.num_partitions): partitions.append(flat_tensor.narrow(0, partition_sz * i, partition_sz)) instrument_w_nvtx(torch.cat)([p.ds_tensor.to(get_accelerator().current_device_name()) for p in params], out=partitions[self.get_partition_rank()]) handle = _dist_allgather_fn(partitions[self.get_partition_rank()], flat_tensor, self.get_partition_dp_group(params[0])) return AllGatherCoalescedHandle( allgather_handle=handle, params=params, partitions=partitions, world_size=self.num_partitions, ) def partition(param_list=None, hierarchy=0, has_been_updated=False): cls = param print_rank_0(f"{'--'*hierarchy}----Partitioning param {debug_param2name_id_shape_device(cls)}") if param_list is None: param_list = [cls] self._partition(param_list, has_been_updated=has_been_updated) def reduce_gradients_at_owner(param_list=None, hierarchy=0): cls = param if param_list is None: param_list = [cls] print_rank_0( f"{'--'*hierarchy}----Reducing Gradients for param with ids {[param.ds_id for param in param_list]} to owner" ) self._reduce_scatter_gradients(param_list) def partition_gradients(param_list=None, partition_buffers=None, hierarchy=0, accumulate=False): cls = param print_rank_0( f"{'--'*hierarchy}----Partitioning param gradient with id {debug_param2name_id_shape_device(cls)}") if param_list is None: param_list = [cls] if isinstance(partition_buffers, torch.Tensor): partition_buffers = [partition_buffers] self._partition_gradients(param_list, partition_buffers=partition_buffers, accumulate=accumulate) def aligned_size(): return self._aligned_size(param) def padding_size(): return self._padding_size(param) def partition_numel(): return self._partition_numel(param) def item_override(): param.all_gather() return param._orig_item() def ds_summary(slf: torch.Tensor, use_debug_name: bool = False) -> dict: return { "id": debug_param2name_id(slf) if 
use_debug_name else slf.ds_id, "status": slf.ds_status.name, "numel": slf.numel(), "ds_numel": slf.ds_numel, "shape": tuple(slf.shape), "ds_shape": tuple(slf.ds_shape), "requires_grad": slf.requires_grad, "grad_shape": tuple(slf.grad.shape) if slf.grad is not None else None, "persist": slf.ds_persist, "active_sub_modules": slf.ds_active_sub_modules, "ds_tensor.shape": slf.ds_tensor.shape if slf.ds_tensor is not None else None } def convert_to_zero_parameters(param_list): self._convert_to_zero_parameters(param_list) def allgather_before(func: Callable) -> Callable: def wrapped(*args, **kwargs): param.all_gather() return func(*args, **kwargs) return wrapped # Collectives for gathering and partitioning parameters param.all_gather = all_gather param.all_gather_coalesced = all_gather_coalesced param.partition = partition # Collective for averaging gradients param.reduce_gradients_at_owner = reduce_gradients_at_owner param.partition_gradients = partition_gradients # Partitioning size utilities param.aligned_size = aligned_size param.padding_size = padding_size param.partition_numel = partition_numel param.ds_summary = types.MethodType(ds_summary, param) param.item = allgather_before(param.item) param.convert_to_zero_parameters = convert_to_zero_parameters def _aligned_size(self, param): return param.ds_numel + self._padding_size(param) def _padding_size(self, param): remainder = param.ds_numel % self.num_partitions return (self.num_partitions - remainder) if remainder else 0 def _partition_numel(self, param): return param.ds_tensor.ds_numel def _ensure_availability_of_partitioned_params(self, params): swap_in_list = [] swap_in_flight = [] for param in params: if param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE: assert param.ds_tensor.final_location == OffloadDeviceEnum.nvme and param.ds_status == ZeroParamStatus.NOT_AVAILABLE swap_in_list.append(param) if param.ds_tensor.status == PartitionedParamStatus.INFLIGHT: assert param.ds_tensor.final_location == OffloadDeviceEnum.nvme and param.ds_status == ZeroParamStatus.NOT_AVAILABLE swap_in_flight.append(param) if len(swap_in_list) > 0: swap_in_list[0].nvme_swapper.swap_in(swap_in_list, async_op=False) elif len(swap_in_flight) > 0: swap_in_flight[0].nvme_swapper.synchronize_reads() @instrument_w_nvtx def _all_gather(self, param_list, async_op=False, hierarchy=None): # fetches from nvme if the partition is not available and in nvme self._ensure_availability_of_partitioned_params(param_list) handles = [] all_gather_list = [] for param in param_list: if param.ds_status == ZeroParamStatus.NOT_AVAILABLE: if async_op: handle = self._allgather_param(param, async_op=async_op, hierarchy=hierarchy) param.ds_status = ZeroParamStatus.INFLIGHT # if async_op else ZeroParamStatus.AVAILABLE handles.append(handle) else: all_gather_list.append(param) if not async_op: if len(param_list) == 1: ret_value = self._allgather_params(all_gather_list, hierarchy=hierarchy) else: ret_value = self._allgather_params_coalesced(all_gather_list, hierarchy) for param in all_gather_list: param.ds_status = ZeroParamStatus.AVAILABLE return ret_value return handles def _partition(self, param_list, force=False, has_been_updated=False): for param in param_list: #print_rank_0(f"Before Partitioning Param {param.ds_id}") # self._param_status(param) self._partition_param(param, has_been_updated=has_been_updated) param.ds_status = ZeroParamStatus.NOT_AVAILABLE # if param.ds_tensor is not None: # assert id(param.data) == id(param.ds_tensor.data), \ # "After the parameters are 
initially partitioned, make sure we are not recreating the partition." #print_rank_0(f"After Partitioning Param {param.ds_id}") # self._param_status(param) @instrument_w_nvtx def _partition_param(self, param, buffer=None, has_been_updated=False): assert param.ds_status is not ZeroParamStatus.INFLIGHT, f" {param} Cannot partition a param in flight" global reuse_buffers #print_rank_0(f"Param id {param.ds_id} status is {param.ds_status}") if param.ds_status is ZeroParamStatus.AVAILABLE: print_rank_0(f"Partitioning param id {param.ds_id} reuse buffers {reuse_buffers}", force=False) # if reuse_buffers and False: # numel = buffer.numel() # buffer = param.data.view(-1) # print_rank_0( # "Returning buffer for param {param.ds_id} with numel {param.ds_numel} to empty buffers", # force=False) # if numel in empty_buffers: # empty_buffers[numel].append(buffer) # if deepspeed.comm.get_rank(): # print(f"Releasing {param.data.numel()}") if param.ds_tensor is not None and not has_been_updated: #param.data = param.ds_tensor.data see_memory_usage(f'Before partitioning param {param.ds_id} {param.shape}', force=False) # param.data does not store anything meaningful in partitioned state free_param(param) see_memory_usage(f'After partitioning param {param.ds_id} {param.shape}', force=False) if param.ds_tensor.final_location == OffloadDeviceEnum.nvme: print_rank_0(f"Param {param.ds_id} partition released since it exists in nvme", force=False) param.nvme_swapper.remove_partition_and_release_buffers([param]) return tensor_size = self._aligned_size(param) partition_size = tensor_size // self.num_partitions if param.ds_tensor is None: final_location = None if self.remote_device == OffloadDeviceEnum.nvme and self.param_swapper.swappable_tensor( numel=partition_size): final_location = OffloadDeviceEnum.nvme buffer = self.param_swapper.get_buffer(param, partition_size) partitioned_tensor = torch.empty(0, dtype=param.dtype, device=buffer.device) partitioned_tensor.data = buffer.data print_rank_0(f"ID {param.ds_id} Initializing partition for the first time for nvme offload.") else: if param.ds_persist: device = self.local_device elif self.remote_device == OffloadDeviceEnum.nvme: device = OffloadDeviceEnum.cpu else: device = self.remote_device partitioned_tensor = torch.empty(partition_size, dtype=param.dtype, device=device) if device == OffloadDeviceEnum.cpu and self.pin_memory: partitioned_tensor = get_accelerator().pin_memory(partitioned_tensor) partitioned_tensor.requires_grad = False param.ds_tensor = partitioned_tensor param.ds_tensor.ds_numel = partition_size param.ds_tensor.status = PartitionedParamStatus.AVAILABLE param.ds_tensor.final_location = final_location start = partition_size * self.get_partition_rank() end = start + partition_size one_dim_param = param.contiguous().view(-1) if start < param.ds_numel and end <= param.ds_numel: src_tensor = one_dim_param.narrow(0, start, partition_size) param.ds_tensor.copy_(src_tensor) #partitioned_tensor = src_tensor.clone().detach().to(self.remote_device) else: # partitioned_tensor = torch.zeros(partition_size, # dtype=param.dtype, # device=self.remote_device ) if start < param.ds_numel: elements_to_copy = param.ds_numel - start param.ds_tensor.narrow(0, 0, elements_to_copy).copy_(one_dim_param.narrow(0, start, elements_to_copy)) #print(f"Remote device {self.remote_device}") #param.ds_tensor = partitioned_tensor #param.data = param.ds_tensor.data # param.data does not store anything meaningful in partitioned state see_memory_usage(f'Before partitioning param 
{param.ds_id} {param.shape}', force=False) free_param(param) see_memory_usage(f'After partitioning param {param.ds_id} {param.shape}', force=False) if param.ds_tensor.final_location == OffloadDeviceEnum.nvme: self.param_swapper.swap_out_and_release([param]) print_rank_0(f"ID {param.ds_id} Offloaded to nvme offload and buffers released.") see_memory_usage(f"ID {param.ds_id} Offloaded to nvme offload and buffers released.", force=False) print_rank_0(f"ID {param.ds_id} partitioned type {param.dtype} dev {param.device} shape {param.shape}") def _param_status(self, param): if param.ds_tensor is not None: print_rank_0( f"Param id {param.ds_id}, param status: {param.ds_status}, param numel {param.ds_numel}, partitioned numel {param.ds_tensor.numel()}, data numel {param.data.numel()}" ) else: print_rank_0( f"Param id {param.ds_id}, param status: {param.ds_status}, param numel {param.ds_numel}, partitioned ds_tensor {param.ds_tensor}, data numel {param.data.numel()}" ) def _allgather_param(self, param, async_op=False, hierarchy=0): partition_size = param.ds_tensor.ds_numel tensor_size = partition_size * self.num_partitions aligned_param_size = self._aligned_size(param) assert tensor_size == aligned_param_size, f'param id {param.ds_id} aligned size {aligned_param_size} does not match tensor size {tensor_size}' print_rank_0( f"{'--'* hierarchy}---- Before allocating allgather param {debug_param2name_id_shape_status(param)} partition size={partition_size}" ) see_memory_usage( f'Before allocate allgather param {debug_param2name_id_shape_status(param)} partition_size={partition_size} ', force=False) flat_tensor = torch.zeros(aligned_param_size, dtype=param.dtype, device=param.device).view(-1) see_memory_usage( f'After allocate allgather param {debug_param2name_id_shape_status(param)} {aligned_param_size} {partition_size} ', force=False) get_accelerator().synchronize() print_rank_0( f"{'--'* hierarchy}----allgather param with {debug_param2name_id_shape_status(param)} partition size={partition_size}" ) # if not flat_tensor.numel() > 100000: # replicated_tensor = flat_tensor.narrow(0, # 0, # param.ds_numel).view(param.ds_shape) # param.data = replicated_tensor.data # return None if self.use_all_gather_into_tensor: handle = dist.all_gather_into_tensor(flat_tensor, param.ds_tensor.to(get_accelerator().device_name()), group=self.get_partition_dp_group(param), async_op=async_op) else: partitions = [] for i in range(self.num_partitions): partitions.append(flat_tensor.narrow(0, partition_size * i, partition_size)) if i == dist.get_rank(group=self.get_partition_dp_group(param)): partitions[i].data.copy_(param.ds_tensor.data, non_blocking=True) handle = dist.all_gather(partitions, partitions[self.get_partition_rank()], group=self.get_partition_dp_group(param), async_op=async_op) replicated_tensor = flat_tensor.narrow(0, 0, param.ds_numel).view(param.ds_shape) param.data = replicated_tensor.data return handle def _allgather_params_coalesced(self, param_list, hierarchy=0): """ blocking call avoid explicit memory copy in _allgather_params """ if len(param_list) == 0: return if self.num_partitions == 1: handle = _no_gather_coalesced(param_list) handle.wait() return None # collect local tensors and partition sizes partition_sizes = [] local_tensors = [] for param in param_list: partition_sizes.append(param.ds_tensor.ds_numel) local_tensors.append(param.ds_tensor.to(get_accelerator().device_name())) # allocate memory for allgather params allgather_params = [] for psize in partition_sizes: tensor_size = psize * 
self.num_partitions flat_tensor = torch.empty(tensor_size, dtype=param_list[0].dtype, device=self.local_device).view(-1) flat_tensor.requires_grad = False allgather_params.append(flat_tensor) # launch launch_handles = [] for param_idx, param in enumerate(param_list): input_tensor = local_tensors[param_idx].view(-1) if self.use_all_gather_into_tensor: # try the _all_gather_base from Pytorch master h = dist.all_gather_into_tensor(allgather_params[param_idx], input_tensor, group=self.get_partition_dp_group(param), async_op=True) else: output_list = [] for i in range(self.num_partitions): psize = partition_sizes[param_idx] partition = allgather_params[param_idx].narrow(0, i * psize, psize) output_list.append(partition) if not get_accelerator().on_accelerator(partition): logger.warning( f'param {param_idx}, partition {i} is not on CUDA, partition shape {partition.size()}') # back to old all_gather function h = dist.all_gather(output_list, input_tensor, group=self.get_partition_dp_group(param), async_op=True) launch_handles.append(h) # Wait ensures the operation is enqueued, but not necessarily complete. launch_handles[-1].wait() # assign to param.data (not copy) for i, param in enumerate(param_list): gathered_tensor = allgather_params[i] param.data = gathered_tensor.narrow(0, 0, param.ds_numel).view(param.ds_shape).data # guarantee the communication to be completed get_accelerator().synchronize() return None def _allgather_params(self, param_list, hierarchy=0): if len(param_list) == 0: return partition_size = sum([param.ds_tensor.ds_numel for param in param_list]) tensor_size = partition_size * self.num_partitions flat_tensor = torch.empty(tensor_size, dtype=param_list[0].dtype, device=self.local_device) flat_tensor.requires_grad = False partitions = [] for i in range(self.num_partitions): start = partition_size * i partitions.append(flat_tensor.narrow(0, start, partition_size)) if i == self.get_partition_rank(): offset = 0 for param in param_list: param_numel = param.ds_tensor.ds_numel partitions[i].narrow(0, offset, param_numel).copy_(param.ds_tensor.data) offset += param_numel dist.all_gather(partitions, partitions[self.get_partition_rank()], group=self.get_partition_dp_group(param), async_op=False) param_offset = 0 for param in param_list: param_partition_size = param.ds_tensor.ds_numel param_size = param.ds_numel replicated_tensor = torch.empty(param.ds_shape, dtype=param.dtype, device=self.local_device) for i in range(self.num_partitions): start = i * partition_size param_start = i * param_partition_size if param_start < param_size: numel_to_copy = min(param_size - param_start, param_partition_size) part_to_copy = partitions[i].narrow(0, param_offset, numel_to_copy) replicated_tensor.view(-1).narrow(0, param_start, numel_to_copy).copy_(part_to_copy) #param_offset += param.data.numel() param_offset += param.ds_tensor.ds_numel param.data = replicated_tensor.data return None def _reduce_scatter_gradients(self, param_list): #print_rank_0([param.grad for param in param_list]) #assert any([param.grad is None for param in param_list]), "None gradients cannot be reduce scattered" handles_and_reduced_partitions = [] for param in param_list: assert param.grad.numel( ) == param.ds_numel, f"{param.grad.numel()} != {param.ds_numel} Cannot reduce scatter gradients whose size is not same as the params" handles_and_reduced_partitions.append(self._reduce_scatter_gradient(param)) for param, (handle, reduced_partition) in zip(param_list, handles_and_reduced_partitions): if handle is not None: handle.wait() 
# some ranks may have partitions that are padded to go beyond the grad size. # For these ranks the output of reduce scatter is a separate buffer and needs # to be copied in partition_size = param.ds_tensor.ds_numel start = self.get_partition_rank() * partition_size end = start + partition_size #print_rank_0("REduce scatter was executed for praam {param.ds_id}") if start < param.ds_numel and end > param.ds_numel: elements = param.ds_numel - start param.grad.view(-1).narrow(0, start, elements).copy_(reduced_partition.narrow(0, 0, elements)) def _reduce_scatter_gradient(self, param): partition_size = param.ds_tensor.ds_numel #output = torch.empty(partition_size, dtype=param.dtype, device=param.device) total_size = partition_size * self.num_partitions input_list = [] for i in range(self.num_partitions): start = i * partition_size end = start + partition_size #print("before reduce scatter gradients") if start < param.ds_numel and end <= param.ds_numel: input = param.grad.view(-1).narrow(0, start, partition_size) else: input = torch.zeros(partition_size, dtype=param.dtype, device=param.device) if start < param.ds_numel: elements = param.ds_numel - start input.narrow(0, 0, elements).copy_(param.grad.view(-1).narrow(0, start, elements)) #print("after reduce scatter gradients") input_list.append(input) rank = dist.get_rank(group=self.get_partition_dp_group(param)) handle = dist.reduce_scatter(input_list[rank], input_list, group=self.get_partition_dp_group(param), async_op=True) return handle, input_list[rank] def _partition_gradients(self, param_list, partition_buffers=None, accumulate=False): if partition_buffers is None: partition_buffers = [None] * len(param_list) for param, partition_buffer in zip(param_list, partition_buffers): self._partition_gradient(param, partition_buffer=partition_buffer, accumulate=accumulate) def _partition_gradient(self, param, partition_buffer=None, accumulate=False): #import pdb;pdb.set_trace() # param.grad=None # param.grad.test() print_rank_0( f"Partitioning param {param.ds_id} gradient of size {param.grad.numel()} type {param.grad.dtype} part_size {param.ds_tensor.ds_numel}" ) see_memory_usage("Before partitioning gradients", force=False) partition_size = param.ds_tensor.ds_numel if partition_buffer is None: assert not accumulate, "No buffer to accumulate to" partition_buffer = torch.zeros(partition_size, dtype=param.dtype, device=param.device) else: assert partition_buffer.numel( ) >= partition_size, f"The partition buffer size {partition_buffer.numel()} should match the size of param.ds_tensor {partition_size}" rank = dist.get_rank(group=self.get_partition_dp_group(param)) start = partition_size * rank end = start + partition_size dest_tensor_full_buffer = partition_buffer.view(-1).narrow(0, 0, partition_size) #print("before partition gradients") if start < param.ds_numel: elements = min(param.ds_numel - start, partition_size) dest_tensor = dest_tensor_full_buffer.narrow(0, 0, elements) src_tensor = param.grad.view(-1).narrow(0, start, elements) # just copy the grad partition to the buffer if not accumulate: dest_tensor.copy_(src_tensor) # if source and destination are on same device, # add to the provided buffer elif src_tensor.device == dest_tensor.device: dest_tensor.add_(src_tensor) # if source and destination are on different device, copy first to src # then add and move back to the destination. 
            # This seems to run faster when src is gpu and dest is cpu;
            # adding directly to cpu is very slow
            else:
                acc_tensor = torch.empty(src_tensor.numel(), dtype=param.dtype, device=param.device)

                acc_tensor.copy_(dest_tensor)
                acc_tensor.add_(src_tensor)
                dest_tensor.copy_(acc_tensor)

            # partition_buffer.view(-1).narrow(0, 0, elements).copy_(
            #     param.grad.view(-1).narrow(0, start, elements))

        #print("after partition gradients")
        param.grad.data = dest_tensor_full_buffer.data
        see_memory_usage("After partitioning gradients", force=False)

    def get_partition_dp_group(self, param):
        return param.ds_process_group

    def get_partition_rank(self):
        """subclass can overload to specify different relative rank in parameter partition group"""
        return self.rank

    @property
    def num_partitions(self):
        return self.dp_world_size

    def get_dp_process_group(self):
        """ Return the communication group with all data-parallel ranks """
        return self.ds_process_group


class GatheredParameters:

    def __init__(self, params, modifier_rank=None, fwd_module=None, enabled=True):
        """A context that collects parameters that were partitioned via a
        :class:`deepspeed.zero.Init` context. The parameters are partitioned
        again upon exit.

        Args:
            params (``torch.nn.Parameter``): A single parameter, or an iterable of parameters (list, tuple, generator)
                of parameters to collect. It's assumed that all parameters are zero params.
            modifier_rank (int, optional): If specified, this rank's parameter will be
                broadcast on exit from the context. This argument is required if ``params`` are
                modified, so that all processes have a consistent view of the data. Defaults
                to ``None``.
            fwd_module (``torch.nn.Module``, optional): If specified, ``params`` will be
                registered as external parameters of ``fwd_module``. See :meth:`deepspeed.zero.register_external_parameter`.
            enabled (bool, optional): If ``False``, this context is a no-op. Defaults to ``True``.

        Important: Make sure to use ``modifier_rank`` that is not ``None`` (e.g., ``modifier_rank=0``)
        if you need the GPU memory allocated by the gather to be released upon exit from the context manager.

        Important: if ``params`` isn't an iterable of parameters or a single parameter it'll be silently ignored!

        Examples
        ========

        #. Allocate a partitioned module, initialize its weight on rank 0, and update all processes.

            .. code-block:: python

                with deepspeed.zero.Init():
                    linear = torch.nn.Linear(1000, 1000)

                with deepspeed.zero.GatheredParameters(linear.weight, modifier_rank=0):
                    if deepspeed.comm.get_rank() == 0:
                        linear.weight.zero_()

        #. Collect a partitioned weight to pass to another module during training.
           The parameter will be registered as an external parameter and made available
           during the backward pass.

            .. code-block:: python
                :emphasize-lines: 6

                def forward(self, input):
                    x = self.layer1(input)

                    # self.layer1.weight is required by self.layer2.forward
                    with deepspeed.zero.GatheredParameters(self.layer1.weight, fwd_module=self):
                        y = self.layer2(x, self.layer1.weight)
                    return y

        #. Pretrained model loading ..
code-block:: python with deepspeed.zero.Init(): model = MyModel() state_dict = torch.load(model_path, map_location="cpu") def load(module: nn.Module, prefix=""): # because zero3 puts placeholders in model params, this context # manager gathers (unpartitions) the params of the current layer, then loads from # the state dict and then re-partitions them again with deepspeed.zero.GatheredParameters(list(module.parameters(recurse=False)), modifier_rank=0): if deepspeed.comm.get_rank() == 0: module._load_from_state_dict(state_dict, prefix) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + ".") load(model, prefix="") If this approach is not used, then the full model will first be copied to each GPU. For models bigger than the memory of a single GPU, this method is required. """ self.enabled = enabled if not enabled: return if isinstance(params, Iterable) and not isinstance(params, torch.Tensor): # deal with generators like model.parameters() # must convert to list to be able to iterate more than once if we get a generator params = list(params) else: # single param params = [params] # enable if at least one is zero-param, otherwise a noop if not any(is_zero_param(p) for p in params): self.enabled = False return self.params = [p for p in params if hasattr(p, "ds_id")] self.src_rank = None if modifier_rank is not None: if self.params[0].ds_process_group == dist.get_world_group(): self.src_rank = modifier_rank else: # A group was specified; convert DP rank to global rank self.src_rank = dist.get_global_rank(self.params[0].ds_process_group, modifier_rank) self.fwd_module = fwd_module if self.fwd_module is not None: # is a no-op if already registered for p in self.params: register_external_parameter(self.fwd_module, p) def __enter__(self): if not self.enabled: return self.params[0].all_gather(param_list=self.params) def __exit__(self, *exc): if not self.enabled: return if self.src_rank is None: self.params[0].partition(param_list=self.params, has_been_updated=False) return handles = [dist.broadcast(p, self.src_rank, group=p.ds_process_group, async_op=True) for p in self.params] for h in handles: h.wait() self.params[0].partition(param_list=self.params, has_been_updated=True)
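# --- Illustrative sketch (added for exposition; not part of the original DeepSpeed file) ---
# The coalesced all-gather above flattens every rank's parameter partitions into a single
# per-rank buffer, all-gathers those buffers, and then re-slices each parameter out of the
# gathered partitions (see AllGatherCoalescedHandle.wait and all_gather_coalesced). The
# single-process demo below mimics that buffer layout with plain torch tensors and no
# deepspeed.comm; all names here (world_size, rank_buffers, gathered, ...) are hypothetical
# and exist only to illustrate the layout.
if __name__ == "__main__":
    import math

    import torch

    world_size = 2
    # two "parameters": their true (unpadded) contents
    originals = [torch.arange(5, dtype=torch.float32), torch.arange(3, dtype=torch.float32)]
    # each parameter is padded so it splits evenly across ranks (cf. _padding_size/_aligned_size)
    part_numels = [math.ceil(t.numel() / world_size) for t in originals]  # -> [3, 2]
    padded = [torch.cat([t, t.new_zeros(p * world_size - t.numel())]) for t, p in zip(originals, part_numels)]
    # rank r's flat send buffer is the concatenation of its slice of every parameter
    rank_buffers = [
        torch.cat([t[r * p:(r + 1) * p] for t, p in zip(padded, part_numels)]) for r in range(world_size)
    ]
    # "all_gather" result: every rank ends up with all per-rank buffers
    # (in the real code this is flat_tensor, viewed as the `partitions` list)
    gathered = rank_buffers

    # re-slice each parameter, mirroring AllGatherCoalescedHandle.wait()
    offset = 0
    for orig, p in zip(originals, part_numels):
        pieces = []
        for r in range(world_size):
            start = r * p
            if start < orig.numel():
                pieces.append(gathered[r].narrow(0, offset, min(orig.numel() - start, p)))
        rebuilt = torch.cat(pieces)
        assert torch.equal(rebuilt, orig), "layout reconstruction should round-trip"
        offset += p
    print("coalesced all-gather layout round-trips correctly")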
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# Linear module to use with ZeRO Stage 3, allowing parameter memory to be released
# after the module executes during the forward pass.
# Instead of saving variables with save_for_backward, we save variable ids, which lets
# us retrieve a variable without keeping a pointer to it, so the underlying tensor can
# be garbage collected when the ZeRO Stage 3 optimizer partitions it.
# TODO: instead of patching the Linear module, we could patch ctx.save_for_backward /
# ctx.saved_tensors so that this approach works for all nn modules built on
# torch.nn.functional. The issue is that many modules use C++ implementations that have
# no PyTorch equivalent, e.g. torch.addmm, which acts as a functional when implemented
# outside of torch.autograd.Function.

import math

import torch
from torch import Tensor
from torch.nn.parameter import Parameter
from torch.nn import init
from torch.nn.modules.module import Module
from deepspeed.runtime.utils import noop_decorator
from deepspeed import comm as dist
from deepspeed.accelerator import get_accelerator


def print_rank_0(message, debug=False, force=False):
    if dist.get_rank() == 0 and (debug or force):
        print(message)


try:
    autocast_custom_fwd = get_accelerator().amp().custom_fwd
    autocast_custom_bwd = get_accelerator().amp().custom_bwd
except (ImportError, AttributeError) as exp:
    autocast_custom_fwd = noop_decorator
    autocast_custom_bwd = noop_decorator


class LinearFunctionForZeroStage3(torch.autograd.Function):

    # Note that both forward and backward are @staticmethods
    @staticmethod
    @autocast_custom_fwd
    # bias is an optional argument
    def forward(ctx, input, weight, bias=None):
        ctx.save_for_backward(input, weight, bias)

        if input.dim() == 2 and bias is not None:
            # fused op is marginally faster
            ret = torch.addmm(bias, input, weight.t())
        else:
            output = input.matmul(weight.t())
            if bias is not None:
                output += bias
            ret = output

        return ret

    # This function has only a single output, so it gets only one gradient
    @staticmethod
    @autocast_custom_bwd
    def backward(ctx, grad_output):
        # This is a pattern that is very convenient - at the top of backward
        # unpack saved_tensors and initialize all gradients w.r.t. inputs to
        # None. Thanks to the fact that additional trailing Nones are
        # ignored, the return statement is simple even when the function has
        # optional inputs.
        input, weight, bias = ctx.saved_tensors

        grad_input = grad_weight = grad_bias = None

        #print(f"backward shaped grad_output {grad_output.shape}, input {input.shape}, weight {weight.shape} and bias {bias.shape if bias is not None else None}")
        # These needs_input_grad checks are optional and there only to
        # improve efficiency. If you want to make your code simpler, you can
        # skip them. Returning gradients for inputs that don't require it is
        # not an error.
if ctx.needs_input_grad[0]: #print(f"Computing grad input weight {weight.shape} grad_output {grad_output.shape}") grad_input = grad_output.matmul(weight) #print(f"Computed grad input {grad_input.shape}") if ctx.needs_input_grad[1]: #print("Computing grad weight") dim = grad_output.dim() if dim > 2: grad_weight = grad_output.reshape(-1, grad_output.shape[-1]).t().matmul(input.reshape(-1, input.shape[-1])) else: grad_weight = grad_output.t().matmul(input) #print(f"Computed grad weight grad_weight {grad_weight.shape}") if bias is not None and ctx.needs_input_grad[2]: #print("Computing grad bias") grad_bias = grad_output.sum(0) #print("Done computing grad bias") #print("needs bias") #print(f"backward shaped grad_input {grad_input.shape}, grad_weight {grad_weight.shape}, grad_bias {grad_bias.shape if grad_bias is not None else None}") return grad_input, grad_weight, grad_bias def zero3_linear_wrap(input, weight, bias=None): if bias is None: return LinearFunctionForZeroStage3.apply(input, weight) else: return LinearFunctionForZeroStage3.apply(input, weight, bias) class LinearModuleForZeroStage3(Module): r"""Applies a linear transformation to the incoming data: :math:`y = xA^T + b`. The weights are pre-transposed and stored as A^T instead of transposing during each forward. Memory savings proportional to the parameter size. Args: in_features: size of each input sample out_features: size of each output sample bias: If set to ``False``, the layer will not learn an additive bias. Default: ``True`` Shape: - Input: :math:`(N, *, H_{in})` where :math:`*` means any number of additional dimensions and :math:`H_{in} = \text{in\_features}` - Output: :math:`(N, *, H_{out})` where all but the last dimension are the same shape as the input and :math:`H_{out} = \text{out\_features}`. Attributes: weight: the learnable weights of the module of shape :math:`(\text{out\_features}, \text{in\_features})`. The values are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`, where :math:`k = \frac{1}{\text{in\_features}}` bias: the learnable bias of the module of shape :math:`(\text{out\_features})`. If :attr:`bias` is ``True``, the values are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where :math:`k = \frac{1}{\text{in\_features}}` Examples:: >>> m = nn.Linear(20, 30) >>> input = torch.randn(128, 20) >>> output = m(input) >>> print(output.size()) torch.Size([128, 30]) """ __constants__ = ['in_features', 'out_features'] in_features: int out_features: int weight: Tensor def __init__(self, in_features: int, out_features: int, bias: bool = True) -> None: super(LinearModuleForZeroStage3, self).__init__() print("Building ZeRO module") self.in_features = in_features self.out_features = out_features self.weight = Parameter(torch.Tensor(out_features, in_features)) if bias: self.bias = Parameter(torch.Tensor(out_features)) else: self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self) -> None: init.kaiming_uniform_(self.weight, a=math.sqrt(5)) if self.bias is not None: fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight) bound = 1 / math.sqrt(fan_in) init.uniform_(self.bias, -bound, bound) def forward(self, input: Tensor) -> Tensor: return LinearFunctionForZeroStage3.apply(input, self.weight, self.bias) def extra_repr(self) -> str: return 'in_features={}, out_features={}, bias={}'.format(self.in_features, self.out_features, self.bias is not None)
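# --- Illustrative sketch (added for exposition; not part of the original DeepSpeed file) ---
# A standalone sanity check of the backward formulas used by LinearFunctionForZeroStage3:
#   grad_input  = grad_output @ W
#   grad_weight = grad_output^T @ input   (flattened over leading dims when input.dim() > 2)
#   grad_bias   = grad_output.sum(0)
# It re-derives the gradients with plain torch autograd and compares them; it deliberately
# avoids importing the class above so it runs on its own, and the tensor names (x, w, b)
# are only for this demo.
if __name__ == "__main__":
    import torch

    torch.manual_seed(0)
    x = torch.randn(8, 20, requires_grad=True)
    w = torch.randn(30, 20, requires_grad=True)
    b = torch.randn(30, requires_grad=True)

    out = torch.addmm(b, x, w.t())  # fused path used by forward() for 2-D inputs
    grad_out = torch.randn_like(out)
    out.backward(grad_out)

    # manual gradients, mirroring backward() above
    assert torch.allclose(x.grad, grad_out.matmul(w), atol=1e-5)
    assert torch.allclose(w.grad, grad_out.t().matmul(x), atol=1e-5)
    assert torch.allclose(b.grad, grad_out.sum(0), atol=1e-5)
    print("manual linear gradients match torch.autograd")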
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team from .partition_parameters import ZeroParamType from .partition_parameters import ZeroParamStatus from .partition_parameters import Init from .partition_parameters import GatheredParameters from .partition_parameters import register_external_parameter from .tiling import TiledLinear from .tiling import TiledLinearReturnBias from .mics import MiCS_Init
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 import os from dataclasses import dataclass from typing import List import numpy as np import torch from torch import Tensor from deepspeed import comm as dist from deepspeed.accelerator import get_accelerator from deepspeed.utils import logger def _log_rank0(msg): if dist.get_rank() == 0: logger.info(msg) @torch.jit.script def scale_tensors(tensors: List[Tensor], scale: int): for t in tensors: t.div_(scale) @dataclass class MiCS_CommGroups: """""" param_shard_group = None param_shard_size = -1 param_shard_rank = -1 param_repli_group = None param_repli_size = -1 param_repli_rank = -1 param_intra_node_group = None param_inter_node_shard_group = None def create_mics_comm_groups( shard_size, dp_group, hierarchical_allgather=False, mpu=None, ): """ create shard-group, replicate-group from config_file TODO: consider broadcast the config from rank0 Returns: MiCS_CommGroups """ # env var for debugging purpose ndevices_per_node = int(os.environ.get("NDEV_PER_NODE", get_accelerator().device_count())) _log_rank0(f'creating MiCS communication groups with per node device size {ndevices_per_node}') groups = MiCS_CommGroups() if mpu is not None: assert dp_group == mpu.get_data_parallel_group() # full size of the world world_size = dist.get_world_size() # global rank global_rank = dist.get_rank() config = _generate_mics_config(world_size, ndevices_per_node, shard_size, 1) ranks_of_shard_group = config['shard_groups'] ranks_of_repli_group = config['replicate_groups'] if len(ranks_of_repli_group) == 0: assert len(ranks_of_shard_group) == 1, "replicate groups are empty only for single shard group" for r in ranks_of_shard_group[0]: ranks_of_repli_group.append([r]) # for simplicity assert _sizes_all_same(ranks_of_repli_group), "replicate groups must have the same size" assert _sizes_all_same(ranks_of_shard_group), "shard groups must have the same size" assert sum([len(g) for g in ranks_of_shard_group]) == dist.get_world_size(), "all sharded ranks " if len(ranks_of_shard_group) > 1: # if only shard on one group then no need for replicate groups assert len(ranks_of_shard_group) == len( ranks_of_repli_group[0]), "number of shard groups must equal to the size of each replicate group" global_rank = dist.get_rank() # create shard groups for shard_ranks in ranks_of_shard_group: _group = dist.new_group(shard_ranks) if global_rank in shard_ranks: groups.param_shard_group = _group groups.param_shard_size = len(shard_ranks) groups.param_shard_rank = dist.get_rank(_group) logger.info(f'rank {global_rank}, shard group' f' {groups.param_shard_rank}/{dist.get_world_size(group=_group)}') # create replicate groups for repli_ranks in ranks_of_repli_group: if len(repli_ranks) > 1: _group = dist.new_group(repli_ranks) if global_rank in repli_ranks: groups.param_repli_group = _group groups.param_repli_size = len(repli_ranks) groups.param_repli_rank = dist.get_rank(group=_group) logger.info(f'rank {global_rank} ' f'replicate group {groups.param_repli_rank}/{dist.get_world_size(group=_group)}') else: groups.param_repli_group = None groups.param_repli_size = 1 groups.param_repli_rank = 0 logger.info(f'rank {global_rank} replicate group 0/1') # assign shard group size as world size assert groups.param_shard_size == len(ranks_of_shard_group[0]) if hierarchical_allgather: # create hierarchy inter-node, intra-node groups # n_span_nodes = 
config['shard_span'] n_span_nodes = config['span_nodes'] assert n_span_nodes > 1, "sharding spans on single node, no need for hierarchy allgather" assert len(ranks_of_shard_group[0]) % n_span_nodes == 0 n_gpu_per_node = len(ranks_of_shard_group[0]) // n_span_nodes intra_node_ranks_group = [] inter_node_ranks_group = [] for shard_group in ranks_of_shard_group: _intra_node_ranks = [] for i in range(0, len(shard_group), n_gpu_per_node): _intra_node_ranks.append(shard_group[i:i + n_gpu_per_node]) _inter_node_ranks = [] for i in range(n_gpu_per_node): _ranks = [_g[i] for _g in _intra_node_ranks] _inter_node_ranks.append(_ranks) intra_node_ranks_group.append(_intra_node_ranks) inter_node_ranks_group.append(_inter_node_ranks) _log_rank0(f"create for hierarchy all-gather groups: intra nodes {intra_node_ranks_group}") _log_rank0(f"create for hierarchy all-gather groups: inter nodes {inter_node_ranks_group}") # create communicators for shard_group in intra_node_ranks_group: for intra_node_ranks in shard_group: _group = dist.new_group(intra_node_ranks) if global_rank in intra_node_ranks: groups.param_intra_node_group = _group _log_rank0(f'create group for intra node ranks {intra_node_ranks}') for shard_group in inter_node_ranks_group: for inter_node_ranks in shard_group: _group = dist.new_group(inter_node_ranks) if global_rank in inter_node_ranks: groups.param_inter_node_shard_group = _group _log_rank0(f'create group for inter node ranks {inter_node_ranks}') return groups def _generate_mics_config(world_size, ndev_per_node, shard_size, pp_size=1): """Generating the configuration for sharding This shard config generation assume that the pipeline stages are partitioned in order, i.e., first ranks hold the stage0, etc. Args: shard_size (int): zero3 data-parallel shard size, FIXME: change the name later pp_size (int): pipeline parallel size, currently, only work with pipeline parallelism + zero """ assert world_size % pp_size == 0 assert (world_size // pp_size) % shard_size == 0, \ f"dp group size is not dividable by dp_shard_size, "\ f" (world_size {world_size}, pp_size {pp_size}, dp_shard_size {shard_size})" config = {} shard_groups = np.arange(world_size).reshape(-1, shard_size) replicate_groups = [] for i in range(shard_size): same_shard_ranks = shard_groups[:, i].tolist() n_ranks = len(same_shard_ranks) replicate_size = n_ranks // pp_size replicate_groups.extend([same_shard_ranks[j:j + replicate_size] for j in range(0, n_ranks, replicate_size)]) config['replicate_groups'] = replicate_groups config['shard_groups'] = shard_groups.tolist() config["span_nodes"] = len(shard_groups[0]) // ndev_per_node return config def _sizes_all_same(groups): """all groups have same length""" all_same = True for g in groups: if len(g) != len(groups[0]): return False return all_same
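# --- Illustrative sketch (added for exposition; not part of the original DeepSpeed file) ---
# Worked example of the sharding layout that _generate_mics_config produces for a small,
# made-up cluster: world_size=8, 4 devices per node, shard_size=4, pp_size=1. The snippet
# mirrors the same numpy reshaping instead of importing the private helper, so it runs
# standalone; all concrete numbers here are for illustration only.
if __name__ == "__main__":
    import numpy as np

    world_size, ndev_per_node, shard_size, pp_size = 8, 4, 4, 1

    shard_groups = np.arange(world_size).reshape(-1, shard_size)
    # -> [[0, 1, 2, 3], [4, 5, 6, 7]]: each row shards one replica of the parameters
    replicate_groups = []
    for i in range(shard_size):
        same_shard_ranks = shard_groups[:, i].tolist()  # ranks holding the same shard slot
        replicate_size = len(same_shard_ranks) // pp_size
        replicate_groups.extend(same_shard_ranks[j:j + replicate_size]
                                for j in range(0, len(same_shard_ranks), replicate_size))
    # -> [[0, 4], [1, 5], [2, 6], [3, 7]]: ranks that replicate the same parameter shard
    span_nodes = shard_size // ndev_per_node  # -> 1: each shard group fits on a single node

    print("shard_groups:    ", shard_groups.tolist())
    print("replicate_groups:", replicate_groups)
    print("span_nodes:      ", span_nodes)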
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import torch from deepspeed.runtime.zero.contiguous_memory_allocator import ContiguousMemoryAllocator def test1(): mem = ContiguousMemoryAllocator(1024, torch.half, 'cpu') mem.print_allocation(resolution=100) a1 = mem.allocate_tensor(64).mul_(0.0).add_(1.0) mem.print_allocation(resolution=100) mem.release_tensor(a1) mem.print_allocation(resolution=100) a2 = mem.allocate_tensor(64).mul_(0.0).add_(2.0) a3 = mem.allocate_tensor(256).mul_(0.0).add_(3.0) a4 = mem.allocate_tensor(128).mul_(0.0).add_(4.0) mem.print_allocation(resolution=100) mem.release_tensor(a3) mem.print_allocation(resolution=100) a5 = mem.allocate_tensor(64).mul_(0.0).add_(5.0) a6 = mem.allocate_tensor(256).mul_(0.0).add_(6.0) a7 = mem.allocate_tensor(128).mul_(0.0).add_(7.0) mem.print_allocation(resolution=100) a8 = mem.allocate_tensor(256).mul_(0.0).add_(8.0) a9 = mem.allocate_tensor(128).mul_(0.0).add_(9.0) mem.print_allocation(resolution=100) mem.release_tensor(a9) mem.release_tensor(a6) mem.release_tensor(a2) mem.release_tensor(a5) a10 = mem.allocate_tensor(512).mul_(0.0).add_(10.0) mem.print_allocation(resolution=100) #print(f"a4:{a4}") #print(f"a7:{a7}") #print(f"a8:{a8}") #print(f"a10:{a10}") assert (a4.norm() + a7.norm() + a8.norm() + a10.norm()).item() == 474.50, "Test failed" def test2(): mem = ContiguousMemoryAllocator(512, torch.half, 'cpu') a1 = mem.allocate_tensor(64).mul_(0.0).add_(1.0) a2 = mem.allocate_tensor(64).mul_(0.0).add_(2.0) a3 = mem.allocate_tensor(64).mul_(0.0).add_(3.0) a4 = mem.allocate_tensor(64).mul_(0.0).add_(4.0) a5 = mem.allocate_tensor(64).mul_(0.0).add_(5.0) a6 = mem.allocate_tensor(64).mul_(0.0).add_(6.0) a7 = mem.allocate_tensor(64).mul_(0.0).add_(7.0) a8 = mem.allocate_tensor(64).mul_(0.0).add_(8.0) mem.release_tensor(a2) mem.release_tensor(a4) mem.release_tensor(a6) mem.release_tensor(a8) mem.print_allocation(resolution=100) a9 = mem.allocate_tensor(128).mul_(0.0).add_(9.0) a10 = mem.allocate_tensor(64).mul_(0.0).add_(10.0) a11 = mem.allocate_tensor(64).mul_(0.0).add_(11.0) mem.release_tensor(a1) mem.release_tensor(a5) mem.print_allocation(resolution=100) a12 = mem.allocate_tensor(128).mul_(0.0).add_(12.0) mem.print_allocation(resolution=100) print(f"a7:{a7}") print(f"a9:{a9}") print(f"a10:{a10}") print(f"a11:{a11}") print(f"a12:{a12}") assert (a7.norm() + a9.norm() + a10.norm() + a11.norm() + a12.norm()) == 460.75, "TestFailed" test1() test2()
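# --- Illustrative sketch (added for exposition; not part of the original test file) ---
# The tests above only exercise allocate/release. The allocator is also meant to back
# torch parameters via assign_to_param, so that a later defragmentation can re-point
# param.data at the moved buffer (see _reset_param_data in the implementation below).
# A minimal, hypothetical usage sketch; like the tests above, it relies on whatever
# distributed setup print_rank_0 inside the allocator needs.
def test3_assign_to_param_sketch():
    param = torch.nn.Parameter(torch.zeros(4, 16, dtype=torch.half), requires_grad=False)
    mem = ContiguousMemoryAllocator(256, torch.half, 'cpu')
    buf = mem.allocate_tensor(64).mul_(0.0).add_(1.0)
    # view the first 64 elements of the allocated buffer as the parameter's storage
    mem.assign_to_param(buf, param, numel=64, shape=(4, 16))
    assert param.data.eq(1.0).all(), "param should alias the allocated buffer"
    mem.release_tensor(buf)


test3_assign_to_param_sketch()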
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import torch from deepspeed import comm as dist def print_rank_0(message): if dist.get_rank() == 0: print(message) class ContiguousMemoryAllocator(object): def __init__(self, size, dtype, device): self.buffer = torch.zeros(size, dtype=dtype, device=device) #address to contiguous size available self.contiguous_sizes = {} self.contiguous_sizes[0] = size #tensor id to its address self.tensor_addresses = {} #tensor address to its size self.tensor_sizes = {} #tensor address to ids self.tensor_ids = {} #id to tensors self.tensor_map = {} #id to params. Maps each tensor buffer to list of parameters that uses it self.id_to_params = {} self.total_size = size self.total_free = size self.largest_contiguous = size self.max_allocated = 0 self.count = 0 #create a tensor of size from the pre-allocated buffer #if not enough free space will fail #if not enough contiguous space, will defragment and allocate def allocate_tensor(self, size): free_before = self.total_free assert size <= self.total_free, "Not enough memory in buffer. Allocation failed" if self.largest_contiguous < size: print_rank_0("Needs defragmentation to allocate. Before Defragmentation:") self.print_allocation(resolution=100) self._defragment_memory() #set the param data to the new tensor buffer locations self._reset_param_data() print_rank_0("After defragmentation:") self.print_allocation(resolution=100) self.total_free = self.total_free - size allocated = self.total_size - self.total_free if allocated > self.max_allocated: self.max_allocated = allocated tensor_address = self._get_new_tensor_address(size) ret_tensor = self._get_new_tensor(tensor_address, size) print_rank_0( f"Free before allocation {free_before}. Allocating {size}. Free after allocation {self.total_free}. Max allocated {self.max_allocated}" ) assert self.total_free + size == free_before, "Allocation bookkeeping error" return ret_tensor #assigns the tensor data to the param data and keeps track of the assignment #any change the the underlying buffer from defragmentation will cause a #reassignment of the param data def assign_to_param(self, tensor, param, numel, shape): tensor_id = id(tensor) assert tensor_id in self.tensor_map.keys(), "No such tensor allocated by the allocator." assert tensor.numel() >= numel, "Assert tensor buffer does is not large enough" assert not tensor_id in self.id_to_params.keys(), "This tensor has already been assigned to a param" self.id_to_params[tensor_id] = [param] replicated_tensor = tensor.narrow(0, 0, numel).view(shape) param.data = replicated_tensor.data param.contiguous_tensor_id = tensor_id #deletes the tensor and frees up the underlying buffer def release_tensor(self, tensor): free_before = self.total_free tensor_id = id(tensor) tensor_size = tensor.numel() self._release_tensor(tensor_id) self._unassign_params(tensor_id) self.total_free += tensor_size print_rank_0( f"Free before release {free_before}. Released {tensor.numel()}. Total free after {self.total_free}.") assert self.total_free - tensor_size == free_before, "Release bookkeeping error" def release_tensor_with_id(self, tensor_id): free_before = self.total_free assert tensor_id in self.tensor_map.keys(), "Invalid tensor id" tensor = self.tensor_map[tensor_id] tensor_size = tensor.numel() self._release_tensor(tensor_id) self._unassign_params(tensor_id) self.total_free += tensor_size print_rank_0( f"Free before release {free_before}. Released {tensor.numel()}. 
Total free after {self.total_free}.") assert self.total_free - tensor_size == free_before, "Release bookkeeping error" #shows the current memory allocation at specified resolution def print_allocation(self, resolution=200): total_size = self.buffer.numel() * 1.0 empty = [] for addr, size in self.contiguous_sizes.items(): start = int(addr * resolution / total_size) end = int((addr + size) * resolution / total_size) empty.extend(range(start, end)) s = '' for i in range(resolution): s += '.' if i in empty else '|' print_rank_0(s) def max_allocated(self): return self.max_allocated #to be called after defragmentation that moves the tensor buffers #this call reassigns the data of all the parameters using the tensor buffers def _reset_param_data(self): for id, tensor in self.tensor_map.items(): for param in self.id_to_params[id]: param.data = tensor.narrow(0, 0, param.numel()).view(param.data.shape).data def _unassign_params(self, tensor_id): if tensor_id in self.id_to_params.keys(): del self.id_to_params[tensor_id] def _release_tensor(self, tensor_id): assert tensor_id in self.tensor_addresses, f"Tensor id {tensor_id} not found" address = self.tensor_addresses[tensor_id] contiguous_size = self.tensor_map[tensor_id].numel() del self.tensor_addresses[tensor_id] del self.tensor_ids[address] del self.tensor_map[tensor_id] del self.tensor_sizes[address] self._consolidate_address(address, contiguous_size) self.largest_contiguous = self._largest_contiguous() def _consolidate_address(self, address, contiguous_size): #consolidate next buffer end_address = address + contiguous_size if end_address in self.contiguous_sizes: contiguous_size += self.contiguous_sizes[end_address] del self.contiguous_sizes[end_address] #consolidate previous buffer for addr, size in self.contiguous_sizes.items(): if addr + size == address: del self.contiguous_sizes[addr] contiguous_size += size address = addr break self.contiguous_sizes[address] = contiguous_size def _defragment_memory(self): empty_addresses = sorted(self.contiguous_sizes.keys()) tensor_addresses = sorted(self.tensor_addresses.values()) tensor_index = 0 while tensor_index < len(tensor_addresses): empty_addr = empty_addresses[0] empty_size = self.contiguous_sizes[empty_addr] tensor_addr = tensor_addresses[tensor_index] tensor_size = self.tensor_sizes[tensor_addr] tensor_id = self.tensor_ids[tensor_addr] tensor = self.tensor_map[self.tensor_ids[tensor_addr]] assert tensor_size == tensor.numel(), \ "Size mismatch. 
{tensor_size} is allocated at addr {tensor_addr} but tensor size is {tensor.numel()} " assert empty_addr != tensor_addr, \ f"Cannot have same empty address {empty_addr} and tensor address {tensor_addr}" if empty_addr < tensor_addr: if empty_size >= tensor_size: dest_buffer = self.buffer.narrow(0, empty_addr, tensor_size) src_buffer = self.buffer.narrow(0, tensor_addr, tensor_size) dest_buffer.data.copy_(src_buffer.data) else: #print_rank_0(f'empty addr : {empty_addr}, empty size {empty_size} tensor addr {tensor_addr} tensor size {tensor_size}') src_addr = tensor_addr dest_addr = empty_addr while src_addr < (tensor_addr + tensor_size): copy_size = min(empty_size, tensor_addr + tensor_size - src_addr) dest_buffer = self.buffer.narrow(0, dest_addr, copy_size) src_buffer = self.buffer.narrow(0, src_addr, copy_size) dest_buffer.data.copy_(src_buffer.data) src_addr += copy_size dest_addr += copy_size self._replace_old_address_with_new(tensor_id, empty_addr) tensor_index += 1 else: tensor_index += 1 empty_addresses = sorted(self.contiguous_sizes.keys()) def _replace_old_address_with_new(self, tensor_id, new_address): tensor = self.tensor_map[tensor_id] tensor_size = tensor.numel() tensor.data = self.buffer.narrow(0, new_address, tensor_size).data self._release_tensor(tensor_id) self._mark_as_occupied(new_address, tensor_size) self.tensor_ids[new_address] = tensor_id self.tensor_map[tensor_id] = tensor self.tensor_addresses[tensor_id] = new_address self.tensor_sizes[new_address] = tensor_size def _get_new_tensor_address(self, size): tensor_address = None for address, contiguous_size in self.contiguous_sizes.items(): if contiguous_size >= size and \ (tensor_address is None or \ contiguous_size < self.contiguous_sizes[tensor_address]): tensor_address = address assert tensor_address is not None, "address cannot be None" return tensor_address def _get_new_tensor(self, address, size): available_contiguous_size = self.contiguous_sizes[address] assert size <= available_contiguous_size, \ f"Tensor numel {size} is large than available contiguous size {available_contiguous_size}" self.count += 1 new_tensor = self.buffer.narrow(0, address, size) tensor_id = id(new_tensor) self.tensor_addresses[tensor_id] = address self.tensor_sizes[address] = size self.tensor_ids[address] = tensor_id self.tensor_map[tensor_id] = new_tensor self._mark_as_occupied(address, size) return new_tensor def _largest_contiguous(self): if len(self.contiguous_sizes) > 0: return max([size for _, size in self.contiguous_sizes.items()]) else: return 0 def _mark_as_occupied(self, address, size): available_contiguous_size = self.contiguous_sizes[address] del self.contiguous_sizes[address] if available_contiguous_size != size: self.contiguous_sizes[address + size] = available_contiguous_size - size self.largest_contiguous = self._largest_contiguous()
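# Illustration only, not from the DeepSpeed source: a minimal use of the allocator
# defined above, under the same single-process CPU setup the accompanying tests rely on.
# It shows the three-step flow: carve a view out of the shared fp16 buffer, bind it to a
# parameter with assign_to_param so the param aliases the buffer, then release it so the
# space can be reused. All names below are local to this sketch.
import torch

from deepspeed.runtime.zero.contiguous_memory_allocator import ContiguousMemoryAllocator

mem = ContiguousMemoryAllocator(256, torch.half, 'cpu')

buf = mem.allocate_tensor(64)  # a 64-element view into the 256-element buffer
param = torch.nn.Parameter(torch.empty(8, 8, dtype=torch.half), requires_grad=False)
mem.assign_to_param(buf, param, numel=64, shape=(8, 8))

param.data.fill_(3.0)          # writes land in the shared buffer
assert buf[0].item() == 3.0

mem.release_tensor(buf)        # the 64 elements are free for the next allocation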
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import os
from typing import List

import torch

from deepspeed import comm as dist
from deepspeed.utils import logger
from deepspeed.ops.adam import DeepSpeedCPUAdam
from deepspeed.ops.adagrad import DeepSpeedCPUAdagrad
from deepspeed.ops.adam import FusedAdam
from deepspeed.utils.nvtx import instrument_w_nvtx
from deepspeed.accelerator import get_accelerator


def _initialize_parameter_parallel_groups(parameter_parallel_size=None):
    data_parallel_size = int(dist.get_world_size())
    parameter_parallel_size = parameter_parallel_size or data_parallel_size
    logger.info("data_parallel_size: %s, parameter_parallel_size: %s", data_parallel_size, parameter_parallel_size)
    assert data_parallel_size % parameter_parallel_size == 0, \
        'world size should be divisible by parameter parallel size'
    rank = dist.get_rank()
    my_group = None
    for i in range(data_parallel_size // parameter_parallel_size):
        ranks = range(i * parameter_parallel_size, (i + 1) * parameter_parallel_size)
        group = dist.new_group(ranks)
        if rank in ranks:
            my_group = group
    return my_group


class ZeRORuntimeException(Exception):
    pass


ZERO_SUPPORTED_OPTIMIZERS = [
    torch.optim.Adam, torch.optim.AdamW, FusedAdam, DeepSpeedCPUAdam, torch.optim.Adagrad, DeepSpeedCPUAdagrad
]

# Add apex FusedAdam to supported list if apex is installed
try:
    import apex
    if hasattr(apex, 'optimizers') and hasattr(apex.optimizers, 'FusedAdam'):
        ZERO_SUPPORTED_OPTIMIZERS.append(apex.optimizers.FusedAdam)
except ImportError:
    pass


def is_zero_supported_optimizer(optimizer):
    if dist.get_rank() == 0:
        logger.info(f'Checking ZeRO support for optimizer={optimizer.__class__.__name__} type={type(optimizer)}')
    return type(optimizer) in ZERO_SUPPORTED_OPTIMIZERS


def get_lst_from_rank0(lst: List[int]) -> List[int]:
    """
    NOTE: creates both communication and synchronization overhead so should be used
    sparingly
    """
    lst_tensor = torch.tensor(
        lst if dist.get_rank() == 0 else [-1] * len(lst),
        dtype=int,
        # device=get_accelerator().current_device_name(),
        device=torch.device(get_accelerator().device_name(os.environ["LOCAL_RANK"])),
        requires_grad=False,
    )
    dist.broadcast(lst_tensor, src=0, async_op=False)
    return list(lst_tensor.cpu().numpy())


@instrument_w_nvtx
def assert_ints_same_as_other_ranks(ints: List[int]) -> None:
    """
    NOTE: creates both communication and synchronization overhead so should be used
    sparingly

    takes a list of ints from each rank and ensures that they are the same across ranks,
    throwing an exception if they are not.
    """
    rank0_ints = get_lst_from_rank0(ints)
    if ints != rank0_ints:
        raise RuntimeError(f"disagreement between rank0 and rank{dist.get_rank()}: "
                           f"rank0: {rank0_ints}, rank{dist.get_rank()}: {ints}")
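# Illustration only, not from the DeepSpeed source: the support check above uses an
# exact-type membership test (type(optimizer) in ZERO_SUPPORTED_OPTIMIZERS), so a
# subclass of a supported optimizer is not recognized. The sketch checks the list
# directly instead of calling is_zero_supported_optimizer(), which would also need an
# initialized deepspeed.comm; the module path is assumed from this file's location.
import torch

from deepspeed.runtime.zero.utils import ZERO_SUPPORTED_OPTIMIZERS


class MyAdamW(torch.optim.AdamW):
    pass


params = [torch.nn.Parameter(torch.zeros(1))]
assert type(torch.optim.AdamW(params)) in ZERO_SUPPORTED_OPTIMIZERS
assert type(MyAdamW(params)) not in ZERO_SUPPORTED_OPTIMIZERS  # subclasses are rejected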
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 import sys from typing import List import deepspeed import torch from deepspeed import comm as dist from deepspeed.runtime.zero.mics_utils import (MiCS_CommGroups, create_mics_comm_groups, scale_tensors) from deepspeed.runtime.zero.parameter_offload import (DeepSpeedZeRoOffload, is_zero_param) from deepspeed.runtime.zero.partition_parameters import Init, AllGatherCoalescedHandle, ZeroParamStatus from deepspeed.runtime.zero.stage3 import DeepSpeedZeroOptimizer_Stage3 from deepspeed.utils import instrument_w_nvtx, log_dist from torch import Tensor from torch.nn import Parameter def has_hierarchical_all_gather_groups(comm_groups: MiCS_CommGroups): result = False if comm_groups.param_intra_node_group is not None and comm_groups.param_inter_node_shard_group is not None: result = True return result class MiCS_AllGatherCoalescedHandle(AllGatherCoalescedHandle): """ This handle assumes that no need to copy data out from a contiguous tensor """ def __init__(self, allgather_handle, params: List[Parameter], partitions: List[Tensor], world_size: int) -> None: super().__init__(allgather_handle, params, partitions, world_size) def wait(self) -> None: """ """ # let the current stream to op instrument_w_nvtx(self.allgather_handle.wait)() if self.complete: return for _, param in enumerate(self.params): assert param.ds_status == ZeroParamStatus.INFLIGHT, f"expected param {param.ds_summary()} to be inflight" param.ds_status = ZeroParamStatus.AVAILABLE self.complete = True class MiCS_Init(Init): def __init__(self, module=None, data_parallel_group=None, mem_efficient_linear=True, remote_device=None, pin_memory=False, config_dict_or_path=None, config=None, enabled=True, dtype=None, mpu=None): """A context manager to partition the model parameters during the model construction with MiCS partition strategy. Model states are partitioned to the number of devices specified via ``mics_shard_size`` field in the deepspeed config json file. The context manager also introduces hierarchical communication method to reduce the cost of inter-node communications, which can be enabled with ``mics_hierarchical_params_gather`` field in deepspeed config. Args: module (``torch.nn.Module``, optional): If provided, partition the model as if it was constructed in the context. data_parallel_group (``deepspeed.comm`` process group, optional): The group of processes to partition among. Defaults to all processes. mem_efficient_linear (bool, optional): Replace torch.nn.functional.linear with an implementation that allows DeepSpeed to partition parameters. Defaults to ``True``. remote_device (string, optional): The initial device to store model weights e.g., ``cpu``, ``nvme``. Passing ``"cpu"`` will create the model in CPU memory. The model may still be moved to GPU based on the offload settings for training. Defaults to param offload device if a config is defined, otherwise GPU. pin_memory (bool, optional): Potentially increase performance by using pinned memory for model weights. ``remote_device`` must be ``"cpu"``. Defaults to pin_memory value in config, otherwise ``False``. config_dict_or_path (dict or ``json file``, optional): If provided, provides configuration for swapping fp16 params to NVMe. config (dict or ``json file``, optional): Deprecated, use config_dict_or_path instead. 
enabled (bool, optional): If ``False``, this context has no effect. Defaults to ``True``. dtype (``dtype``, optional): Can be used to change the data type of the parameters. Supported options are ``torch.half`` and ``torch.float``. Defaults to ``None`` mpu (``object``, optional): A model parallelism unit object that implements get_{model,data}_parallel_{rank,group,world_size}. This context follows the same logic as ``deepspeed.zero.Init()``, but with the modification for partition size of each parameter. Examples -------- #. Allocate a model and partition it among all processes: .. code-block:: python # the config_dict_or_path is required to let the context manager know # how partition the parameters. # The configuration has to include the field ``mics_shard_size`` with deepspeed.zero.MiCS_Init(config_dict_or_path=ds_config): model = MyLargeModel() #. Allocate a model in pinned CPU memory and partition it among a subgroup of processes: .. code-block:: python with deepspeed.zero.MiCS_Init(data_parallel_group=mpu.get_data_parallel_group(), remote_device="cpu", pin_memory=True config_dict_or_path=ds_config): model = MyLargeModel() #. Partition an already-allocated model in CPU memory: .. code-block:: python model = deepspeed.zero.MiCS_Init(module=model, config_dict_or_path=ds_config) """ assert config_dict_or_path is not None, "Must provide configuration for MiCS Initialization" _ds_config = deepspeed.runtime.config.DeepSpeedConfig(config_dict_or_path, mpu) if not dist.is_initialized(): dist.init_distributed() assert dist.is_initialized(), "Parameters cannot be scattered without initializing deepspeed.comm" self.mics_comm_groups = create_mics_comm_groups( _ds_config.mics_shard_size, data_parallel_group, hierarchical_allgather=_ds_config.mics_hierarchial_params_gather, mpu=mpu) super().__init__(module, data_parallel_group, mem_efficient_linear, remote_device, pin_memory, config_dict_or_path, config, enabled, dtype, mpu) def _convert_to_deepspeed_param(self, param): super()._convert_to_deepspeed_param(param) # attach communication groups to every param param.comm = self.mics_comm_groups # record existing all_gather_coalesced implementation # so that we can fallback later old_all_gather_coalesced = param.all_gather_coalesced def _param_all_gather_coalesced(params, safe_mode=False, param_buffers=None): """""" mics_comm_groups: MiCS_CommGroups = params[0].comm hierarchical_all_gather = has_hierarchical_all_gather_groups(mics_comm_groups) if dist.has_coalescing_manager() and hierarchical_all_gather: return self._hierarchical_all_gather_params(params, param_buffers) elif dist.has_coalescing_manager(): return self._flat_all_gather_with_coalescing_manager(params, param_buffers) else: return old_all_gather_coalesced(params, safe_mode) # change the all_gather_coalesced method param.all_gather_coalesced = _param_all_gather_coalesced def _pre_all_gather(self, params, params_buffers=None): # fetches from nvme if the partition is not available and in nvme self._ensure_availability_of_partitioned_params(params) for param in params: if param.ds_status != ZeroParamStatus.NOT_AVAILABLE: raise RuntimeError(param.ds_summary()) param.ds_status = ZeroParamStatus.INFLIGHT # ensure that each rank has params in same order. 
the allgather # is done by flattening the parameter list into a single tensor that # can be allgathered in a single call - this means that if each rank # gives a list of the same parameters in a different order we will # silently get incorrect parameter values, and have very difficult # to debug correctness issues. params = sorted(params, key=lambda p: p.ds_id) return params, params_buffers def _flat_all_gather_with_coalescing_manager(self, params, params_buffers=None): """""" # must have to change the status of the param # and ensure they are on the device params, params_buffers = self._pre_all_gather(params, params_buffers) mics_comm_groups: MiCS_CommGroups = params[0].comm param_shard_size = mics_comm_groups.param_shard_size output_tensors = [] input_tensors = [] for i, p in enumerate(params): t_size = p.ds_tensor.ds_numel * param_shard_size if params_buffers is not None and params_buffers[i] is not None: assert params_buffers[i].numel( ) == t_size, f'params_to_gather_buffers[{i}] size {params_buffers[i].numel()} does not match with t_size {t_size}' flat_out = params_buffers[i] else: flat_out = torch.empty(t_size, dtype=p.dtype, device=self.local_device, requires_grad=False).view(-1) output_tensors.append(flat_out) _flat_input = p.ds_tensor.data.view(-1) input_tensors.append(_flat_input) all_gather_handle = dist.all_gather_coalesced(output_tensors, input_tensors, group=mics_comm_groups.param_shard_group, async_op=True) for idx, param in enumerate(params): param.data = output_tensors[idx].narrow(0, 0, param.ds_numel).view(param.ds_shape).data return MiCS_AllGatherCoalescedHandle(allgather_handle=all_gather_handle, params=params, partitions=[], world_size=param_shard_size) def _hierarchical_all_gather_params(self, params, params_buffers=None): """""" params, params_buffers = self._pre_all_gather(params, params_buffers) mics_comm_groups: MiCS_CommGroups = params[0].comm local_rank = dist.get_rank(group=mics_comm_groups.param_intra_node_group) inter_node_comm_group = mics_comm_groups.param_inter_node_shard_group intra_node_comm_group = mics_comm_groups.param_intra_node_group param_shard_size = mics_comm_groups.param_shard_size inter_node_size = dist.get_world_size(group=inter_node_comm_group) intra_node_size = dist.get_world_size(group=intra_node_comm_group) param_tensors = [] for i, p in enumerate(params): param_size = p.ds_tensor.ds_numel * param_shard_size if params_buffers is not None and params_buffers[i] is not None: assert params_buffers[i].numel( ) == param_size, f'param_buffers[{i}] size {params_buffers[i].numel()} does not match with param_size {param_size}' param_tensor = params_buffers[i] else: param_tensor = torch.empty(param_size, dtype=p.dtype, device=self.local_device, requires_grad=False).view(-1) param_tensors.append(param_tensor) # inter node all-gather inter_outputs = [] inter_inputs = [] for i, p in enumerate(params): inter_size = p.ds_tensor.ds_numel * inter_node_size _out = param_tensors[i].narrow(0, local_rank * inter_size, inter_size) inter_outputs.append(_out) inter_inputs.append(p.ds_tensor.data.view(-1).to(self.local_device)) # sync enqueue dist.all_gather_coalesced(inter_outputs, inter_inputs, group=inter_node_comm_group, async_op=False) # intra node all-gather intra_outputs = [] intra_inputs = [] for i, p in enumerate(params): # partition param into multiple chunks for allgather # because inter-node all-gather outputs are in a continues memory # while in param memory, those inter-node data are placed in different # location. 
# each chunk is an intra-node output param_chunk = param_tensors[i].view( (inter_node_size, intra_node_size, p.ds_tensor.ds_numel)).narrow(1, local_rank, 1) param_chunk.copy_(inter_outputs[i].detach().clone().view(param_chunk.size())) output_chunks = torch.chunk(param_tensors[i], inter_node_size) for j, _out in enumerate(output_chunks): intra_chunk_size = intra_node_size * p.ds_tensor.ds_numel local_offset = local_rank * p.ds_tensor.ds_numel _in = param_tensors[i].narrow(0, j * intra_chunk_size + local_offset, p.ds_tensor.ds_numel) intra_outputs.append(_out) intra_inputs.append(_in) all_gather_handle = dist.all_gather_coalesced(intra_outputs, intra_inputs, group=intra_node_comm_group, async_op=True) for i, param in enumerate(params): param.data = param_tensors[i].narrow(0, 0, param.ds_numel).view(param.ds_shape).data return MiCS_AllGatherCoalescedHandle( allgather_handle=all_gather_handle, params=params, partitions=[], world_size=param_shard_size, ) def get_partition_dp_group(self, param): return param.comm.param_shard_group def get_partition_rank(self): return self.mics_comm_groups.param_shard_rank @property def num_partitions(self): return self.mics_comm_groups.param_shard_size class MiCS_Offload(DeepSpeedZeRoOffload): """ Wrapper to change the behavior for parameter sharding """ def __init__(self, module, timers, ds_config, overlap_comm=True, prefetch_bucket_size=50000000, max_reuse_distance=1000000000, max_live_parameters=1000000000, param_persistence_threshold=100000, model_persistence_threshold=sys.maxsize, offload_param_config=None, mpu=None): super().__init__(module, timers, ds_config, overlap_comm, prefetch_bucket_size, max_reuse_distance, max_live_parameters, param_persistence_threshold, model_persistence_threshold, offload_param_config, mpu) def _convert_to_zero_parameters(self, ds_config, module, mpu): """ overload the parent class function for convert the parameters """ log_dist(f'Convert to zero parameters from MiCS Offload manager', ranks=[0]) non_zero_params = [p for p in module.parameters() if not is_zero_param(p)] if non_zero_params: zero_params = [p for p in module.parameters() if is_zero_param(p)] if zero_params: zero_params[0].convert_to_zero_parameters(param_list=non_zero_params) else: group = None if mpu: group = mpu.get_data_parallel_group() MiCS_Init(module=module, data_parallel_group=group, dtype=self.dtype, config_dict_or_path=ds_config, remote_device=self.offload_device, pin_memory=self.offload_param_pin_memory, mpu=mpu) class MiCS_Optimizer(DeepSpeedZeroOptimizer_Stage3): """ MiCS Optimizer """ def __init__(self, module, init_optimizer, timers, ds_config, static_loss_scale=1, dynamic_loss_scale=False, dynamic_loss_args=None, verbose=True, contiguous_gradients=True, reduce_bucket_size=500000000, prefetch_bucket_size=50000000, max_reuse_distance=1000000000, max_live_parameters=1000000000, param_persistence_threshold=100000, model_persistence_threshold=sys.maxsize, dp_process_group=None, reduce_scatter=True, overlap_comm=False, offload_optimizer_config=None, offload_param_config=None, sub_group_size=1000000000000, mpu=None, clip_grad=0, communication_data_type=torch.float16, postscale_gradients=True, gradient_predivide_factor=1, gradient_accumulation_steps=1, elastic_checkpoint=False, aio_config=None): log_dist("Init MiCS optimizer", ranks=[0]) super().__init__(module, init_optimizer, timers, ds_config, static_loss_scale, dynamic_loss_scale, dynamic_loss_args, verbose, contiguous_gradients, reduce_bucket_size, prefetch_bucket_size, max_reuse_distance, 
max_live_parameters, param_persistence_threshold, model_persistence_threshold, dp_process_group, reduce_scatter, overlap_comm, offload_optimizer_config, offload_param_config, sub_group_size, mpu, clip_grad, communication_data_type, postscale_gradients, gradient_predivide_factor, gradient_accumulation_steps, elastic_checkpoint, aio_config) first_param = next(module.parameters()) # overload the dp_process_group and partition_count self.dp_process_group = first_param.comm.param_shard_group self.partition_count = first_param.comm.param_shard_size def initialize_ds_offload(self, module, timers, ds_config, overlap_comm, prefetch_bucket_size, max_reuse_distance, max_live_parameters, param_persistence_threshold, model_persistence_threshold, offload_optimizer_config, mpu): return MiCS_Offload(module, timers, ds_config, overlap_comm, prefetch_bucket_size, max_reuse_distance, max_live_parameters, param_persistence_threshold, model_persistence_threshold, offload_optimizer_config, mpu) def partition_grads(self, params_to_release: List[Parameter], grad_partitions: List[Tensor]) -> None: grad_buffers = super().partition_grads(params_to_release, grad_partitions) # perform all-reduce among replication groups # the function will perform accumulation boundary check self.allreduce_mics_shard_grads(params_to_release, grad_buffers) @instrument_w_nvtx def allreduce_mics_shard_grads(self, params, partitioned_grads_buffers: List[Tensor]): """ """ # TODO: improve the condition check if not self.is_gradient_accumulation_boundary or \ len(partitioned_grads_buffers) == 0: return mics_comm_groups: MiCS_CommGroups = params[0].comm param_repli_group = mics_comm_groups.param_repli_group param_repli_size = mics_comm_groups.param_repli_size if param_repli_size is None or param_repli_size <= 1: return if not partitioned_grads_buffers[0].is_cuda: raise RuntimeError("Local sharding has no support for CPU offloading") if dist.has_all_reduce_coalesced(): scale_tensors(partitioned_grads_buffers, param_repli_size) dist.all_reduce_coalesced(tensors=partitioned_grads_buffers, group=param_repli_group) else: # manually coalescing all-reduce aggregated_buffer: Tensor = torch.cat(partitioned_grads_buffers) aggregated_buffer.div_(param_repli_size) dist.all_reduce(aggregated_buffer, group=param_repli_group) offset = 0 for grad_buff in partitioned_grads_buffers: grad_buff.view(-1).copy_(aggregated_buffer.narrow(0, offset, grad_buff.numel())) offset += grad_buff.numel() # TODO: Support different/changing load/save DP degree. def load_state_dict(self, state_dict_list, load_optimizer_states=True, load_from_fp32_weights=False, checkpoint_folder=None): r""" Loading the MiCS checkpoints TODO: move the implementation from zhen/merged_ds_master branch """ raise NotImplementedError("Not implemented for loading MiCS checkpoints")
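# Illustration only, not from the DeepSpeed source: how the classes above are typically
# driven from a DeepSpeed config, following the MiCS_Init docstring. The field names
# ``mics_shard_size`` and ``mics_hierarchical_params_gather`` come from that docstring;
# their placement under "zero_optimization", the surrounding config keys, and the toy
# model are assumptions of this sketch. It must run under a distributed launcher (e.g.
# the deepspeed launcher) with a world size of at least mics_shard_size.
import torch

import deepspeed

ds_config = {
    "train_micro_batch_size_per_gpu": 1,
    "zero_optimization": {
        "stage": 3,
        "mics_shard_size": 4,                      # partition parameters across 4 ranks
        "mics_hierarchical_params_gather": True,   # enable the intra/inter-node all-gather path
    },
}

with deepspeed.zero.MiCS_Init(config_dict_or_path=ds_config):
    model = torch.nn.Linear(4096, 4096)            # partitioned at construction time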
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import torch import deepspeed from deepspeed.runtime.utils import partition_uniform as partition def split_tensor_along_last_dim(tensor, partitions, contiguous_split_chunks=False): """Split a tensor along its last dimension. Adapted from Megatron-LM. Arguments: tensor: input tensor. partitions: list of partition sizes to supply to torch.split contiguous_split_chunks: If True, make each chunk contiguous in memory. """ # Get the size and dimension. last_dim = tensor.dim() - 1 # Split. tensor_list = torch.split(tensor, partitions, dim=last_dim) # Note: torch.split does not create contiguous tensors by default. if contiguous_split_chunks: return tuple(chunk.contiguous() for chunk in tensor_list) return tensor_list class TiledLinear(torch.nn.Module): def __init__(self, in_features, out_features, bias=True, in_splits=1, out_splits=1, input_is_already_split=False, combine_out_splits=True, linear_cls=torch.nn.Linear, init_linear=None, **kwargs): """A replacement for ``torch.nn.Linear`` that works with ZeRO-3 to reduce memory requirements via tiling. TiledLinear breaks the input and output dimensions of a linear layer into tiles that are processed in sequence. This class enables huge linear layers when combined with ZeRO-3 because inactive tiles can be partitioned and offloaded. .. note:: We recommend using as few tiles as necessary. Tiling significantly reduces memory usage, but can reduce throughput for inexpensive layers. This due to the smaller kernels having less parallelism and lower arithmetic intensity, while introducing more frequent synchronization and communication. Args: in_features (int): See ``torch.nn.Linear`` out_features (int): See ``torch.nn.Linear`` bias (bool, optional): See ``torch.nn.Linear`` in_splits (int, optional): The number of tiles along the input dimension. Defaults to 1. out_splits (int, optional): The number of tiles along the output dimension. Defaults to 1. input_is_already_split (bool, optional): If set to ``True``, assume that the ``input_`` in to ``forward()`` is already split into ``in_splits`` chunks. Defaults to ``False``. combine_out_splits (bool, optional): If set to ``False``, do not combine the ``out_splits`` outputs into a single tensor. Defaults to ``True``. linear_cls (class, optional): The underlying class to build individual tiles. Defaults to ``torch.nn.Linear``. init_linear (``torch.nn.Linear``, optional): If set, copy the parameters of ``init_linear``. Useful for debugging. Defaults to ``None``. kwargs (dict, optional): additional keyword arguments to provide to ``linear_cls()``. Raises: RuntimeError: ``in_splits`` must be within the range [1, in_features). RuntimeError: ``out_splits`` must be within the range of [1, out_features). """ super().__init__() if (in_splits < 1) or (in_splits > in_features): raise RuntimeError('in splits must be in range [1, in_features].') if (out_splits < 1) or (out_splits > out_features): raise RuntimeError('out splits must be in range [1, out_features].') # global, not necessarily local self.in_features = in_features self.out_features = out_features self.use_bias = bias self.out_splits = out_splits self.in_splits = in_splits self.input_is_already_split = input_is_already_split self.combine_out_splits = combine_out_splits # Build partition-lists. These are CSR-style splits [0, part0, part1, ..., features] # For example, row_parts[p] gives the start of partition p and row_parts[p+1] # is the exclusive end. 
self.in_parts = partition(num_items=in_features, num_parts=in_splits) self.out_parts = partition(num_items=out_features, num_parts=out_splits) assert len(self.out_parts) == out_splits + 1 assert len(self.in_parts) == in_splits + 1 assert self.out_parts[0] == 0 assert self.out_parts[out_splits] == out_features assert self.in_parts[in_splits] == in_features self.linears = torch.nn.ModuleList() for out_id in range(out_splits): self.linears.append(torch.nn.ModuleList()) local_out_dim = self.out_parts[out_id + 1] - self.out_parts[out_id] for in_id in range(in_splits): #if input_size is split, we only need one bias local_bias = bias if in_id == (in_splits - 1) else False local_in_dim = self.in_parts[in_id + 1] - self.in_parts[in_id] local = linear_cls(local_in_dim, local_out_dim, bias=local_bias, **kwargs) self.linears[out_id].append(local) # Optionally initialize with a known tensor if init_linear is not None: self.copy_params_from(init_linear) def forward(self, input_): if self.in_splits > 1 and not self.input_is_already_split: input_parts = partition(input_.shape[-1], self.in_splits) split_sizes = [input_parts[p + 1] - input_parts[p] for p in range(self.in_splits)] inputs = self._split_global_input(input_, split_sizes) elif self.in_splits > 1: inputs = input_ assert len( inputs) == self.in_splits, f"Col splits {self.in_splits} does not match input splits {len(inputs)}" else: # no splits inputs = [input_] outputs = [None] * self.out_splits for out_id in range(self.out_splits): for in_id in range(self.in_splits): local_output = self.linears[out_id][in_id](inputs[in_id]) outputs[out_id] = self._reduce_local_output(in_id=in_id, out_id=out_id, current_out=outputs[out_id], new_out=local_output) if self.combine_out_splits: return self._combine_output_splits(outputs) return outputs def _split_global_input(self, input, split_sizes): """Partition an input tensor along the last dimension, aligned with given splits. Subclasses should override this method to account for new input types. Args: input (List[Tensor]): The tensor to partition along the last dimension. split_sizes (List[int]): The size of each partition. Returns: List[Any]: A list of the chunks of ``input``. """ return split_tensor_along_last_dim(input, split_sizes) def _reduce_local_output(self, in_id, out_id, current_out, new_out): """Reduce (sum) a new local result into the existing local results. Subclasses should override this method. For a given ``out_id``, this method is called ``in_id-1`` times. The first input split is a simple assignment. Args: in_id (int): The input split that produced ``new_out``. out_id (int): The output split that produced ``new_out``. current_out (Any): The reduced form of all previous ``out_id`` results. new_out (Any): The local result from forward (``in_id``, ``out_id``)e Returns: Any: The combined result of ``current_out`` and ``new_out``. """ if current_out is None: #this clone is necessary to preserve auto grad #there is some issue with inplace update for outputs that are views return new_out.clone() else: return current_out + new_out def _combine_output_splits(self, outputs): """Join the splits of the output into a single result. Args: outputs (List[Any]): The reduced outputs for each output split. Returns: Any: The combined outputs. """ assert len(outputs) == self.out_splits return torch.cat(outputs, dim=-1) @torch.no_grad() def copy_params_from(self, other): """Copy the weight and bias data from ``other``. This is especially useful for reproducible initialization and testing. Equivalent to: .. 
code-block:: python with torch.no_grad(): self.weight.copy_(other.weight) if self.bias is not None: self.bias.copy_(other.bias) .. note:: If ZeRO-3 is enabled, this is a collective operation and the updated parameters of data-parallel rank 0 will be visible on all ranks. See :class:`deepspeed.zero.GatheredParameters` for more information. Args: other (``torch.nn.Linear``): the linear layer to copy from. """ assert hasattr(other, 'weight') assert other.weight.size() == (self.out_features, self.in_features) if self.use_bias: assert hasattr(other, 'bias') assert other.bias is not None assert other.bias.size() == (self.out_features, ) else: assert other.bias is None for row in range(self.out_splits): rstart = self.out_parts[row] rstop = self.out_parts[row + 1] for col in range(self.in_splits): cstart = self.in_parts[col] cstop = self.in_parts[col + 1] local = self.linears[row][col] global_weight = other.weight[rstart:rstop, cstart:cstop] with deepspeed.zero.GatheredParameters(local.weight, modifier_rank=0): local.weight.copy_(global_weight) if local.bias is not None: with deepspeed.zero.GatheredParameters(local.bias, modifier_rank=0): local.bias.data.copy_(other.bias[rstart:rstop].data) class TiledLinearReturnBias(TiledLinear): """Wrapper for a Linear class that returns its own bias parameter, such as used by Megatron-LM. """ def _reduce_local_output(self, in_id, out_id, current_out, new_out): """Reduces output tensors, but not the returned bias. """ if current_out is not None: old_tensor, old_bias = current_out else: old_tensor, old_bias = None, None assert isinstance(new_out, tuple) assert len(new_out) == 2 tensor, bias = new_out assert tensor is not None tensor = super()._reduce_local_output(in_id=in_id, out_id=out_id, current_out=old_tensor, new_out=tensor) if bias is None: bias = old_bias return tensor, bias def _combine_output_splits(self, outputs): # stack output tensors tensors = [o[0] for o in outputs] tensor = super()._combine_output_splits(tensors) # stack biases if applicable biases = [o[1] for o in outputs if o[1] is not None] if len(biases) > 0: bias = super()._combine_output_splits(biases) else: bias = None return tensor, bias
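# Illustration only, not from the DeepSpeed source: TiledLinear used without ZeRO-3 so
# the sketch stays self-contained. A plain torch.nn.Linear is tiled into 2 output x 2
# input splits and initialized from the reference layer via init_linear, so the tiled
# forward should match the untiled one up to float rounding. The import path is assumed
# from this file's location.
import torch

from deepspeed.runtime.zero.tiling import TiledLinear

torch.manual_seed(0)
reference = torch.nn.Linear(8, 12)
tiled = TiledLinear(in_features=8, out_features=12, bias=True,
                    in_splits=2, out_splits=2, init_linear=reference)

x = torch.randn(4, 8)
assert torch.allclose(tiled(x), reference(x), atol=1e-5)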
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team from dataclasses import dataclass import collections from collections import UserDict from typing import Deque, Set from deepspeed import comm as dist from deepspeed.utils.logging import logger from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum from deepspeed.runtime.zero.partition_parameters import * from deepspeed.runtime.swap_tensor.partitioned_param_swapper import PartitionedParamStatus from deepspeed.utils.debug import debug_module2name_id, debug_param2name_id from deepspeed.accelerator import get_accelerator def debug_rank0(message: str) -> None: if dist.get_rank() == 0: logger.debug(message) @instrument_w_nvtx def get_all_parameters(sub_module, recurse=False): return itertools.chain(sub_module.named_parameters(recurse=recurse), sub_module.ds_external_parameters()) def iter_params(module: Module, recurse=False) -> Iterable[Parameter]: return map(lambda pair: pair[1], get_all_parameters(module, recurse)) class ZeRoTraceMode(Enum): # Record trace of the network during a single forward+backward (for training) or forward (for inference) RECORD = 1 # Use recorded network trace to optimize current forward+backward or forward COMPLETE = 2 # Recorded trace does not match current forward+backward or forward pass. INVALID = 3 class InflightParamRegistry(UserDict): """registry for parameters in flight""" def __setitem__(self, param: Parameter, handle: AllGatherCoalescedHandle) -> None: if param in self.data: raise RuntimeError(f"{param.ds_summary()} already in registry") if param.ds_status != ZeroParamStatus.INFLIGHT: raise RuntimeError(f"attempted to add non-inflight parameter to registry {param.ds_summary()}") self.data[param] = handle class PartitionedParameterCoordinator: """Handles partitioning and gathering of parameters.""" @dataclass class __ParamInTrace: param: Parameter step_id_last_used_at: int def __init__( self, prefetch_bucket_sz: int, max_reuse_distance_in_numel: int, max_available_parameters_in_numel: int, allgather_stream: get_accelerator().Stream, inflight_param_registry: InflightParamRegistry, prefetch_nvme: bool = False, ) -> None: # mapping of param -> handle for each param that is currently in flight self.__inflight_param_registry = inflight_param_registry # keeps track of the number of submodules invoked so far. self.__step_id: int = 0 # network tracing mode self.__trace_mode: ZeRoTraceMode = ZeRoTraceMode.RECORD # sequence of submodules/parameters in forward pass + backward pass self.__submodule_order: Iterable[Module] = [] self.__param_order: Iterable[__class__.__ParamInTrace] = [] self.__most_recent_step_id_param_fetched_for = collections.defaultdict(lambda: int(-1e10)) self.__step_id_module_fetched_for = collections.defaultdict(lambda: collections.deque()) # number of available params, and max number of available params self.__n_available_params: int = 0 self.__max_n_available_params: int = max_available_parameters_in_numel # max distance between two use of the module beyond which module is released self.__max_reuse_dist_in_numel: int = max_reuse_distance_in_numel # queue for parameters to fetch. 
parameters will be popped off the left # side of the dequeue as they are fetched self.__param_queue: Deque[__class__.__ParamInTrace] = None self.__prefetch_bucket_sz: int = prefetch_bucket_sz self.__prefetch_nvme: bool = prefetch_nvme self.hierarchy: int = 0 # stream that will be used for allgather operations self.__allgather_stream: get_accelerator().Stream = allgather_stream # limit the number of fetch events that can be queued at once # otherwise, what happens is memory is allocated by the host thread at the # time of the call, but not used until later by the asynchronous cuda stream. # allowing an infinite number of these to queue up causes a lot of memory # pressure that then becomes detrimental to performance. # this is a much less elegant way of fixing this vs something like using # cudaMallocAsync/cudaFreeAsync. Choosing to not expose this to the user now # because ideally in the future its replaced by an async allocation # mechanism which doesn't require any configuration by the user. self.__ongoing_fetch_events: Deque[get_accelerator().Event] = collections.deque() # TODO. make this configurable via JSON self.__max_ongoing_fetch_events: int = 2 """Tracing and Tracking TODO. consider performing trace before initializing PartitionedParameterCoordinator and passing trace results into constructor. This way all the code in here can just assume that the trace is complete and the results can be entirely immutable. Bookkeeping operations used to track where we are in the forward/backward pass """ def _clear_trace_structures(self) -> None: self.__submodule_order = [] self.__param_order = [] self.__most_recent_step_id_param_fetched_for = collections.defaultdict(lambda: int(-1e10)) self.__param_queue = None def is_complete_trace(self) -> bool: return self.__trace_mode == ZeRoTraceMode.COMPLETE def is_invalid_trace(self) -> bool: return self.__trace_mode == ZeRoTraceMode.INVALID def is_record_trace(self) -> bool: return self.__trace_mode == ZeRoTraceMode.RECORD def _invalidate_trace(self) -> None: if self.is_invalid_trace(): raise RuntimeError("attempted to invalidate already invalid trace") self.__trace_mode = ZeRoTraceMode.INVALID self._clear_trace_structures() def trace_prologue(self, sub_module: Module) -> None: if self.is_complete_trace(): # sub_module must match expectation else invalidate trace cache if len(self.__submodule_order) <= self.__step_id: print_rank_0( f"Invalidate trace cache @ step {self.__step_id} and module {sub_module.id}: " f"cache has only {len(self.__submodule_order)} modules", force=True) self._invalidate_trace() return if sub_module != self.__submodule_order[self.__step_id]: expected_module_id = self.__submodule_order[self.__step_id].id print_rank_0( f"Invalidate trace cache @ step {self.__step_id}: " f"expected module {expected_module_id}, but got module {sub_module.id}", force=True) self._invalidate_trace() def record_module(self, sub_module: Module) -> None: """adds sub module to trace""" if not self.is_record_trace(): raise RuntimeError(f"attempted to record trace when status = {self.__trace_mode}") self.__submodule_order.append(sub_module) self.__step_id_module_fetched_for[sub_module.id].append(self.__step_id) def record_parameters(self, sub_module: Module) -> None: """adds sub module to trace""" if not self.is_record_trace(): raise RuntimeError(f"attempted to record trace when status = {self.__trace_mode}") step_id = self.__step_id_module_fetched_for[sub_module.id].popleft() for param in sorted(set(iter_params(sub_module)), key=lambda p: p.ds_id): 
self.__param_order.append(__class__.__ParamInTrace(param=param, step_id_last_used_at=step_id)) def construct_parameter_trace_from_module_trace(self): """use module trace to construct parameter trace""" self.__param_order = [] for sub_module in self.__submodule_order: self.record_parameters(sub_module) def reset_step(self) -> None: """indicate that we have completed one fwd+bwd for the model""" if self.__inflight_param_registry: raise RuntimeError(f"still have inflight params " f"{[p.ds_summary for p in self.__inflight_param_registry.keys()]}") if not self.is_complete_trace(): # not self.trace_complete: # Make sure that recorded submodule orders are identical across ranks assert_ints_same_as_other_ranks([m.id for m in self.__submodule_order]) if self.is_record_trace(): # Successfully recorded a trace self.construct_parameter_trace_from_module_trace() # Make sure that recorded parameter orders are identical across ranks assert_ints_same_as_other_ranks([p.param.ds_id for p in self.__param_order]) assert_ints_same_as_other_ranks([p.step_id_last_used_at for p in self.__param_order]) self.__submodule_order = tuple(self.__submodule_order) # freeze self.__param_order = tuple(self.__param_order) # freeze self.__trace_mode = ZeRoTraceMode.COMPLETE print_rank_0( f"completed record trace of {len(self.__submodule_order)} sub modules: {[m.id for m in self.__submodule_order]}", force=False) else: # Enable trace recording for next forward/backward pass self.__trace_mode = ZeRoTraceMode.RECORD self.__param_queue = collections.deque(self.__param_order) # reset fetch queue self.__most_recent_step_id_param_fetched_for = collections.defaultdict(lambda: int(-1e10)) self.__step_id_module_fetched_for = collections.defaultdict(lambda: collections.deque()) self.__step_id = 0 self.__n_available_params = 0 def _dump_params(self, tag, sub_module, params, step_id=None): if step_id is None: step_id = self.__step_id param_names = [debug_param2name_id(p) for p in params] print(f'{tag} step = {step_id} mod = {debug_module2name_id(sub_module)} p_names = {param_names}') def _dump_param_ids(self, tag, mod_id, p_ids, step_id=None): if step_id is None: step_id = self.__step_id print(f'{tag} mod = {mod_id}, step = {step_id}, p_ids = {p_ids}') """Fetch and Release Fetching, prefetching, and releasing parameters """ @instrument_w_nvtx @torch.no_grad() def fetch_sub_module(self, current_submodule: Module) -> None: """This method does the following (in order): 1. kick off fetch for parameters in immediately required sub module 2. kick off fetch for next few parameters we will need later (prefetch) 3. 
block on parameters in immediately required sub module """ debug_rank0( f"{self.__step_id}: M{current_submodule.id}({type(current_submodule).__name__}) P{[p.ds_id for p in iter_params(current_submodule)]} " + str({ "avail": f"{self.__n_available_params:.1e}", "queue_sz": f"{len(self.__param_queue or [])}", "inflight": [p.ds_id for p in self.__inflight_param_registry], })) params_to_fetch = frozenset(iter_params(current_submodule)) # kick off all gather for params in the immediately required submodule for param in params_to_fetch: debug_rank0(f"-fetch: {param.ds_summary()}") self.__all_gather_params(params_to_fetch) # wait for parameters in the immediately needed submodule to become available for param in params_to_fetch: param.ds_active_sub_modules.add(current_submodule.id) debug_rank0(f"-wait: {param.ds_summary()}") if param in self.__inflight_param_registry: with get_accelerator().stream(self.__allgather_stream): while self.__ongoing_fetch_events and self.__ongoing_fetch_events[0].query(): self.__ongoing_fetch_events.popleft() if len(self.__ongoing_fetch_events) > self.__max_ongoing_fetch_events: self.__ongoing_fetch_events.popleft().synchronize() self.__inflight_param_registry.pop(param).wait() event = get_accelerator().Event() event.record() self.__ongoing_fetch_events.append(event) assert param.ds_status == ZeroParamStatus.AVAILABLE, param.ds_summary() get_accelerator().current_stream().wait_stream(self.__allgather_stream) # kick off parameter prefetches for upcoming modules # don't prefetch if we dont have a completed model trace if self.is_complete_trace(): # go through the parameters we need for the current module and pop them # off the fetch queue so that they aren't prefetched later. # if params have already been popped off the fetch queue by earlier # prefetches we won't look for them here discarded_from_prefetch_queue = set() params_not_already_fetched = set( filter(lambda p: self.__most_recent_step_id_param_fetched_for[p] < self.__step_id, params_to_fetch)) while self.__param_queue and len(discarded_from_prefetch_queue) < len(params_not_already_fetched): param_in_trace = self.__param_queue.popleft() self.__most_recent_step_id_param_fetched_for[ param_in_trace.param] = param_in_trace.step_id_last_used_at discarded_from_prefetch_queue.add(param_in_trace.param) if discarded_from_prefetch_queue != params_not_already_fetched: raise RuntimeError( f"tracing error at step {self.__step_id}: \n" f"module id: {current_submodule.id}, training: {current_submodule.training}\n" f"expected the next {len(params_not_already_fetched)} parameters in the " f"parameter fetch queue to be {tuple(p.ds_summary(use_debug_name=True) for p in params_not_already_fetched)} \n" f"but got \n {tuple(p.ds_summary(use_debug_name=True) for p in discarded_from_prefetch_queue)}.") def _is_currently_on_nvme(param): if param.nvme_swapper is None: return False return param.ds_tensor.final_location == OffloadDeviceEnum.nvme \ and param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE # kick off all gather for params in the next few submodules (prefetch) if self.__prefetch_bucket_sz > 0: max_params_to_prefetch = min(self.__max_n_available_params - self.__n_available_params, self.__prefetch_bucket_sz) params_to_prefetch = set() numel_prefetching = 0 while self.__param_queue and numel_prefetching < max_params_to_prefetch: param_in_trace: __class__.__ParamInTrace = self.__param_queue.popleft() if _is_currently_on_nvme(param_in_trace.param): # nvme prefetch is handled elsewhere. 
Need to break here to preserve fetch order self.__param_queue.appendleft(param_in_trace) break do_prefetch = param_in_trace.param.ds_status == ZeroParamStatus.NOT_AVAILABLE if param_in_trace.param in params_to_prefetch: # Avoid duplicates do_prefetch = False self.__most_recent_step_id_param_fetched_for[param_in_trace.param] = \ max(self.__most_recent_step_id_param_fetched_for[param_in_trace.param], param_in_trace.step_id_last_used_at) if do_prefetch: params_to_prefetch.add(param_in_trace.param) numel_prefetching += param_in_trace.param.ds_numel for param in params_to_prefetch: debug_rank0(f"-prefetch: {param.ds_summary()}") self.__all_gather_params(params_to_prefetch) if self.__prefetch_nvme: self.__prefetch_nvme_param_partitions() self.__step_id += 1 @instrument_w_nvtx @torch.no_grad() def release_sub_module(self, submodule: Module) -> None: """release the parameters of a sub module, assuming they meet conditions to be released.""" params_to_release = (self.__params_to_release(submodule, self.__step_id) if self.is_complete_trace() else set( p.ds_id for p in iter_params(submodule))) for param in iter_params(submodule): param.ds_active_sub_modules.discard(submodule.id) if param.ds_id in params_to_release and not param.is_external_param: self.__release_param(param) @instrument_w_nvtx @torch.no_grad() def release_and_reset_all(self, module: Module) -> None: """release all module parameters""" for param in iter_params(module, recurse=True): if param in self.__inflight_param_registry: raise RuntimeError(f"param {param.ds_summary()} still in flight") # TODO. make this throw if if there are still active submodules. currently # there's a hook execution issue param.ds_active_sub_modules.clear() self.__release_param(param) for param in iter_params(module, recurse=True): if param.ds_status != ZeroParamStatus.NOT_AVAILABLE: raise RuntimeError(f"{param.ds_summary()} expected to be released") @instrument_w_nvtx def __all_gather_params(self, params: Set[Parameter]) -> None: """for each partitioned parameter, kick off an async allgather and store the work handle for the in flight parameters.""" partitioned_params = [] for param in params: if param.ds_status == ZeroParamStatus.NOT_AVAILABLE: partitioned_params.append(param) self.__n_available_params += param.ds_numel if partitioned_params: with get_accelerator().stream(self.__allgather_stream): handle = partitioned_params[0].all_gather_coalesced(partitioned_params) for param in partitioned_params: assert param.ds_status == ZeroParamStatus.INFLIGHT, param.ds_summary() self.__inflight_param_registry[param] = handle # Release swap buffers for persisted params on nvme since they will never be partitioned or evicted from GPU swap_persisted_params = [ p for p in partitioned_params if p.ds_persist and p.ds_tensor.final_location == OffloadDeviceEnum.nvme ] if swap_persisted_params: swap_persisted_params[0].nvme_swapper.remove_partition_and_release_buffers(swap_persisted_params) @instrument_w_nvtx def __release_param(self, param: Parameter) -> None: if param.ds_status == ZeroParamStatus.AVAILABLE and not param.ds_active_sub_modules: debug_rank0(f"-release: {param.ds_summary()}") param.partition() self.__n_available_params -= param.ds_numel @instrument_w_nvtx @functools.lru_cache(maxsize=None) def __params_to_release(self, submodule_to_release: Module, step_id: int) -> Set[int]: if not self.is_complete_trace(): raise RuntimeError("expected trace to be complete") params_to_release = set(p.ds_id for p in iter_params(submodule_to_release) if not p.ds_persist) # 
Problem: When prefetcher scans the param trace, it skips AVAILABLE params. # This creates issues if those params are released before the skipped uses: # 1) It hurts performance as the skipped uses are never prefetched. # 2) For nvme params, we run out of swap buffers because the prefetch order # diverges from the trace. # Solution: Don't release params whose reuse was skipped by prefetch. This is # possible because we detect such skips during prefetch and mark those params. for param in iter_params(submodule_to_release): if self.__most_recent_step_id_param_fetched_for[param] > step_id: params_to_release.discard(param.ds_id) # examine all modules within `max_reuse_dist_in_numel` of the current step, # if we see any of the candidate parameters to be released reoccur while # doing this, remove them from the set of parameters to release. params_traversed = 0 for module in self.__submodule_order[step_id:]: if params_traversed >= self.__max_reuse_dist_in_numel: break for param in iter_params(module): params_to_release.discard(param.ds_id) params_traversed += param.ds_numel return params_to_release @instrument_w_nvtx def __prefetch_nvme_param_partitions(self) -> None: """swap in parameter partitions from nvme for those parameters that will be used after the ones that are already being prefetched into full parameters """ if not self.is_complete_trace(): return numel_in_flight = sum(param.ds_numel for param in self.__inflight_param_registry) numel_considered = 0 swap_in_params = [] for param_in_trace in self.__param_queue: param = param_in_trace.param if param.nvme_swapper is None: continue if (numel_considered > 2 * numel_in_flight or len(swap_in_params) >= param.nvme_swapper.available_swap_in_buffers()): break if param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE: swap_in_params.append(param) numel_considered += param.ds_numel if swap_in_params: swap_in_params[0].nvme_swapper.swap_in(swap_in_params, async_op=True)
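# Illustration only, not from the DeepSpeed source: a dependency-free toy of the
# trace-driven prefetching idea implemented by PartitionedParameterCoordinator above.
# Record which parameters each submodule touches during the first pass, then on later
# passes walk that recorded queue to decide which parameter ids to prefetch next. All
# names and the budget heuristic here are simplifications for illustration.
import collections

trace = []  # recorded (step_id, param_id) pairs from the first forward/backward pass


def record_pass(module_param_ids):
    for step_id, param_ids in enumerate(module_param_ids):
        for pid in param_ids:
            trace.append((step_id, pid))


def prefetch_plan(current_step, budget):
    """Return up to `budget` distinct param ids used after `current_step`, in trace order."""
    queue = collections.deque(pid for step, pid in trace if step > current_step)
    plan, seen = [], set()
    while queue and len(plan) < budget:
        pid = queue.popleft()
        if pid not in seen:
            seen.add(pid)
            plan.append(pid)
    return plan


record_pass([[0, 1], [2], [1, 3], [4]])  # four submodules and the params each one uses
assert prefetch_plan(current_step=1, budget=2) == [1, 3]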
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team from pydantic import Field, validator from enum import Enum from pathlib import Path from deepspeed.runtime.config_utils import DeepSpeedConfigModel, pp_int class OffloadDeviceEnum(str, Enum): """ Enum for valid offload devices """ none = "none" cpu = "cpu" nvme = "nvme" class DeepSpeedZeroOffloadParamConfig(DeepSpeedConfigModel): """ Set options for parameter offload. Valid only with stage 3. """ device: OffloadDeviceEnum = "none" """ Device memory to offload model parameters. Supported options are `cpu` and `nvme`. """ nvme_path: Path = None """ Filesystem path for NVMe device for parameter offloading. """ buffer_count: int = Field(5, ge=0) """ Number of buffers in buffer pool for parameter offloading to NVMe. """ buffer_size: int = Field(pp_int(1e8), ge=0) """ Size of buffers in buffer pool for parameter offloading to NVMe. """ max_in_cpu: int = Field(pp_int(1e9), ge=0) """ Number of parameter elements to maintain in CPU memory when offloading to NVMe is enabled. """ pin_memory: bool = False """ Offload to page-locked CPU memory. This could boost throughput at the cost of extra memory overhead. """ class DeepSpeedZeroOffloadOptimizerConfig(DeepSpeedConfigModel): """ Set options for optimizer offload. Valid with stage 1, 2, and 3. """ device: OffloadDeviceEnum = "none" """ Device memory to offload optimizer state. Supported options are `cpu` and `nvme`. Optimizer computation is offload to CPU regardless of device option. """ nvme_path: Path = None """ Filesystem path for NVMe device for optimizer state offloading. """ buffer_count: int = Field(4, ge=0) """ Number of buffers in buffer pool for optimizer state offloading to NVMe. This should be at least the number of states maintained per parameter by the optimizer. For example, Adam optimizer has 4 states (parameter, gradient, momentum, and variance). """ pin_memory: bool = False """ Offload to page-locked CPU memory. This could boost throughput at the cost of extra memory overhead. """ pipeline_read: bool = False """ For tile-based optimizer step processing, overlap read of next tile with computation of current tile. Used in ZeRO-Infinity. """ pipeline_write: bool = False """ For tile-based optimizer step processing, overlap write of previous tile with computation of current tile. """ fast_init: bool = False """ Enable fast optimizer initialization when offloading to NVMe. """ @validator("pipeline_read", "pipeline_write", always=True) def set_pipeline(cls, field_value, values): values["pipeline"] = field_value or values.get("pipeline", False) return field_value
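# Illustration only, not from the DeepSpeed source: constructing the two config models
# above directly. DeepSpeed normally builds them from the "offload_param" and
# "offload_optimizer" sections of the ZeRO config; that mapping and the example values
# are assumptions of this sketch. Plain strings and path strings are coerced by pydantic
# into OffloadDeviceEnum and pathlib.Path respectively.
from pathlib import Path

from deepspeed.runtime.zero.offload_config import (DeepSpeedZeroOffloadOptimizerConfig,
                                                   DeepSpeedZeroOffloadParamConfig, OffloadDeviceEnum)

param_cfg = DeepSpeedZeroOffloadParamConfig(device="nvme", nvme_path="/local_nvme", pin_memory=True)
assert param_cfg.device == OffloadDeviceEnum.nvme
assert param_cfg.nvme_path == Path("/local_nvme")

opt_cfg = DeepSpeedZeroOffloadOptimizerConfig(device="cpu", pipeline_read=True)
assert opt_cfg.device == OffloadDeviceEnum.cpu and opt_cfg.pipeline_read is True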
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import sys import gc import collections from typing import Deque, Dict, Tuple from deepspeed.runtime import ZeROOptimizer from deepspeed.utils import logger from deepspeed.runtime.fp16.loss_scaler import CreateLossScaler from deepspeed.runtime.comm.coalesced_collectives import reduce_scatter_coalesced from deepspeed.runtime.utils import inf, get_global_norm, is_model_parallel_parameter from deepspeed.runtime.zero.partition_parameters import * from deepspeed.runtime.zero.config import ZeroStageEnum from deepspeed.runtime.zero.offload_config import OffloadDeviceEnum from deepspeed.runtime.zero.parameter_offload import DeepSpeedZeRoOffload from deepspeed.ops.adam import DeepSpeedCPUAdam from deepspeed.runtime.swap_tensor.partitioned_param_swapper import PartitionedParamStatus from deepspeed.runtime.swap_tensor.partitioned_optimizer_swapper import PartitionedOptimizerSwapper from deepspeed.runtime.swap_tensor.pipelined_optimizer_swapper import PipelinedOptimizerSwapper from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT, FP32_FLAT_GROUPS, PARTITION_COUNT, ZERO_STAGE from deepspeed.accelerator import get_accelerator from deepspeed.ops.op_builder import UtilsBuilder # Toggle this to true to enable correctness test # with gradient partitioning and without pg_correctness_test = False def print_rank_0(message, debug=False, force=False): rank = dist.get_rank() if rank == 0 and (debug or force): logger.info(message) # other variations # - print for all ranks w/o interleaving # printflock(f"[{rank}] {message}") # - print to log file per rank # log_rank_file(rank, message) def input(msg): return def isclose(a, b, rtol=1e-09, atol=0.0): return abs(a - b) <= max(rtol * max(abs(a), abs(b)), atol) def lcm(x, y): from fractions import gcd # or can import gcd from `math` in Python 3 return x * y // gcd(x, y) def move_to_cpu(tensor_list): for tensor in tensor_list: tensor.data = tensor.data.cpu() INITIAL_MICRO_STEP_ID = -1 class DeepSpeedZeroOptimizer_Stage3(ZeROOptimizer): """ DeepSpeedZeroOptimizer designed to reduce the memory footprint required for training large deep learning models. For more details please see ZeRO: Memory Optimization Towards Training A Trillion Parameter Models https://arxiv.org/abs/1910.02054 For usage examples, refer to TODO: DeepSpeed Tutorial """ def __init__(self, module, init_optimizer, timers, ds_config, static_loss_scale=1.0, dynamic_loss_scale=False, dynamic_loss_args=None, verbose=True, contiguous_gradients=True, reduce_bucket_size=500000000, prefetch_bucket_size=50000000, max_reuse_distance=1000000000, max_live_parameters=1000000000, param_persistence_threshold=100000, model_persistence_threshold=sys.maxsize, dp_process_group=None, reduce_scatter=True, overlap_comm=False, offload_optimizer_config=None, offload_param_config=None, sub_group_size=1000000000000, mpu=None, clip_grad=0.0, communication_data_type=torch.float16, postscale_gradients=True, gradient_predivide_factor=1.0, gradient_accumulation_steps=1, elastic_checkpoint=False, aio_config=None): see_memory_usage("Stage 3 initialize beginning", force=True) print_rank_0(f"initialized {__class__.__name__} with args: {locals()}", force=False) if dist.get_rank() == 0: logger.info(f"Reduce bucket size {reduce_bucket_size}") logger.info(f"Prefetch bucket size {prefetch_bucket_size}") # The fused optimizer does all the work. We need this layer for two reason: # 1. maintain same user API from apex.fp16_utils # 2. 
keep common stuff here in case we need to add new fused optimizer later # differences from apex.fp16_utils: # - assume all model params in fp16 # - assume all params require grad # - flat by groups, not keeping state. TODO: remove state explicitly? # - master grad and unflat master weight never exist. TODO: a way to save out unflat master? if not get_accelerator().is_available(): raise SystemError("Cannot use fp16 without accelerator.") self.optimizer = init_optimizer # Load pre-built or JIT compile (un)flatten ops util_ops = UtilsBuilder().load() self.flatten = util_ops.flatten self.unflatten = util_ops.unflatten self.dtype = self.optimizer.param_groups[0]['params'][0].dtype self._global_grad_norm = 0. self.custom_loss_scaler = False self.external_loss_scale = None self.optimizer_swapper = None self.swap_optimizer = False self.offload_optimizer = False self.offload_optimizer_pin_memory = False self.offload_optimizer_fast_init = False self.offload_param = False self.offload_param_pin_memory = False self.params_in_nvme_and_cpu = False self.max_params_in_cpu = 0 self.parameter_offload = self.initialize_ds_offload(module=module, timers=timers, ds_config=ds_config, overlap_comm=overlap_comm, prefetch_bucket_size=prefetch_bucket_size, max_reuse_distance=max_reuse_distance, max_live_parameters=max_live_parameters, param_persistence_threshold=param_persistence_threshold, model_persistence_threshold=model_persistence_threshold, offload_param_config=offload_param_config, mpu=mpu) self.persistent_parameters = self.parameter_offload.persistent_parameters self._configure_offloading(offload_optimizer_config, offload_param_config) self.module = module self.elastic_checkpoint = elastic_checkpoint self.inf_or_nan_tracker: Tensor = torch.zeros(1, dtype=torch.bool, device=get_accelerator().current_device_name(), requires_grad=False) self.deepspeed_adam_offload = (self.offload_optimizer and type(init_optimizer) == DeepSpeedCPUAdam) self.device = get_accelerator().current_device_name() if not self.offload_optimizer else OffloadDeviceEnum.cpu ### streams used for overlapping computation with communication self.reduce_and_partition_stream = get_accelerator().Stream() if overlap_comm else get_accelerator( ).default_stream() ############################################################################ self.n_caching_allocator_flushes = 0 #-------------Stage 3 Setup-------------------# self.timers = timers self.reduce_scatter = reduce_scatter self.dp_process_group = dp_process_group self.partition_count = dist.get_world_size(group=self.dp_process_group) if mpu is None: self.model_parallel_group = None self.model_parallel_rank = 0 else: self.model_parallel_group = mpu.get_model_parallel_group() self.model_parallel_rank = mpu.get_model_parallel_rank() self.overflow = False self.clip_grad = clip_grad self.communication_data_type = communication_data_type self.gradient_predivide_factor = gradient_predivide_factor self.postscale_gradients = postscale_gradients self.gradient_accumulation_steps = gradient_accumulation_steps self.micro_step_id = 0 self.reduce_bucket_size = int(reduce_bucket_size) if self.reduce_scatter: valid_reduce_scatter_dtypes = (torch.float16, torch.bfloat16, torch.float32) assert self.communication_data_type in valid_reduce_scatter_dtypes, f"ZeRO-3 supports {valid_reduce_scatter_dtypes} communication_data_type with reduce scatter enabled. 
Got: '{self.communication_data_type}'" assert self.gradient_predivide_factor == 1.0, "gradient_predivide_factor != 1.0 is not yet supported with ZeRO-3 with reduce scatter enabled" assert self.postscale_gradients, "pre-scale gradients is not yet supported with ZeRO-3 with reduce scatter enabled" # Holds the mode parameter # The param.data may not hold any meaningful data # when param's status is NOT_AVAILABLE or IN_FLGHT self.fp16_groups = [] # Hold partitioned parameters self.fp16_partitioned_groups = [] # Holds a fused and flattened copy of the parameters self.fp16_partitioned_groups_flat = [] self.fp16_partitioned_groups_flat_numel = [] #defragmented pinned memory self.param_groups_fp16_flat_cpu_memory = [] #a single 32-bit partition of the parallel partitioned parameters #that this process will update self.fp32_partitioned_groups_flat = [] self.next_swappable_fp32_partitioned_groups = [] # number of elements per partition in each group self.partition_size = [] self.all_reduce_print = False self.prefetch_elements = int(prefetch_bucket_size) self.contiguous_gradients = contiguous_gradients # padding on each partition for alignment purposes self.groups_padding = [] self.sub_group_size = sub_group_size self.sub_group_to_group_id = {} # Trainable parameters self.trainable_param_groups = self._get_trainable_parameter_groups() see_memory_usage("Before creating fp16 partitions", force=True) self._create_fp16_partitions_with_defragmentation(self.trainable_param_groups) num_fp16_subgroups = len(self.fp16_partitioned_groups_flat) see_memory_usage(f"After creating fp16 partitions: {num_fp16_subgroups}", force=True) # Optimizer tensor swapping if self.swap_optimizer: self._configure_tensor_swapping(offload_optimizer_config, aio_config) self.params_in_ipg_bucket = [] self.is_gradient_accumulation_boundary: bool = True self.param_reduce_events: Deque[get_accelerator().Event] = collections.deque() # TODO. make this configurable via JSON self.max_param_reduce_events: int = 2 self.param_dict = {} # map between param_id and bool to specify if a param is in this partition self.is_param_in_current_partition = {} self.extra_large_param_to_reduce = None self.grads_in_ipg_bucket = [] self.params_in_ipg_bucket = [] self.params_already_reduced = [] self.is_gradient_accumulation_boundary = True self._release_ipg_buffers() self.previous_reduced_grads = None # simplified param id self.param_id = {} count = 0 for i, params_group in enumerate(self.fp16_groups): for param in params_group: unique_id = id(param) self.param_id[unique_id] = count self.param_dict[count] = param self.params_already_reduced.append(False) count = count + 1 #Largest partitioned param largest_partitioned_param_numel = max([ max([max(tensor.numel(), tensor.ds_numel) for tensor in fp16_partitioned_group]) for fp16_partitioned_group in self.fp16_partitioned_groups ]) print_rank_0(f'Largest partitioned param numel = {largest_partitioned_param_numel}', force=False) self._setup_for_real_optimizer() self.grad_position = {} self.set_grad_positions() if self.offload_optimizer: self.norm_for_param_grads = {} self.local_overflow = False # stores if a partition has been reduced in this step self.is_partition_reduced = {} # stores if a grad in a partition has been computed or not self.is_grad_computed = {} # will store the averaged gradients required by this partition self.averaged_gradients = {} #creates backward hooks for gradient partitioning self.create_reduce_and_remove_grad_hooks() #exit(0) # we may have a way of fusing dynamic scale. 
Do not support for now self.loss_scaler = CreateLossScaler(dtype=self.dtype, static_loss_scale=static_loss_scale, dynamic_scaling=dynamic_loss_scale, dynamic_loss_args=dynamic_loss_args) self.dynamic_loss_scale = self.loss_scaler.dynamic self.debug_fp16_grads = [{} for _ in self.fp16_groups] self._link_all_hp_params() if dist.get_rank(group=self.dp_process_group) == 0: see_memory_usage(f"After initializing ZeRO optimizer", force=True) def destroy(self): self.parameter_offload.destroy() def initialize_ds_offload( self, module, timers, ds_config, overlap_comm, prefetch_bucket_size, max_reuse_distance, max_live_parameters, param_persistence_threshold, model_persistence_threshold, offload_param_config, mpu, ): return DeepSpeedZeRoOffload(module=module, timers=timers, ds_config=ds_config, overlap_comm=overlap_comm, prefetch_bucket_size=prefetch_bucket_size, max_reuse_distance=max_reuse_distance, max_live_parameters=max_live_parameters, param_persistence_threshold=param_persistence_threshold, model_persistence_threshold=model_persistence_threshold, offload_param_config=offload_param_config, mpu=mpu) def _get_trainable_parameter_groups(self): param_groups = [] for param_group in self.optimizer.param_groups: trainable_params = {"params": [p for p in param_group["params"] if p.requires_grad]} param_groups.append(trainable_params) return param_groups def _setup_for_real_optimizer(self): see_memory_usage("Before creating fp32 partitions", force=True) self._create_fp32_partitions() see_memory_usage("After creating fp32 partitions", force=True) dist.barrier() # To support pipelined optimizer swapping self._create_next_swappable_fp32_groups() see_memory_usage("Before initializing optimizer states", force=True) self.initialize_optimizer_states() see_memory_usage("After initializing optimizer states", force=True) dist.barrier() if dist.get_rank() == 0: logger.info(f"optimizer state initialized") # IPG if self.contiguous_gradients: self.__ipg_bucket_flat_buffer: Tensor = torch.empty(self.reduce_bucket_size, dtype=self.dtype, device=get_accelerator().current_device_name()) grad_partitions_flat_buffer = None self.__param_id_to_grad_partition: Dict[int, Tensor] = {} all_params = list(itertools.chain.from_iterable(self.fp16_groups)) grad_partitions_flat_buffer: Tensor = torch.zeros(sum(p.partition_numel() for p in all_params), dtype=self.dtype, device=self.device) if self.offload_optimizer_pin_memory: grad_partitions_flat_buffer = get_accelerator().pin_memory(grad_partitions_flat_buffer) offset = 0 for param in all_params: self.__param_id_to_grad_partition[param.ds_id] = grad_partitions_flat_buffer.narrow( 0, offset, param.partition_numel()) offset += param.partition_numel() def _link_all_hp_params(self): for p in self.module.parameters(): p._z3_optimizer = self def set_lr(self, lr): """Set the learning rate.""" for param_group in self.optimizer.param_groups: param_group["lr"] = lr def get_lr(self): """Return the current learning rate.""" return self.optimizer.param_groups[0]["lr"] # TODO. 
factor out to a utility outside of stage3 @staticmethod def defragment(tensors: List[Tensor]) -> Tensor: """move provided tensors into a contiguous flat buffer, with some additional measures taken to reduce memory fragmentation""" assert len(set(t.dtype for t in tensors)) == 1 assert len(set(t.device for t in tensors)) == 1 cpu_buffer = torch.empty(sum(p.numel() for p in tensors), dtype=get_only_unique_item(t.dtype for t in tensors), device="cpu") tensor_infos: List[Tuple[Tensor, int, int]] = [] orig_device = get_only_unique_item(t.device for t in tensors) offset = 0 for tensor in tensors: tensor_numel = tensor.numel() # move the tensor from device memory to host memory cpu_buffer.narrow(0, offset, tensor_numel).copy_(tensor) tensor.data = torch.empty(0, dtype=tensor.dtype, device=tensor.device) # record some data so we can restore the device tensor later tensor_infos.append((tensor, offset, tensor_numel)) offset += tensor_numel gc.collect() get_accelerator().empty_cache() # copy tensors (now flattened and contiguous) back to GPU device_buffer = cpu_buffer.to(orig_device) # restore device tensors for tensor, offset, tensor_numel in tensor_infos: tensor.data = device_buffer.narrow(0, offset, tensor_numel) return device_buffer def _get_param_coordinator(self, training): return self.parameter_offload.get_param_coordinator(training) def _configure_offloading(self, offload_optimizer_config, offload_param_config): ###################### offload optimizer setup ################################## if offload_optimizer_config is not None and offload_optimizer_config.device != OffloadDeviceEnum.none: self.offload_optimizer = True self.offload_optimizer_pin_memory = offload_optimizer_config.pin_memory self.swap_optimizer = offload_optimizer_config.device == OffloadDeviceEnum.nvme self.offload_optimizer_fast_init = offload_optimizer_config.fast_init ###################### offload param setup ################################## if offload_param_config is not None and offload_param_config.device != OffloadDeviceEnum.none: self.offload_param = True self.offload_param_pin_memory = offload_param_config.pin_memory self.params_in_nvme_and_cpu = offload_param_config.device == OffloadDeviceEnum.nvme self.max_params_in_cpu = offload_param_config.max_in_cpu print_rank_0( f"FP16 params swapping is {self.params_in_nvme_and_cpu}, Max params in CPU is {self.max_params_in_cpu}", force=False) def _configure_tensor_swapping(self, offload_optimizer_config, aio_config): nvme_swap_folder = os.path.join(offload_optimizer_config.nvme_path, 'zero_stage_3') os.makedirs(nvme_swap_folder, exist_ok=True) if dist.get_rank() == 0: logger.info(f'Tensor Swapping: Adding optimizer tensors') swapper_type = PipelinedOptimizerSwapper if offload_optimizer_config.pipeline else PartitionedOptimizerSwapper self.optimizer_swapper = swapper_type(swap_config=offload_optimizer_config, aio_config=aio_config, base_folder=nvme_swap_folder, optimizer=self.optimizer, largest_numel=max(self.fp16_partitioned_groups_flat_numel), device=self.device, dtype=torch.float32, timers=self.timers) @property def elements_in_ipg_bucket(self): return sum(p.ds_numel for p in self.params_in_ipg_bucket) def _move_to_flat_buffer(self, param_list, flat_buffer, avoid_copy=False): '''If flat buffer is None then the parameters in the param_list are not copied to the flat buffer. This is because they exceed the number of max_params_in_cpu Some of these parameters may already be in CPU in unflattened buffers or they maybe in GPU, or they maybe in NVME. 
If they are in NVME, then they will be marked as NOT_AVAILABLE, and will be moved to CPU when they are needed during training.''' if flat_buffer is None: # this dst buffer is on NVMe, so skip this return start = 0 for param in param_list: src = param.ds_tensor dest = flat_buffer.narrow(0, start, src.ds_numel) start = start + src.ds_numel '''if the parameter was initialized in nvme then bring it to the destination buffer directly''' if src.status == PartitionedParamStatus.NOT_AVAILABLE: print_rank_0( f"Swapping in {param.ds_id} with partition size {param.partition_numel()} permanently to CPU") param.nvme_swapper.swap_into_buffer(param, dest) src.data = dest.data src.status = PartitionedParamStatus.AVAILABLE else: assert src.status == PartitionedParamStatus.AVAILABLE, "Partitioned Param must be available here" if not avoid_copy: dest.data.copy_(src.data) src.data = dest.data # Final location must be gpu/cpu in this case param.ds_tensor.final_location = 'not-nvme' def _create_param_groups_fp16_flat_cpu_memory(self): aggregate_params_count = 0 for j, param_group in enumerate(self.trainable_param_groups): params_in_group = sum([p.partition_numel() for p in param_group['params']]) flat_buffer_size = params_in_group if self.params_in_nvme_and_cpu and \ aggregate_params_count + params_in_group > self.max_params_in_cpu: flat_buffer_size = max(0, self.max_params_in_cpu - aggregate_params_count) aggregate_params_count += params_in_group if flat_buffer_size > 0: print_rank_0(f"group {j} flat buffer size {flat_buffer_size}", force=False) self.param_groups_fp16_flat_cpu_memory.append(get_accelerator().pin_memory( torch.empty(int(flat_buffer_size), dtype=self.dtype))) else: print_rank_0(f"No flat buffer size. Param group size was {params_in_group}", force=False) self.param_groups_fp16_flat_cpu_memory.append(torch.empty(1, dtype=self.dtype)) def _create_fp16_partitions_with_defragmentation(self, fp16_param_groups): dist.barrier() param_groups: List[List[Parameter]] = tuple( self._create_fp16_sub_groups(param_group["params"]) for param_group in fp16_param_groups) # bookkeeping related to param groups for param_group_idx, param_group in enumerate(param_groups): for sub_group in param_group: sub_group_idx = len(self.fp16_groups) # record sub group and partitions self.fp16_groups.append(sub_group) self.fp16_partitioned_groups.append([param.ds_tensor for param in sub_group]) # record sub group -> group mapping self.sub_group_to_group_id[sub_group_idx] = param_group_idx # record total elements of parameter partitions in sub group self.fp16_partitioned_groups_flat_numel.append(sum(p.partition_numel() for p in sub_group)) # record padding required to align group to world size (only applies to last rank) rank_requires_padding = dist.get_rank( self.dp_process_group) == dist.get_world_size(self.dp_process_group) - 1 self.groups_padding.append([p.padding_size() if rank_requires_padding else 0 for p in sub_group]) # move parameters to flattened buffer if not self.offload_param: # partitioned params remain in GPU during training # move parameter partitions into a single contiguous flat buffer parameter_partitions: List[Tensor] = [] for sub_group in self.fp16_groups: for param in sub_group: parameter_partitions.append(param.ds_tensor) device_buffer = __class__.defragment(parameter_partitions) # setup flat buffers per subgroup, these are each just sections of the # contiguous flat buffer for all parameters that we created earlier offset = 0 for sub_group in self.fp16_groups: sub_group_numel = sum(param.partition_numel() 
for param in sub_group) self.fp16_partitioned_groups_flat.append(device_buffer.narrow(0, offset, sub_group_numel)) offset += sub_group_numel else: # partitioned params offloaded to CPU when not in use # create a flat CPU memory allocation for each param group self._create_param_groups_fp16_flat_cpu_memory() for param_group_idx, param_group in enumerate(param_groups): flat_offset = 0 for i, sub_group in enumerate(param_group): total_elements = sum(p.partition_numel() for p in sub_group) print_rank_0(f"Params in nvme and cpu {self.params_in_nvme_and_cpu}") #Flat buffer may not be available for parameters that reside in NVME if not self.params_in_nvme_and_cpu or flat_offset + total_elements <= self.param_groups_fp16_flat_cpu_memory[ param_group_idx].numel(): fp16_partitioned_group_flat = self.param_groups_fp16_flat_cpu_memory[param_group_idx].narrow( 0, flat_offset, total_elements) print_rank_0( f"Creating a flat buffer for subgroup {i} requiring {total_elements} elements, and cumulative CPU elements {flat_offset + total_elements}", force=False) elif self.params_in_nvme_and_cpu: fp16_partitioned_group_flat = None print_rank_0(f"No flat buffer for sub group {i} of {total_elements} elements", force=False) else: assert False, "Either params are in nvme, or they are in CPU memory. This code path should not be triggered. Please see you max_params_in_cpu and params_in_nvme configs" self.fp16_partitioned_groups_flat.append(fp16_partitioned_group_flat) flat_offset += total_elements self._move_to_flat_buffer(sub_group, fp16_partitioned_group_flat, avoid_copy=not self.offload_param) # if necessary, create a pinned memory buffer to be used for swapping out # params to NVME after optimizer step should_create_fp16_flat_reuse_buffer = any(flattened_partition_group is None for flattened_partition_group in self.fp16_partitioned_groups_flat) if should_create_fp16_flat_reuse_buffer: max_partition_numel, largest_partition_numel = 0, None for sub_group in self.fp16_groups: total_elements = sum(t.partition_numel() for t in sub_group) if total_elements > max_partition_numel: largest_partition_numel = [t.ds_numel for t in sub_group] max_partition_numel = total_elements assert len(largest_partition_numel) > 0, f'Unexpected that largest partition is empty' self.fp16_groups[0][0].nvme_swapper.reserve_partitioned_swap_space(largest_partition_numel) def _swap_in_sub_group_to_flat_buffer(self, flat_buffer, sub_group_id): offset = 0 elements_in_sub_group = sum([t.ds_numel for t in self.fp16_partitioned_groups[sub_group_id]]) assert (flat_buffer.numel() == elements_in_sub_group) for param, partitioned_param in zip(self.fp16_groups[sub_group_id], self.fp16_partitioned_groups[sub_group_id]): dest = flat_buffer.narrow(0, offset, partitioned_param.ds_numel) if partitioned_param.status == PartitionedParamStatus.NOT_AVAILABLE: print_rank_0( f"Swapping in {param.ds_id} with elements {param.ds_numel} and partition {param.partition_numel()}" ) param.nvme_swapper.swap_in([param], async_op=False) dest.data.copy_(partitioned_param.data) param.nvme_swapper.remove_partition_and_release_buffers([param]) print_rank_0(f"Swapping in {param.ds_id} done") else: dest.data.copy_(partitioned_param.data) offset += partitioned_param.ds_numel def _create_next_swappable_fp32_groups(self): reverse_order_indices = [i for i in range(len(self.fp32_partitioned_groups_flat))] reverse_order_indices.reverse() next_group = None for i in reverse_order_indices: self.next_swappable_fp32_partitioned_groups.append(next_group) if 
self._swappable_optimizer_subgroup(i): next_group = self.fp32_partitioned_groups_flat[i] self.next_swappable_fp32_partitioned_groups.reverse() def _get_sub_group_partitions(self, sub_group_id): sub_group_partitions = [] for param, partitioned_param in zip(self.fp16_groups[sub_group_id], self.fp16_partitioned_groups[sub_group_id]): if partitioned_param.status == PartitionedParamStatus.NOT_AVAILABLE: swap_path = param.nvme_swapper.get_path(param, True) sub_group_partitions.append((partitioned_param, param.partition_numel(), swap_path)) else: sub_group_partitions.append((partitioned_param, partitioned_param.ds_numel, None)) return sub_group_partitions def _create_fp32_partitions(self): cpu_memory_usage = 0 cpu_memory_sub_groups = 0 nvme_memory_usage = 0 num_swappable_partitions = 0 num_swap_from_nvme_partitions = 0 num_swap_from_cpu_partitions = 0 swap_from_nvme_memory_usage = 0 swap_from_cpu_memory_usage = 0 GIGA_BYTES = (1024**3) swappable_fp32_tensors = [] swappable_fp16_src_tensors = [] nvme_fp16_partitions_info = [] nvme_fp16_num_elems = [] nvme_fp32_dest_tensors = [] fp32_element_size = torch.tensor([], dtype=torch.float32).element_size() for i, tensor in enumerate(self.fp16_partitioned_groups_flat): num_elements = self.fp16_partitioned_groups_flat_numel[i] # a partition of the fp32 master weights that will be updated by this process if self._swappable_optimizer_subgroup(i): self.fp32_partitioned_groups_flat.append(torch.Tensor()) nvme_memory_usage += (fp32_element_size * num_elements) num_swappable_partitions += 1 if self.params_in_nvme_and_cpu and tensor is None: num_swap_from_nvme_partitions += 1 swap_from_nvme_memory_usage += (fp32_element_size * num_elements) if self.offload_optimizer_fast_init: sub_group_partitions = self._get_sub_group_partitions(i) nvme_fp16_partitions_info.append(sub_group_partitions) nvme_fp16_num_elems.append(num_elements) nvme_fp32_dest_tensors.append(self.fp32_partitioned_groups_flat[i]) else: unpinned_fp32_buffer = torch.empty(num_elements, device=self.device, dtype=torch.float) self._swap_in_sub_group_to_flat_buffer(unpinned_fp32_buffer, i) self.optimizer_swapper.initialize_parameters(parameters=[self.fp32_partitioned_groups_flat[i]], src_tensors=[unpinned_fp32_buffer]) else: num_swap_from_cpu_partitions += 1 swap_from_cpu_memory_usage += (fp32_element_size * num_elements) swappable_fp32_tensors.append(self.fp32_partitioned_groups_flat[i]) swappable_fp16_src_tensors.append(self.fp16_partitioned_groups_flat[i]) else: cpu_memory_usage += (fp32_element_size * num_elements) cpu_memory_sub_groups += 1 if self.params_in_nvme_and_cpu and tensor is None: unpinned_fp32_buffer = torch.empty(num_elements, device=self.device, dtype=torch.float) self._swap_in_sub_group_to_flat_buffer(unpinned_fp32_buffer, i) self.fp32_partitioned_groups_flat.append(unpinned_fp32_buffer) else: self.fp32_partitioned_groups_flat.append(self.fp16_partitioned_groups_flat[i].to( self.device).clone().float().detach()) self.fp32_partitioned_groups_flat[i].requires_grad = True # keep this in case internal optimizer uses it if len(swappable_fp32_tensors) > 0: self.optimizer_swapper.initialize_parameters(parameters=swappable_fp32_tensors, src_tensors=swappable_fp16_src_tensors) if len(nvme_fp32_dest_tensors) > 0: fp16_pinned_buffers = self.fp16_groups[0][0].nvme_swapper.reserve_available_buffers() assert len(fp16_pinned_buffers) > 0 self.optimizer_swapper.initialize_from_swapped_fp16_params(fp16_partitions_info=nvme_fp16_partitions_info, fp16_num_elems=nvme_fp16_num_elems, 
fp16_pinned_buffers=fp16_pinned_buffers, fp32_parameters=nvme_fp32_dest_tensors) self.fp16_groups[0][0].nvme_swapper.release_reserved_buffers() nvme_gigabytes = nvme_memory_usage / GIGA_BYTES print_rank_0(f'Swappable FP32 Partitions: count={num_swappable_partitions} size={nvme_gigabytes:5.2f} GB', force=False) if self.params_in_nvme_and_cpu: print_rank_0( f'Swap from NVMe Partitions: count = {num_swap_from_nvme_partitions}, size = {swap_from_nvme_memory_usage/GIGA_BYTES:5.2f}GB', force=False) print_rank_0( f'Swap from CPU Partitions: count = {num_swap_from_cpu_partitions}, size = {swap_from_cpu_memory_usage/GIGA_BYTES:5.2f}GB', force=False) cpu_memory_gigabytes = cpu_memory_usage / GIGA_BYTES print_rank_0(f'In-Memory FP32 Partitions: count={cpu_memory_sub_groups} size={cpu_memory_gigabytes:5.2f} GB', force=False) # Clear for on-the-fly population before the optimizer step for param_group in self.optimizer.param_groups: param_group['params'] = [] def _create_fp16_sub_groups(self, params_group): params_group_numel = sum([param.partition_numel() for param in params_group]) sub_group_size = self.sub_group_size if sub_group_size is None or sub_group_size >= params_group_numel: return [params_group] sub_groups = [] sub_group = [] local_sub_group_size = 0 for param in params_group: sub_group.append(param) local_sub_group_size += param.partition_numel() if local_sub_group_size >= sub_group_size or id(param) == id(params_group[-1]): sub_groups.append(sub_group) sub_group = [] local_sub_group_size = 0 return sub_groups def _release_ipg_buffers(self): if self.contiguous_gradients: self.ipg_buffer = None def _optimizer_step(self, sub_group_id): param_group_id = self.sub_group_to_group_id[sub_group_id] fp32_param = self.fp32_partitioned_groups_flat[sub_group_id] self.optimizer.param_groups[param_group_id]['params'] = [fp32_param] self.optimizer.step() self.optimizer.param_groups[param_group_id]['params'] = [] def _swappable_optimizer_subgroup(self, sub_group_id): if not self.swap_optimizer: return False return self.optimizer_swapper.swappable_tensor(None, numel=self.fp16_partitioned_groups_flat_numel[sub_group_id]) def _partitioned_params_swap_out(self, i): offset = 0 fp32_param = self.fp32_partitioned_groups_flat[i] assert fp32_param is not None, \ f'fp32 parameters of sub_group {i} is None' swap_fp16_params = [] swap_fp32_params = [] for param, partitioned_param in zip(self.fp16_groups[i], self.fp16_partitioned_groups[i]): src = fp32_param.narrow(0, offset, partitioned_param.ds_numel) if partitioned_param.status == PartitionedParamStatus.AVAILABLE: partitioned_param.data.copy_(src.data) else: swap_fp32_params.append(src) swap_fp16_params.append(param) offset += partitioned_param.ds_numel if len(swap_fp16_params): swap_fp16_params[0].nvme_swapper.swap_out_partitioned_params(dst_fp16_params=swap_fp16_params, src_fp32_params=swap_fp32_params) def initialize_optimizer_states(self): num_subgroups = len(self.fp16_groups) largest_numel = max([sum([p.ds_numel for p in psg]) for psg in self.fp16_partitioned_groups]) gradient_dtype = self.fp32_partitioned_groups_flat[0].dtype gradient_buffer = torch.zeros(int(largest_numel), dtype=gradient_dtype, device=self.device) timer_names = set() # State initialization for the Adagrad optimizer occurs at construction as opposed to other optimizers # which do lazy initialization of the state at the first call to step. 
is_adagrad = isinstance(self.optimizer, torch.optim.Adagrad) if self.swap_optimizer: self.optimizer_swapper.init_timers() INIT_OPTIMIZER_TIMER = 'init_optimizer_state' timer_names.add(INIT_OPTIMIZER_TIMER) self.start_timers([INIT_OPTIMIZER_TIMER]) for i, group in enumerate(self.fp16_groups): swappable_optimizer_subgroup = self._swappable_optimizer_subgroup(i) swappable_param_subgroup = self.fp16_partitioned_groups_flat[i] is None num_elements = int(self.fp16_partitioned_groups_flat_numel[i]) see_memory_usage( f'[Begin] Initialize optimizer states {i} / {num_subgroups} subgroups, num_elems: {num_elements}, swappable opt/param:{swappable_optimizer_subgroup}/{swappable_param_subgroup}', force=False) if swappable_optimizer_subgroup: self._optimizer_states_and_gradient_swap_in(i, timer_names) if self.offload_optimizer and not swappable_optimizer_subgroup: subgroup_gradient_buffer = torch.zeros(num_elements, dtype=gradient_dtype, device=self.device) if self.offload_optimizer_pin_memory: subgroup_gradient_buffer = get_accelerator().pin_memory(subgroup_gradient_buffer) self.fp32_partitioned_groups_flat[i].grad = subgroup_gradient_buffer else: self.fp32_partitioned_groups_flat[i].grad = gradient_buffer.narrow(0, 0, num_elements) # Initialize the optimizer states with the flattened fp32 partition. if not is_adagrad: self._optimizer_step(i) if swappable_param_subgroup: self._partitioned_params_swap_out(i) if swappable_optimizer_subgroup: self._optimizer_states_and_gradient_swap_out(i, timer_names) see_memory_usage( f'[End] Initialize optimizer states {i} / {num_subgroups} subgroups, num_elems: {num_elements}, swappable opt/param:{swappable_optimizer_subgroup}/{swappable_param_subgroup}', force=False) # Initialize the optimizer states with the flattened fp32 partition. 
if is_adagrad: self.optimizer = torch.optim.Adagrad(self.fp32_partitioned_groups_flat, **self.optimizer.defaults) self.stop_timers([INIT_OPTIMIZER_TIMER]) self.log_timers(timer_names) if self.swap_optimizer: self.optimizer_swapper.log_timers() if not self.offload_optimizer: for group in self.fp32_partitioned_groups_flat: group.grad = None # Reset steps return ######################################################################### #########################ZeRO Partition Gradients######################## ######################################################################### def get_first_param_index(self, group_id, param_group, partition_id): for index, param in enumerate(param_group): param_id = self.get_param_id(param) if partition_id in self.param_to_partition_ids[group_id][param_id]: return index return None def initialize_gradient_partitioning_data_structures(self): total_partitions = dist.get_world_size(group=self.dp_process_group) for i, param_group in enumerate(self.fp16_groups): self.param_to_partition_ids[i] = {} self.is_partition_reduced[i] = {} self.total_grads_in_partition[i] = {} self.remaining_grads_in_partition[i] = {} self.is_grad_computed[i] = {} self.grad_partition_insertion_offset[i] = {} self.grad_start_offset[i] = {} self.first_param_index_in_partition[i] = {} for partition_id in range(total_partitions): self.is_grad_computed[i][partition_id] = {} self.grad_partition_insertion_offset[i][partition_id] = {} self.grad_start_offset[i][partition_id] = {} self.initialize_gradient_partition(i, param_group, partition_id) self.is_partition_reduced[i][partition_id] = False self.first_param_index_in_partition[i][partition_id] = self.get_first_param_index( i, param_group, partition_id) @instrument_w_nvtx def independent_gradient_partition_epilogue(self): self.report_ipg_memory_usage(f"In ipg_epilogue before reduce_ipg_grads", 0) self.__reduce_and_partition_ipg_grads() self.report_ipg_memory_usage(f"In ipg_epilogue after reduce_ipg_grads", 0) self.reduce_and_partition_stream.synchronize() # if dist.get_rank() == 0: # logger.info("Params already reduced %s", self.params_already_reduced) for i in range(len(self.params_already_reduced)): self.params_already_reduced[i] = False #in case of cpu offload, averaged gradients are already in fp32_partitioned_groups_flat.grad #TODO: use a similar code path for both cpu_offload and non-cpu offload if not self.offload_optimizer: for i, sub_group in enumerate(self.fp16_groups): self.averaged_gradients[i] = [ self.__param_id_to_grad_partition[param.ds_id] if param.requires_grad else torch.zeros_like(param.ds_tensor) for param in sub_group ] # self.averaged_gradients[i] = self.get_flat_partition( # self.fp16_groups[i], # 0, # self.fp32_partitioned_groups_flat[i].numel(), # return_tensor_list=True) # this method gets called after every backward. need to increment # here because if it gets incremented in backward() the micro step # id will be off by one when we do the reduce and partition at the. # start of this method. # TODO. 
make this less error prone self.micro_step_id += 1 def overlapping_partition_gradients_reduce_epilogue(self): self.independent_gradient_partition_epilogue() def create_reduce_and_remove_grad_hooks(self): print_rank_0(f'[Begin] Create gradient reduction hooks') self.grad_accs = [] for i, param_group in enumerate(self.fp16_groups): for param in param_group: if param.requires_grad: #print_rank_0(f" Before all gather {param.device}, {param.shape}") # The hook must be created in un-partitioned parameter param.all_gather() #print(f"After all gather {param.device}, {param.shape}") def wrapper(param, i): param_tmp = param.expand_as(param) grad_acc = param_tmp.grad_fn.next_functions[0][0] @instrument_w_nvtx def reduce_partition_and_remove_grads(*notneeded): self.reduce_ready_partitions_and_remove_grads(param, i) grad_acc.register_hook(reduce_partition_and_remove_grads) self.grad_accs.append(grad_acc) #print(f"param grad fn {param.expand_as(param).grad_fn}") wrapper(param, i) # Partition the parameter after creating the hook param.partition() print_rank_0(f'[End] Create gradient reduction hooks') def get_param_id(self, param): unique_id = id(param) return self.param_id[unique_id] def report_ipg_memory_usage(self, tag, param_elems): elem_count = self.elements_in_ipg_bucket + param_elems percent_of_bucket_size = (100.0 * elem_count) // self.reduce_bucket_size see_memory_usage( f"{tag}: elems in_bucket {self.elements_in_ipg_bucket} param {param_elems} max_percent {percent_of_bucket_size}", force=False) ###############Independent Partition Gradient ######################## def reduce_independent_p_g_buckets_and_remove_grads(self, param, i): #print_rank_0(f"Inside reduce ipg buckets. {debug_param2name_id_shape(param)}, ipg elements {self.elements_in_ipg_bucket}, reduce bucket size {self.reduce_bucket_size}", force=True) # Because the ipg bucket is initialized with a random place holder tensor, we must # explicitly check that the bucket has any real data in it (self.elements_in_ipg_bucket > # 0). Otherwise if the incoming param.ds_numel is large, this branch may get triggered on a # garbage data and `self.average_tensor()` will crash because its params_to_reduce will be # empty, while reduction_list will have that garbage data. if self.elements_in_ipg_bucket > 0 and self.elements_in_ipg_bucket + param.ds_numel > self.reduce_bucket_size: self.report_ipg_memory_usage("In ipg_remove_grads before reduce_ipg_grads", param.ds_numel) self.__reduce_and_partition_ipg_grads() param_id = self.get_param_id(param) assert self.params_already_reduced[param_id] == False, \ f"The parameter {param_id} has already been reduced. \ Gradient computed twice for this partition. 
\ Multiple gradient reduction is currently not supported" self.__add_grad_to_ipg_bucket(param) @instrument_w_nvtx @torch.no_grad() def __add_grad_to_ipg_bucket(self, param: Parameter) -> None: self.reduce_and_partition_stream.wait_stream(get_accelerator().default_stream()) if self.contiguous_gradients and self.elements_in_ipg_bucket + param.grad.numel() < self.reduce_bucket_size: # move the gradient to a contiguous buffer with get_accelerator().stream(self.reduce_and_partition_stream): # move the parameter's gradient to the contiguous flat buffer new_grad_tensor = self.__ipg_bucket_flat_buffer.narrow(0, self.elements_in_ipg_bucket, param.grad.numel()).view_as(param.grad) new_grad_tensor.copy_(param.grad, non_blocking=True) param.grad.record_stream(get_accelerator().current_stream()) param.grad.data = new_grad_tensor self.params_in_ipg_bucket.append(param) @instrument_w_nvtx @torch.no_grad() def __reduce_and_partition_ipg_grads(self, safe_mode: bool = False) -> None: if not self.params_in_ipg_bucket: return for param in self.params_in_ipg_bucket: if param.grad.numel() != param.ds_numel: raise RuntimeError(f"{param.grad.numel()} != {param.ds_numel} Cannot reduce scatter " f"gradients whose size is not same as the params") self.params_in_ipg_bucket.sort(key=lambda p: p.ds_id) assert len(set(p.ds_id for p in self.params_in_ipg_bucket)) == len(self.params_in_ipg_bucket) while self.param_reduce_events and self.param_reduce_events[0].query(): self.param_reduce_events.popleft() if len(self.param_reduce_events) > self.max_param_reduce_events: self.param_reduce_events.popleft().synchronize() with get_accelerator().stream(self.reduce_and_partition_stream): if safe_mode: assert_ints_same_as_other_ranks([p.ds_id for p in self.params_in_ipg_bucket]) grad_partitions = self.__avg_scatter_grads(self.params_in_ipg_bucket) self.partition_grads(self.params_in_ipg_bucket, grad_partitions) self.params_in_ipg_bucket.clear() event = get_accelerator().Event() event.record() self.param_reduce_events.append(event) @instrument_w_nvtx def __avg_scatter_grads(self, params_to_reduce: List[Parameter]) -> List[Tensor]: """average gradients and scatter partitions across ranks""" full_grads_for_rank = [p.grad for p in params_to_reduce] if self.communication_data_type != self.dtype: full_grads_for_rank = [g.to(self.communication_data_type) for g in full_grads_for_rank] if self.postscale_gradients and self.gradient_predivide_factor != 1.0: full_grads_for_rank = [g.div(self.gradient_predivide_factor) for g in full_grads_for_rank] grad_partitions_for_rank = reduce_scatter_coalesced(full_grads_for_rank, self.dp_process_group) if self.postscale_gradients and self.gradient_predivide_factor != dist.get_world_size(self.dp_process_group): grad_partitions_for_rank = [g.mul(self.gradient_predivide_factor) for g in grad_partitions_for_rank] if self.communication_data_type != self.dtype: grad_partitions_for_rank = [g.to(self.dtype) for g in grad_partitions_for_rank] return grad_partitions_for_rank def set_grad_positions(self): for i, group in enumerate(self.fp16_groups): current_offset = 0 for param in group: param_id = self.get_param_id(param) num_elements = param.partition_numel() self.grad_position[param_id] = [int(i), int(current_offset), int(num_elements)] #print(f"param id {param_id} i:{i}, ds_tensor {num_elements} numel {param.numel()}") current_offset += num_elements see_memory_usage(f"After Set Grad positions", force=False) def _constant_buffered_norm2(self, input, buffer_size=250000000): norm = None for part in 
input.view(-1).split(buffer_size): if norm is None: norm = part.data.double().norm(2)**2.0 else: norm += part.data.double().norm(2)**2.0 return norm**0.5 def set_norm_for_param_grad_in_gpu(self, param): param_id = self.get_param_id(param) #self.norm_for_param_grads[param_id] = param.grad.data.double().norm(2) #Using a more memory efficient version self.norm_for_param_grads[param_id] = self._constant_buffered_norm2(param.grad) def async_inplace_copy_grad_to_fp32_buffer_from_gpu(self, param, fp32_grad_tensor): with get_accelerator().stream(self.copy_grad_stream): param_id = self.get_param_id(param) src_tensor = param.grad.view(-1).float() #print(f"src_tensor {src_tensor.size()} and fp32 grad {fp32_grad_tensor.size()}") fp32_grad_tensor.copy_(src_tensor, non_blocking=True) param.grad = None def complete_grad_norm_calculation_for_cpu_offload(self, params): total_norm = 0.0 norm_type = 2.0 for p in params: if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0): param_id = self.get_param_id(p) if param_id in self.norm_for_param_grads.keys(): param_norm = self.norm_for_param_grads[param_id] total_norm += param_norm.item()**2 # Sum across all model parallel GPUs. total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=self.dp_process_group) self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.SUM) total_norm = total_norm_cuda[0].item()**(1. / norm_type) if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm: total_norm = -1 return total_norm @instrument_w_nvtx def partition_grads(self, params_to_release: List[Parameter], grad_partitions: List[Tensor]) -> None: offload_fp32_gradients = {} offload_fp32_offsets = {} buffers = [] for param, grad_partition in zip(params_to_release, grad_partitions): contains_real_data = param.partition_numel() * dist.get_rank(self.dp_process_group) < param.ds_numel if not contains_real_data: # this grad partition is empty - don't need to do anything param.grad = None continue # move or accumulate gradient partition to target buffer grad_buffer = self.__param_id_to_grad_partition[param.ds_id].narrow(0, 0, grad_partition.numel()) buffers.append(grad_buffer) if self.micro_step_id == 0: # don't accumulate grad_buffer.copy_(grad_partition, non_blocking=True) # ensure grad buffer is a CUDA buffer to speed up the next few # operations and so it can be used asynchronously grad_buffer = grad_buffer.to(grad_partition.device, non_blocking=True) elif get_accelerator().on_accelerator(grad_buffer): grad_buffer.add_(grad_partition) else: # if dst is CPU, copy first to src device, do the addition # there, then move back to dst. 
adding directly to cpu is very slow cuda_grad_buffer = grad_buffer.to(grad_partition.device, non_blocking=True) cuda_grad_buffer.add_(grad_partition) grad_buffer.copy_(cuda_grad_buffer, non_blocking=True) # ensure grad buffer is a CUDA buffer to speed up the next few # operations and so it can be used asynchronously grad_buffer = cuda_grad_buffer if hasattr(self.inf_or_nan_tracker, "logical_or_"): self.inf_or_nan_tracker.logical_or_(torch.isinf(grad_buffer).any()) self.inf_or_nan_tracker.logical_or_(torch.isnan(grad_buffer).any()) else: # logical_or_ not available in older versions of pytorch self.inf_or_nan_tracker += torch.isinf(grad_buffer).any() self.inf_or_nan_tracker += torch.isnan(grad_buffer).any() self.inf_or_nan_tracker = self.inf_or_nan_tracker > 0 # offload the gradient partition if applicable if self.offload_optimizer: i, dest_offset, _ = self.grad_position[self.get_param_id(param)] if self.is_gradient_accumulation_boundary: self.norm_for_param_grads[self.get_param_id(param)] = self._constant_buffered_norm2(grad_buffer) if self._swappable_optimizer_subgroup(i): if not i in offload_fp32_gradients.keys(): offload_fp32_gradients[i] = [] offload_fp32_offsets[i] = [] offload_fp32_gradients[i].append(grad_buffer.float()) offload_fp32_offsets[i].append(dest_offset) else: fp32_grad_tensor = self.fp32_partitioned_groups_flat[i].grad.narrow( 0, dest_offset, grad_buffer.numel()) fp32_grad_tensor.copy_(grad_buffer) # free the gradient param.grad.record_stream(get_accelerator().current_stream()) param.grad = None if self.offload_optimizer and self.swap_optimizer: for i in offload_fp32_gradients.keys(): self.optimizer_swapper.swap_out_gradients(parameter=self.fp32_partitioned_groups_flat[i], gradient_offsets=offload_fp32_offsets[i], gradient_tensors=offload_fp32_gradients[i]) return buffers def reduce_ready_partitions_and_remove_grads(self, param, i): #print_rank_0(f"Backward {debug_param2name_id_shape(param)}", force=True) self.reduce_independent_p_g_buckets_and_remove_grads(param, i) def zero_reduced_gradients(self, partition_id, i): def are_all_related_partitions_reduced(params_id): for partition_id in self.param_to_partition_ids[i][params_id]: if not self.is_partition_reduced[i][partition_id]: return False return True for params_id in self.is_grad_computed[i][partition_id]: if are_all_related_partitions_reduced(params_id): self.param_dict[params_id].grad = None def flatten_and_print(self, message, tensors, start=0, n=5): flatten_tensor = self.flatten(tensors) def print_func(): logger.info(flatten_tensor.contiguous().view(-1).narrow(0, start, n)) self.sequential_execution(print_func, message) def get_grads_to_reduce(self, i, partition_id): def get_reducible_portion(key): grad = self.param_dict[key].grad total_elements = grad.numel() start = self.grad_start_offset[i][partition_id][key] num_elements = min(total_elements - start, self.partition_size[i] - self.grad_partition_insertion_offset[i][partition_id][key]) if not pg_correctness_test: if num_elements == total_elements: return grad else: return grad.contiguous().view(-1).narrow(0, int(start), int(num_elements)) else: if num_elements == total_elements: return grad.clone() else: return grad.clone().contiguous().view(-1).narrow(0, int(start), int(num_elements)) grads_to_reduce = [] for key in self.is_grad_computed[i][partition_id]: grad = get_reducible_portion(key) grads_to_reduce.append(grad) return grads_to_reduce def sequential_execution(self, function, message, group=None): if group is None: group = self.dp_process_group if 
dist.get_rank(group=group) == 0: logger.info(message) for id in range(dist.get_world_size(group=group)): if id == dist.get_rank(group=group): function() dist.barrier(group=group) def set_none_gradients_to_zero(self, i, partition_id): for param_id in self.is_grad_computed[i][partition_id]: param = self.param_dict[param_id] if param.grad is None: param.grad = torch.zeros_like(param) ######################Reduction Related Methods############################## def allreduce_bucket(self, bucket, rank=None, log=None): rank = None tensor = self.flatten(bucket) tensor_to_allreduce = tensor if pg_correctness_test: communication_data_type = torch.float32 else: communication_data_type = self.communication_data_type if communication_data_type != tensor.dtype: tensor_to_allreduce = tensor.to(communication_data_type) tensor_to_allreduce.div_(dist.get_world_size(group=self.dp_process_group)) if rank is None: # "All Reducing" dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group) else: global_rank = dist.get_global_rank(self.dp_process_group, rank) dist.reduce(tensor_to_allreduce, global_rank, group=self.dp_process_group) if communication_data_type != tensor.dtype and tensor is not tensor_to_allreduce: if rank is None or rank == dist.get_rank(group=self.dp_process_group): tensor.copy_(tensor_to_allreduce) return tensor # if rank is specified do a reduction instead of an allreduce def allreduce_and_copy(self, small_bucket, rank=None, log=None): with get_accelerator().stream(self.reduction_stream): allreduced = self.allreduce_bucket(small_bucket, rank=rank, log=log) if rank is None or rank == dist.get_rank(group=self.dp_process_group): for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)): buf.copy_(synced) def allreduce_no_retain(self, bucket, numel_per_bucket=500000000, rank=None, log=None): small_bucket = [] numel = 0 for tensor in bucket: small_bucket.append(tensor) numel = numel + tensor.numel() if numel > numel_per_bucket: self.allreduce_and_copy(small_bucket, rank=rank, log=None) small_bucket = [] if len(small_bucket) > 0: self.allreduce_and_copy(small_bucket, rank=rank, log=log) ############################################################################# ############################################################################# ############################################################################# # views the tensor as multiple partitions and returns # those partitions def get_data_parallel_partitions(self, tensor): partitions = [] dp = dist.get_world_size(group=self.dp_process_group) dp_id = dist.get_rank(group=self.dp_process_group) total_num_elements = tensor.numel() base_size = total_num_elements // dp remaining = total_num_elements % dp start = 0 for id in range(dp): partition_size = base_size if id < remaining: partition_size = partition_size + 1 partitions.append(tensor.narrow(0, start, partition_size)) start = start + partition_size return partitions def get_partition_info(self, tensor_list, partition_size, partition_id): params_in_partition = [] params_not_in_partition = [] start_index = partition_size * partition_id end_index = partition_size * (partition_id + 1) current_index = 0 first_offset = 0 for tensor in tensor_list: tensor_size = tensor.numel() if (current_index >= start_index and current_index < end_index): params_in_partition.append(tensor) elif start_index > current_index and start_index < (current_index + tensor_size): params_in_partition.append(tensor) assert (first_offset == 0 ), "This can happen either zero or only once as this 
must be the first tensor in the partition" first_offset = start_index - current_index else: params_not_in_partition.append(tensor) current_index = current_index + tensor_size return params_in_partition, params_not_in_partition, first_offset @instrument_w_nvtx def zero_grad(self, set_to_none=False): """ Zero FP16 parameter grads. """ self.micro_step_id = 0 # FP32 grad should never exist. # For speed, set model fp16 grad to None by default for group in self.fp16_groups: for p in group: if set_to_none: if p.grad is not None and get_accelerator().on_accelerator(p.grad): p.grad.record_stream(get_accelerator().current_stream()) p.grad = None else: if p.grad is not None: p.grad.detach_() p.grad.zero_() def _model_parallel_all_reduce(self, tensor, op): """ Perform all reduce within model parallel group, if any. """ if self.model_parallel_group is None: pass else: dist.all_reduce(tensor=tensor, op=op, group=self.model_parallel_group) @instrument_w_nvtx def get_grad_norm_direct(self, gradients, params, norm_type=2): """Clips gradient norm of an iterable of parameters. This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and added functionality to handle model parallel parameters. Note that the gradients are modified in place. Arguments: parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a single Tensor that will have gradients normalized max_norm (float or int): max norm of the gradients norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm. Returns: Total norm of the parameters (viewed as a single vector). """ norm_type = float(norm_type) if norm_type == inf: total_norm = max(g.data.abs().max() for g in gradients) total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=self.dp_process_group) # Take max across all GPUs. self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.MAX) total_norm = total_norm_cuda[0].item() else: # if dist.get_rank() == 0: # logger.info(f"Total Norm beginning {total_norm}") grad_norms = [] for g, p in zip(gradients, params): if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0): grad_norms.append(g.to(get_accelerator().device_name(), non_blocking=True).double().norm(2)) # Sum across all model parallel GPUs. total_norm_cuda = torch.sum(torch.pow(torch.stack(grad_norms), 2)) dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=self.dp_process_group) self._model_parallel_all_reduce(tensor=total_norm_cuda, op=dist.ReduceOp.SUM) total_norm = total_norm_cuda.item()**(1. / norm_type) if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm: total_norm = -1 return total_norm # creates a flat fused tensor from the tensor list starting at the first_offset # in the first tensor of the list. 
If there are not enough elements in the tensor # list then the flat tensor will be padded with zeros def get_flat_partition(self, tensor_list, first_offset, partition_size, return_tensor_list=False): flat_tensor_list = [] current_size = 0 for i, tensor in enumerate(tensor_list): if tensor.grad is None: tensor.grad = torch.zeros_like(tensor) tensor = tensor.grad num_elements = tensor.numel() tensor_offset = 0 # we need to offset to get to the right element if i == 0 and first_offset > 0: tensor_offset = first_offset num_elements = num_elements - tensor_offset # we dont need all elements of the tensor if num_elements > (partition_size - current_size): num_elements = partition_size - current_size # we need a narrow view of the tensor based on the tensor offset and number of elements that # we need from this tensor if tensor_offset > 0 or num_elements < tensor.numel(): flat_tensor_list.append(tensor.contiguous().view(-1).narrow(0, int(tensor_offset), int(num_elements))) else: flat_tensor_list.append(tensor) current_size = current_size + num_elements # this means its the last partition and does not align with the dp boundary. We need to pad before flattening if current_size < partition_size: flat_tensor_list.append( torch.zeros(int(partition_size - current_size), dtype=tensor_list[0].dtype, device=tensor_list[0].device)) if return_tensor_list: return flat_tensor_list return self.flatten(flat_tensor_list) def free_grad_in_param_list(self, param_list): for p in param_list: p.grad = None def reset_cpu_buffers(self): self.norm_for_param_grads = {} self.local_overflow = False def log_timers(self, timer_names): if self.timers is None: return self.timers.log(names=list(timer_names)) def start_timers(self, timer_names): if self.timers is None: return for name in timer_names: self.timers(name).start() def stop_timers(self, timer_names): if self.timers is None: return for name in timer_names: self.timers(name).stop() def _pre_step(self): self.micro_step_id = 0 print_rank_0(f"Inside Step function") see_memory_usage(f"In step before checking overflow", force=False) print_rank_0("Finished Tracing at Beginning of Step") self._get_param_coordinator(training=True).hierarchy = 0 print_rank_0("Finished Tracing at Beginning of Step") @instrument_w_nvtx def _get_norm_groups(self): norm_groups = [] for i, group in enumerate(self.fp16_groups): if self.offload_optimizer: norm_groups.append(self.complete_grad_norm_calculation_for_cpu_offload(self.fp16_groups[i])) else: norm_groups.append(self.get_grad_norm_direct(self.averaged_gradients[i], self.fp16_groups[i])) return norm_groups @instrument_w_nvtx def _prepare_fp32_grad_for_sub_group(self, sub_group_id): partition_id = dist.get_rank(group=self.dp_process_group) single_grad_partition = self.flatten(self.averaged_gradients[sub_group_id]).to( self.fp32_partitioned_groups_flat[sub_group_id].dtype) assert single_grad_partition.numel() == self.fp32_partitioned_groups_flat[sub_group_id].numel(), \ "averaged gradients have different number of elements that partition size {} {} {} {}".format( single_grad_partition.numel(), self.fp32_partitioned_groups_flat[sub_group_id].numel(), sub_group_id, partition_id) self.fp32_partitioned_groups_flat[sub_group_id].grad = single_grad_partition # release all the gradient since we have already created a necessary copy in dp_grad_partition self.zero_grad(set_to_none=True) for grad in filter(lambda g: get_accelerator().on_accelerator(g), self.averaged_gradients[sub_group_id]): grad.record_stream(get_accelerator().current_stream()) 
self.averaged_gradients[sub_group_id] = None @instrument_w_nvtx def _prepare_sub_group(self, sub_group_id, timer_names=set()): see_memory_usage(f'Before prepare optimizer sub group {sub_group_id}', force=False) if self._swappable_optimizer_subgroup(sub_group_id): self._optimizer_states_and_gradient_swap_in(sub_group_id, timer_names) elif not self.offload_optimizer: self._prepare_fp32_grad_for_sub_group(sub_group_id) see_memory_usage(f'After prepare optimizer sub group {sub_group_id}', force=False) def _optimizer_states_and_gradient_swap_in(self, sub_group_id, timer_names=set()): param_length = self.fp16_partitioned_groups_flat_numel[sub_group_id] fp32_param_id = id(self.fp32_partitioned_groups_flat[sub_group_id]) assert self._swappable_optimizer_subgroup(sub_group_id), \ f'Parameter {fp32_param_id} of numel={param_length} is not swappable' OPTIMIZER_SWAP_IN_STATE = 'optimizer_swap_in_state' see_memory_usage(f'pre-step Before swapping in optimizer tensors {sub_group_id}', force=False) self.start_timers([OPTIMIZER_SWAP_IN_STATE]) self.optimizer_swapper.swap_in_optimizer_state( parameter=self.fp32_partitioned_groups_flat[sub_group_id], async_parameter=self.next_swappable_fp32_partitioned_groups[sub_group_id]) self.stop_timers([OPTIMIZER_SWAP_IN_STATE]) timer_names.add(OPTIMIZER_SWAP_IN_STATE) see_memory_usage(f'pre-step After swapping in optimizer tensors {sub_group_id}', force=False) @instrument_w_nvtx def _release_sub_group(self, sub_group_id, timer_names=set()): see_memory_usage(f'Before release optimizer sub group {sub_group_id}', force=False) # get rid of the fp32 gradients. Not needed anymore if not self.offload_optimizer: self.fp32_partitioned_groups_flat[sub_group_id].grad = None if self._swappable_optimizer_subgroup(sub_group_id): self._optimizer_states_and_gradient_swap_out(sub_group_id, timer_names) see_memory_usage(f'After release optimizer sub group {sub_group_id}', force=False) # create a flat tensor aligned at the alignment boundary @instrument_w_nvtx def flatten_dense_tensors_aligned(self, tensor_list, alignment): num_elements = 0 for tens in tensor_list: num_elements = num_elements + tens.numel() remaining = num_elements % alignment if remaining: elements_to_add = alignment - remaining pad_tensor = torch.zeros(elements_to_add, device=tensor_list[0].device, dtype=tensor_list[0].dtype) padded_tensor_list = tensor_list + [pad_tensor] num_elements = num_elements + elements_to_add else: padded_tensor_list = tensor_list return self.flatten(padded_tensor_list) def _optimizer_states_and_gradient_swap_out(self, sub_group_id, timer_names=set()): param_length = self.fp16_partitioned_groups_flat_numel[sub_group_id] fp32_param_id = id(self.fp32_partitioned_groups_flat[sub_group_id]) assert self._swappable_optimizer_subgroup(sub_group_id), \ f'Parameter {fp32_param_id} of numel={param_length} is not swappable' OPTIMIZER_SWAP_OUT_STATE = 'optimizer_swap_out_state' see_memory_usage(f'post-step Before swapping out optimizer tensors {sub_group_id}', force=False) self.start_timers([OPTIMIZER_SWAP_OUT_STATE]) self.optimizer_swapper.swap_out_optimizer_state( parameter=self.fp32_partitioned_groups_flat[sub_group_id], async_swap=self.next_swappable_fp32_partitioned_groups[sub_group_id] is not None) self.stop_timers([OPTIMIZER_SWAP_OUT_STATE]) see_memory_usage(f'post-step After swapping out optimizer tensors {sub_group_id}', force=False) timer_names.add(OPTIMIZER_SWAP_OUT_STATE) # get rid of the fp32 gradients. 
Not needed anymore self.fp32_partitioned_groups_flat[sub_group_id].grad = None def _unflatten_partitioned_parameters(self, sub_group_id): updated_params = self.unflatten(self.fp16_partitioned_groups_flat[sub_group_id], self.fp16_partitioned_groups[sub_group_id]) for partitioned_param, q in zip(self.fp16_partitioned_groups[sub_group_id], updated_params): partitioned_param.data = q.data def _overflow_clean_up(self, prev_scale): see_memory_usage('After overflow before clearing gradients', force=False) self.zero_grad(set_to_none=True) if self.offload_optimizer: self.reset_cpu_buffers() else: self.averaged_gradients = {} see_memory_usage('After overflow after clearing gradients', force=False) @instrument_w_nvtx def _overflow_check_and_loss_scale_update(self): # First compute norm for all group so we know if there is overflow self.check_overflow() #loss scaling related computation prev_scale = self.loss_scale self._update_scale(self.overflow) if self.overflow: self._overflow_clean_up(prev_scale) return self.overflow @instrument_w_nvtx def _post_step(self, timer_names=set()): if self.offload_optimizer: self.reset_cpu_buffers() #Gathering persisting parameters if len(self.persistent_parameters) > 0: self.persistent_parameters[0].all_gather(self.persistent_parameters) if self.swap_optimizer: self.optimizer_swapper.log_timers() self.log_timers(timer_names) see_memory_usage('After zero_optimizer step', force=False) print_rank_0(f"------------------Finishing Step-----------------------") @instrument_w_nvtx def _reassign_or_swap_out_partitioned_parameters(self, sub_group_id): if self.fp16_partitioned_groups_flat[sub_group_id] is not None: self.fp16_partitioned_groups_flat[sub_group_id].data.copy_( self.fp32_partitioned_groups_flat[sub_group_id].data) #unflatten fp16 parameter subgroup self._unflatten_partitioned_parameters(sub_group_id) else: self._partitioned_params_swap_out(sub_group_id) def override_loss_scale(self, loss_scale): if loss_scale != self.external_loss_scale: logger.info(f'[deepspeed] setting loss scale from {self.external_loss_scale} -> {loss_scale}') self.custom_loss_scaler = True self.external_loss_scale = loss_scale @instrument_w_nvtx def step(self, closure=None): """ Not supporting closure. 
""" self._pre_step() self._partition_all_parameters() #checks for overflow, adjust the loss scale accordingly if self._overflow_check_and_loss_scale_update(): if self.swap_optimizer: self.optimizer_swapper.log_timers() return norm_groups = self._get_norm_groups() scaled_global_grad_norm = get_global_norm(norm_list=norm_groups) # Stash unscaled gradient norm self._global_grad_norm = scaled_global_grad_norm / self.loss_scale timer_names = set() timer_names.add('optimizer_step') self.start_timers(['optimizer_step']) #update parameters one sub group at a time for sub_group_id, group in enumerate(self.fp16_groups): #prepare optimizer states, gradients and fp32 parameters for update self._prepare_sub_group(sub_group_id, timer_names) #scale the fp32 gradients self.unscale_and_clip_grads(sub_group_id, scaled_global_grad_norm) #apply the optimizer step on the sub group and copy fp32 parameters to fp16 self._optimizer_step(sub_group_id) #put fp16 parameters in appropriate location self._reassign_or_swap_out_partitioned_parameters(sub_group_id) #release memory or swap out optimizer states of fp32 parameters self._release_sub_group(sub_group_id, timer_names) self.stop_timers(['optimizer_step']) self._post_step(timer_names) # warn user about caching allocator flushes memory_stats = get_accelerator().memory_stats() alloc_retries = memory_stats["num_alloc_retries"] if memory_stats != None else 0 if alloc_retries > self.n_caching_allocator_flushes: if dist.get_rank() == 0: logger.warning( "%d pytorch allocator cache flushes since last step. this happens " "when there is high memory pressure and is detrimental to " "performance. if this is happening frequently consider adjusting " "settings to reduce memory consumption. If you are unable to " "make the cache flushes go away consider adding " "get_accelerator().empty_cache() calls in your training loop to ensure " "that all ranks flush their caches at the same time", alloc_retries - self.n_caching_allocator_flushes) self.n_caching_allocator_flushes = alloc_retries def dump_pre_step_gradients(self, debug_fp32_grads): # Dump gradient norms for debugging for i, _ in enumerate(self.fp16_groups): print(f'Pre-Step Dump Norms for Group {i} FP16P, FP16G, FP32G, FP32GUC') for fp16_param, fp32_grad in zip(self.fp16_groups[i], debug_fp32_grads[i]): param_id = self.get_param_id(fp16_param) fp16_grad_norm = self.debug_fp16_grads[i][param_id] fp32_grad_norm = [float(t.data.float().norm(2)) for t in fp32_grad] norm_list = [fp16_grad_norm, fp32_grad_norm] print(f'Pre-Step Norms {i} {param_id} = {norm_list}') def dump_post_step_gradients(self): # Dump gradient norms for debugging for i, group in enumerate(self.fp16_groups): print(f'Post-Step Dump Norms for Group {i} FP16P, FP16DS, FP16FLAT, FP32FLAT') unflat_fp16 = self.unflatten(self.fp16_groups_flat[i], self.fp16_groups[i]) unflat_fp32 = self.unflatten(self.fp32_partitioned_groups_flat[i], self.fp16_groups[i]) for j, p in enumerate(self.fp16_groups[i]): param_id = self.get_param_id(p) param_norm = float(p.data.float().norm(2)) ds_norm = float(p.ds_tensor.data.float().norm(2)) unflat_norm = [float(t.data.float().norm(2)) for t in [unflat_fp16[j], unflat_fp32[j]]] norm_list = [param_norm, ds_norm] + unflat_norm print(f'Post-Step Norms {i} {param_id} = {norm_list}') @instrument_w_nvtx def unscale_and_clip_grads(self, sub_group_id, total_norm): # compute combined scale factor for this group combined_scale = self.loss_scale if self.clip_grad > 0.: # norm is in fact norm*scale clip = ((total_norm / self.loss_scale) + 1e-6) / 
self.clip_grad if clip > 1: combined_scale = clip * self.loss_scale self.fp32_partitioned_groups_flat[sub_group_id].grad.mul_(1. / combined_scale) def _check_overflow(self, partition_gradients=True): self.overflow = self.has_overflow(partition_gradients) # `params` is a list / generator of torch.Variable def has_overflow_serial(self, params, is_grad_list=False): for p in params: if p.grad is not None and self._has_inf_or_nan(p.grad.data): return True return False def has_overflow_partitioned_grads_serial(self): for i in range(len(self.fp16_groups)): for j, grad in enumerate(self.averaged_gradients[i]): if grad is not None and self._has_inf_or_nan(grad.data, j): return True return False @instrument_w_nvtx def has_overflow(self, partition_gradients=True): if partition_gradients: with get_accelerator().stream(self.reduce_and_partition_stream): self.local_overflow = bool(self.inf_or_nan_tracker.item()) self.inf_or_nan_tracker.zero_() overflow = self.local_overflow #overflow = self.has_overflow_partitioned_grads_serial() overflow_gpu = get_accelerator().ByteTensor([overflow]) dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=self.dp_process_group) else: params = [] for group in self.fp16_groups: for param in group: params.append(param) overflow = self.has_overflow_serial(params, is_grad_list=partition_gradients) overflow_gpu = get_accelerator().ByteTensor([overflow]) # Since each model parallel GPU carries only part of the model, # make sure overflow flag is synced across all the model parallel GPUs self._model_parallel_all_reduce(tensor=overflow_gpu, op=dist.ReduceOp.MAX) overflow = overflow_gpu[0].item() return bool(overflow) # `x` is a torch.Tensor @staticmethod def _has_inf_or_nan(x, j=None): try: # if x is half, the .float() incurs an additional deep copy, but it's necessary if # Pytorch's .sum() creates a one-element tensor of the same type as x # (which is true for some recent version of pytorch). cpu_sum = float(x.float().sum()) # More efficient version that can be used if .sum() returns a Python scalar # cpu_sum = float(x.sum()) except RuntimeError as instance: # We want to check if inst is actually an overflow exception. # RuntimeError could come from a different error. # If so, we still want the exception to propagate. if "value cannot be converted" not in instance.args[0]: raise return True else: if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum: return True return False @instrument_w_nvtx def backward(self, loss, retain_graph=False): """ :attr:`backward` performs the following steps: 1. fp32_loss = loss.float() 2. scaled_loss = fp32_loss*loss_scale 3. 
scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves """ if self.swap_optimizer: self.optimizer_swapper.pre_backward() see_memory_usage(f"Before backward", force=False) if self.custom_loss_scaler: scaled_loss = self.external_loss_scale * loss scaled_loss.backward() else: self.loss_scaler.backward(loss.float(), retain_graph=retain_graph) self._get_param_coordinator(training=True).reset_step() if self.swap_optimizer: self.optimizer_swapper.post_backward() def get_fp32_grad_partitions(self) -> Dict[int, Dict[int, Tensor]]: """get fp32 gradient partition dictionary accessed as grad_dict[parameter_group_index][parameter_index] """ self.reduce_and_partition_stream.synchronize() grad_dict = collections.defaultdict(dict) if self.offload_optimizer: for group in self.fp16_groups: for param_idx, param in enumerate(group): group_idx, dest_offset, num_elements = self.grad_position[self.get_param_id(param)] fp32_grad = self.fp32_partitioned_groups_flat[group_idx].grad.narrow(0, dest_offset, num_elements) grad_dict[group_idx][param_idx] = fp32_grad else: for group_idx, group in self.averaged_gradients.items(): for param_idx, gradient in enumerate(group): grad_dict[group_idx][param_idx] = gradient.float() return grad_dict def _fp32_state_allgather(self, param, fp32_state): reduce_buffer = torch.zeros(self.partition_count * fp32_state.numel(), dtype=torch.float32, device=param.device).flatten() my_rank = dist.get_rank(group=self.dp_process_group) partitions = [ reduce_buffer.narrow(0, fp32_state.numel() * i, fp32_state.numel()) for i in range(self.partition_count) ] partitions[my_rank].data.copy_(fp32_state.data, non_blocking=False) dist.all_gather(partitions, partitions[my_rank], group=self.dp_process_group) return reduce_buffer.narrow(0, 0, param.ds_numel).view(param.ds_shape) def get_fp32_grad_for_param(self, param) -> Tensor: if not param.requires_grad: return None self.reduce_and_partition_stream.synchronize() if self.offload_optimizer: group_idx, dest_offset, num_elements = self.grad_position[self.get_param_id(param)] fp32_grad = self.fp32_partitioned_groups_flat[group_idx].grad.narrow(0, dest_offset, num_elements).to(device=param.device) else: fp32_grad = self.__param_id_to_grad_partition[param.ds_id].float() return self._fp32_state_allgather(param, fp32_grad) def get_full_hp_param(self, param, optim_state_key=None) -> Tensor: if not param.requires_grad: return None self.reduce_and_partition_stream.synchronize() group_idx, dest_offset, num_elements = self.grad_position[self.get_param_id(param)] if self._swappable_optimizer_subgroup(group_idx): self._optimizer_states_and_gradient_swap_in(group_idx) fp32_param = self.fp32_partitioned_groups_flat[group_idx] if optim_state_key is None: fp32_opt_state = fp32_param.narrow(0, dest_offset, num_elements).to(device=param.device) else: fp32_opt_state = self.optimizer.state[fp32_param][optim_state_key].narrow( 0, dest_offset, num_elements).to(device=param.device) hp_param = self._fp32_state_allgather(param, fp32_opt_state) if self._swappable_optimizer_subgroup(group_idx): self._optimizer_states_and_gradient_swap_out(group_idx) return hp_param @instrument_w_nvtx def _partition_all_parameters(self): self.parameter_offload.partition_all_parameters() def check_overflow(self, partition_gradients=True): self._check_overflow(partition_gradients) def _update_scale(self, has_overflow=False): self.loss_scaler.update_scale(has_overflow) # Promote state so it can be retrieved or set via 
"fp16_optimizer_instance.state" def _get_state(self): return self.optimizer.state def _set_state(self, value): self.optimizer.state = value state = property(_get_state, _set_state) # Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups" # (for example, to adjust the learning rate) def _get_param_groups(self): return self.optimizer.param_groups def _set_param_groups(self, value): self.optimizer.param_groups = value self.trainable_param_groups = self._get_trainable_parameter_groups() param_groups = property(_get_param_groups, _set_param_groups) # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale" def _get_loss_scale(self): if self.custom_loss_scaler: return self.external_loss_scale else: return self.loss_scaler.cur_scale def _set_loss_scale(self, value): self.loss_scaler.cur_scale = value loss_scale = property(_get_loss_scale, _set_loss_scale) cur_scale = property(_get_loss_scale, _set_loss_scale) def _get_lean_tensors(self, padded_flattened_tensor, group_tensors, paddings): # Remove paddings from flattened tensor individual_tensors = self.unflatten(padded_flattened_tensor, group_tensors) lean_lengths = [t.numel() - pad for t, pad in zip(group_tensors, paddings)] lean_tensors = [t[:len] for t, len in zip(individual_tensors, lean_lengths)] #logger.info(f'rank {dist.get_rank()}: lean_tensors = {[t.numel() for t in lean_tensors]}') return lean_tensors #TODO REVISIT this for stage 3 def get_lean_optimizer_state(self): # Return optimizer states after removing paddings. # This method assumes that each param group contains a single flattened tensor. optimizer_groups_state = [] for i, group in enumerate(self.optimizer.param_groups): p = group['params'][0] lean_state = {} for key, value in self.optimizer.state[p].items(): if torch.is_tensor(value): padded_lens = [t.numel() for t in self.fp16_partitioned_groups[i]] lean_state[key] = self._get_lean_tensors(value, self.fp16_partitioned_groups[i], self.groups_padding[i]) lean_flat_len = sum([t.numel() for t in lean_state[key]]) else: lean_state[key] = value optimizer_groups_state.append(lean_state) return optimizer_groups_state def get_groups_without_padding(self, groups_with_padding): # Return group tensor after removing paddings added for alignment to DP world size. 
groups_without_padding = [] for i, group in enumerate(groups_with_padding): lean_group = self._get_lean_tensors(group, self.fp16_partitioned_groups[i], self.groups_padding[i]) groups_without_padding.append(lean_group) return groups_without_padding def _set_fp32_optimizer_param_groups(self): for sub_group_id, _ in enumerate(self.fp16_groups): param_group_id = self.sub_group_to_group_id[sub_group_id] self.optimizer.param_groups[param_group_id]['params'].append( self.fp32_partitioned_groups_flat[sub_group_id]) def _clear_fp32_optimizer_param_groups(self): for param_group in self.optimizer.param_groups: param_group['params'] = [] def _rigid_state_dict(self): state_dict = {} state_dict[ZERO_STAGE] = ZeroStageEnum.weights state_dict['loss_scaler'] = self.loss_scaler state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale state_dict['overflow'] = self.overflow state_dict[PARTITION_COUNT] = self.partition_count self._set_fp32_optimizer_param_groups() state_dict[OPTIMIZER_STATE_DICT] = self.optimizer.state_dict() state_dict[FP32_FLAT_GROUPS] = self.fp32_partitioned_groups_flat self._clear_fp32_optimizer_param_groups() return state_dict def state_dict(self): """ Returns a dict containing the current state of this :class:`FP16_Optimizer` instance. This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict of the contained Pytorch optimizer. Example:: checkpoint = {} checkpoint['model'] = model.state_dict() checkpoint['optimizer'] = optimizer.state_dict() torch.save(checkpoint, "saved.pth") """ if self.elastic_checkpoint: raise NotImplementedError("ZeRO-3 does not yet support elastic checkpointing, please disable for now.") if self.swap_optimizer or self.params_in_nvme_and_cpu: raise NotImplementedError( "ZeRO-3 does not yet support checkpointing with NVMe offloading, please disable for now.") return self._rigid_state_dict() # Restore base optimizer fp32 weights from checkpoint by: # 1) Merging fp32 weights from checkpoints of all partitions # 2) Extracting fp32 weights for current partition from merged weights # 3) Using extracted weights to update base optimizer weights directly. def _restore_from_fp32_weights(self, all_state_dict): flat_local_partition = [] for i in range(len(self.fp32_partitioned_groups_flat)): merged_partitions = [sd['fp32_groups'][i] for sd in all_state_dict] flat_local_partition.append(self._get_flattened_partition(merged_partitions)) for current, saved in zip(self.fp32_partitioned_groups_flat, flat_local_partition): current.data.copy_(saved.data) # Restore base optimizer fp32 weights from ZeRO fp16 weights def _restore_from_bit16_weights(self): for fp16_partitions, fp32_partition in zip(self.fp16_partitioned_groups_flat, self.fp32_partitioned_groups_flat): fp32_partition.data.copy_(fp16_partitions.data) # Refresh the fp32 master params from the fp16 copies. 
def refresh_fp32_params(self): self._restore_from_bit16_weights() # Extract flattened partition for current rank from all partitions def _get_flattened_partition(self, all_partition_states): partition_id = dist.get_rank(group=self.dp_process_group) alignment = dist.get_world_size(group=self.dp_process_group) param_partitions = [[] for _ in range(len(all_partition_states[0]))] for i, partition in enumerate(all_partition_states): for j, param in enumerate(partition): param_partitions[j].append(param) local_state_partitions = [] for param_index, param_slices in enumerate(param_partitions): flattened_merged_tensor = self.flatten_dense_tensors_aligned(param_slices, alignment) new_partitions = self.get_data_parallel_partitions(flattened_merged_tensor) local_state_partitions.append(new_partitions[partition_id]) if torch.is_tensor(local_state_partitions[0]): return self.flatten_dense_tensors_aligned(local_state_partitions, alignment) # Assume non-tensor states are not partitioned and equal across ranks, so return first one return local_state_partitions[0] # Restore base optimizer state from checkpoint by # 1) Merging optimizer state from checkpoints of all partitions # 2) Extracting optimizer state for current partition from the merged state # 3) Using the extracted value to directly update the base optimizer. def _restore_base_optimizer_state(self, all_state_dict): base_optimizer_group_states = [] for i in range(len(self.optimizer.param_groups)): partition_states = {} all_partition_group_states = [sd['base_optimizer_state'][i] for sd in all_state_dict] for key in all_partition_group_states[0].keys(): all_partition_states = [all_states[key] for all_states in all_partition_group_states] partition_states[key] = self._get_flattened_partition(all_partition_states) base_optimizer_group_states.append(partition_states) for i, group in enumerate(self.optimizer.param_groups): p = group['params'][0] for key, saved in base_optimizer_group_states[i].items(): if torch.is_tensor(self.optimizer.state[p][key]): self.optimizer.state[p][key].data.copy_(saved.data) else: self.optimizer.state[p][key] = saved def _rigid_load_state_dict(self, state_dict, load_optimizer_states=True): # I think it should actually be ok to reload the optimizer before the model. self.loss_scaler = state_dict['loss_scaler'] self.dynamic_loss_scale = state_dict['dynamic_loss_scale'] self.overflow = state_dict['overflow'] if load_optimizer_states: self._set_fp32_optimizer_param_groups() self.optimizer.load_state_dict(state_dict[OPTIMIZER_STATE_DICT]) self._clear_fp32_optimizer_param_groups() # restore fp32 partitions for curr_param, saved_param in zip(self.fp32_partitioned_groups_flat, state_dict[FP32_FLAT_GROUPS]): curr_param.data.copy_(saved_param.data) # restore fp16 partitions from fp32 for sub_group_id in range(len(self.fp32_partitioned_groups_flat)): fp32_param = self.fp32_partitioned_groups_flat[sub_group_id] fp16_param = self.fp16_partitioned_groups_flat[sub_group_id] fp16_param.data.copy_(fp32_param.data) # update fp16 unflattened params for sub_group_id in range(len(self.fp16_partitioned_groups_flat)): updated_params = self.unflatten(self.fp16_partitioned_groups_flat[sub_group_id], self.fp16_partitioned_groups[sub_group_id]) for partitioned_param, q in zip(self.fp16_partitioned_groups[sub_group_id], updated_params): partitioned_param.data = q.data # TODO: Support different/changing load/save DP degree. 
def load_state_dict(self, state_dict_list, load_optimizer_states=True, load_from_fp32_weights=False, checkpoint_folder=None): r"""Loading a ZeRO checkpoint Arguments: state_dict_list: List of all saved ZeRO checkpoints, one for each saved partition. Note that the number of saved partitions may differ from number of loading partitions to support changing GPU count, specifically DP world size, between saving and loading checkpoints. load_optimizer_states: Boolean indicating whether or not to load base optimizer states load_from_fp32_weights: Boolean indicating whether to initialize fp32 master weights from fp32 copies in checkpoints (no precision loss) or from model's fp16 copies (with precision loss). """ """ Loads a state_dict created by an earlier call to state_dict(). If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``, whose parameters in turn came from ``model``, it is expected that the user will call ``model.load_state_dict()`` before ``fp16_optimizer_instance.load_state_dict()`` is called. Example:: model = torch.nn.Linear(D_in, D_out).to(get_accelerator().device_name()).half() optimizer = torch.optim.SGD(model.parameters(), lr=1e-3) optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0) ... checkpoint = torch.load("saved.pth") model.load_state_dict(checkpoint['model']) optimizer.load_state_dict(checkpoint['optimizer']) """ if self.elastic_checkpoint: raise NotImplementedError("ZeRO-3 does not yet support elastic checkpointing, please disable for now.") if self.swap_optimizer or self.params_in_nvme_and_cpu: raise NotImplementedError( "ZeRO-3 does not yet support checkpointing with NVMe offloading, please disable for now.") self._rigid_load_state_dict(state_dict_list[dist.get_rank(group=self.dp_process_group)], load_optimizer_states=load_optimizer_states) if len(self.persistent_parameters) > 0: self.persistent_parameters[0].partition(self.persistent_parameters) self.persistent_parameters[0].all_gather(self.persistent_parameters) def checkpoint_event_prologue(self): self._partition_all_parameters() def checkpoint_event_epilogue(self): if len(self.persistent_parameters) > 0: self.persistent_parameters[0].all_gather(self.persistent_parameters) def empty_partition_cache(self): self.parameter_offload.empty_partition_cache() def _handle_overflow(cpu_sum, x, i): import math rank = dist.get_rank() if rank == 0: t_i = -1 for v_i, v in enumerate(x.data.contiguous().view(-1)): if not math.isfinite(float(v)): t_i = v_i break logger.info(f"rank {rank} detected overflow {cpu_sum} in tensor {i}:{t_i} shape {x.shape}") def estimate_zero3_model_states_mem_needs(total_params, largest_layer_params, num_gpus_per_node=1, num_nodes=1, cpu_offload=True, cpu_offload_params=True, zero_init=True, additional_buffer_factor=1.5): total_gpus = num_nodes * num_gpus_per_node gpus_factor = 1 / num_nodes largest_layer_memory = (4 * largest_layer_params) if cpu_offload: if cpu_offload_params: gpu_mem = largest_layer_memory if zero_init: cpu_mem = total_params * 18 * gpus_factor * additional_buffer_factor else: cpu_mem = total_params * max(4 * num_gpus_per_node, 18 * gpus_factor) * additional_buffer_factor else: gpu_mem = largest_layer_memory + int(2 * total_params / total_gpus) if zero_init: cpu_mem = total_params * 16 * gpus_factor * additional_buffer_factor else: cpu_mem = total_params * max(4 * num_gpus_per_node, 16 * gpus_factor) * additional_buffer_factor else: gpu_mem = largest_layer_memory + int(18 * total_params / total_gpus) if zero_init: cpu_mem = largest_layer_params * 4 * 
num_gpus_per_node * additional_buffer_factor else: cpu_mem = total_params * 4 * num_gpus_per_node * additional_buffer_factor return int(cpu_mem), int(gpu_mem), largest_layer_memory def model_to_params(model): # shared params calculated only once total_params = sum(dict((p.data_ptr(), p.numel()) for p in model.parameters()).values()) largest_layer_params = 0 for m in model.modules(): # assuming no shared params within a single layer layer_params = sum(p.numel() for p in m.parameters(recurse=False)) largest_layer_params = max(largest_layer_params, layer_params) return total_params, largest_layer_params def estimate_zero3_model_states_mem_needs_all_live(model, num_gpus_per_node=1, num_nodes=1, additional_buffer_factor=1.5): """ Print out estimates on memory usage requirements for ZeRO 3 params, optim states and gradients for a given ``model`` and hardware setup. If you have an actual model object, use this function and everything will be derived automatically. If it's a hypothetical model, use ``estimate_zero3_model_states_mem_needs_all_cold`` where you have to pass the ``total_params`` and ``largest_layer_params`` explicitly. Args: - ``model``: ``nn.Module`` object - ``num_gpus_per_node``: how many gpus per node (defaults to 1) - ``num_nodes``: how many nodes (defaults to 1), - ``additional_buffer_factor``: estimation factor (defaults to 1.5): """ total_params, largest_layer_params = model_to_params(model) estimate_zero3_model_states_mem_needs_all_cold(total_params=total_params, largest_layer_params=largest_layer_params, num_gpus_per_node=num_gpus_per_node, num_nodes=num_nodes, additional_buffer_factor=additional_buffer_factor) def estimate_zero3_model_states_mem_needs_all_cold(total_params, largest_layer_params, num_gpus_per_node=1, num_nodes=1, additional_buffer_factor=1.5): """ Print out estimates on memory usage requirements for ZeRO 3 params, optim states and gradients for a given ``model`` and hardware setup. If it's a hypothetical model, use this function where you have to pass the ``total_params`` and ``largest_layer_params`` explicitly. If you have an actual model object, use ``estimate_zero3_model_states_mem_needs_all_live`` and everything will be derived automatically. Args: - ``total_params``: total model params - ``largest_layer_params``: largest layer's params - ``num_gpus_per_node``: how many gpus per node (defaults to 1) - ``num_nodes``: how many nodes (defaults to 1), - ``additional_buffer_factor``: estimation factor (defaults to 1.5): """ def format_options(cpu_offload, cpu_offload_params, zero_init): enabled = [] padded_cpu_str = f'{OffloadDeviceEnum.cpu:4}' param_device = padded_cpu_str if cpu_offload_params else "none" enabled.append(f"offload_param={param_device}") optimizer_device = padded_cpu_str if cpu_offload else "none" enabled.append(f"offload_optimizer={optimizer_device}") enabled.append(f"zero_init={1 if zero_init else 0}") return ", ".join(enabled) nodes_str = "nodes" if num_nodes > 1 else "node" gpus_str = "GPUs" if num_gpus_per_node > 1 else "GPU" print( "Estimated memory needed for params, optim states and gradients for a:\n" f"HW: Setup with {num_nodes} {nodes_str}, {num_gpus_per_node} {gpus_str} per node.\n" f"SW: Model with {int(total_params/1e6)}M total params, {int(largest_layer_params/1e6)}M largest layer params." 
) print(" per CPU | per GPU | Options") for cpu_offload in [True, False]: for cpu_offload_params in [True, False]: if not cpu_offload and cpu_offload_params: continue for zero_init in [True, False]: cpu_mem, gpu_mem, largest_layer_memory = estimate_zero3_model_states_mem_needs( total_params=total_params, largest_layer_params=largest_layer_params, num_gpus_per_node=num_gpus_per_node, num_nodes=num_nodes, cpu_offload=cpu_offload, cpu_offload_params=cpu_offload_params, zero_init=zero_init, additional_buffer_factor=additional_buffer_factor) options_str = format_options(cpu_offload=cpu_offload, cpu_offload_params=cpu_offload_params, zero_init=zero_init) print(f" {cpu_mem/2**30:7.2f}GB | {gpu_mem/2**30:6.2f}GB | {options_str}")
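# Minimal usage sketch for the two memory estimators defined above, under the
# assumption that this file corresponds to the upstream deepspeed.runtime.zero.stage3
# module; the toy model and the parameter counts below are hypothetical.
import torch

from deepspeed.runtime.zero.stage3 import (estimate_zero3_model_states_mem_needs_all_cold,
                                           estimate_zero3_model_states_mem_needs_all_live)

if __name__ == "__main__":
    # "Live" estimate: total and largest-layer parameter counts are derived from the model object.
    toy_model = torch.nn.Sequential(torch.nn.Linear(1024, 4096), torch.nn.GELU(), torch.nn.Linear(4096, 1024))
    estimate_zero3_model_states_mem_needs_all_live(toy_model, num_gpus_per_node=8, num_nodes=1)

    # "Cold" estimate: parameter counts for a hypothetical model are passed explicitly.
    estimate_zero3_model_states_mem_needs_all_cold(total_params=1_000_000_000,
                                                   largest_layer_params=50_000_000,
                                                   num_gpus_per_node=8,
                                                   num_nodes=2)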
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from deepspeed.runtime.config_utils import get_scalar_param, DeepSpeedConfigObject

#########################################
# DeepSpeed Activation Checkpointing
#########################################
# Activation checkpointing saves memory by keeping only a select few
# activations for the backward pass.
ACTIVATION_CHKPT_FORMAT = '''
Activation Checkpointing should be configured as:
"session_params": {
  "activation_checkpointing": {
    "partition_activations": [true|false],
    "number_checkpoints": 100,
    "contiguous_memory_optimization": [true|false],
    "cpu_checkpointing": [true|false],
    "profile": [true|false],
    "synchronize_checkpoint_boundary": [true|false]
    }
  }
'''
ACT_CHKPT_PARTITION_ACTIVATIONS = 'partition_activations'
ACT_CHKPT_PARTITION_ACTIVATIONS_DEFAULT = False

ACT_CHKPT_NUMBER_CHECKPOINTS = 'number_checkpoints'
ACT_CHKPT_NUMBER_CHECKPOINTS_DEFAULT = None

ACT_CHKPT_CONTIGUOUS_MEMORY_OPTIMIZATION = 'contiguous_memory_optimization'
ACT_CHKPT_CONTIGUOUS_MEMORY_OPTIMIZATION_DEFAULT = False

ACT_CHKPT_SYNCHRONIZE_CHECKPOINT_BOUNDARY = 'synchronize_checkpoint_boundary'
ACT_CHKPT_SYNCHRONIZE_CHECKPOINT_BOUNDARY_DEFAULT = False

ACT_CHKPT_PROFILE = 'profile'
ACT_CHKPT_PROFILE_DEFAULT = False

ACT_CHKPT_CPU_CHECKPOINTING = 'cpu_checkpointing'
ACT_CHKPT_CPU_CHECKPOINTING_DEFAULT = False

ACT_CHKPT = 'activation_checkpointing'

ACT_CHKPT_DEFAULT = {
    ACT_CHKPT_PARTITION_ACTIVATIONS: ACT_CHKPT_PARTITION_ACTIVATIONS_DEFAULT,
    ACT_CHKPT_NUMBER_CHECKPOINTS: ACT_CHKPT_NUMBER_CHECKPOINTS_DEFAULT,
    ACT_CHKPT_CONTIGUOUS_MEMORY_OPTIMIZATION: ACT_CHKPT_CONTIGUOUS_MEMORY_OPTIMIZATION_DEFAULT,
    ACT_CHKPT_SYNCHRONIZE_CHECKPOINT_BOUNDARY: ACT_CHKPT_SYNCHRONIZE_CHECKPOINT_BOUNDARY_DEFAULT,
    ACT_CHKPT_PROFILE: ACT_CHKPT_PROFILE_DEFAULT,
    ACT_CHKPT_CPU_CHECKPOINTING: ACT_CHKPT_CPU_CHECKPOINTING_DEFAULT
}


class DeepSpeedActivationCheckpointingConfig(DeepSpeedConfigObject):

    def __init__(self, param_dict):
        super(DeepSpeedActivationCheckpointingConfig, self).__init__()

        self.partition_activations = None
        self.contiguous_memory_optimization = None
        self.cpu_checkpointing = None
        self.number_checkpoints = None
        self.synchronize_checkpoint_boundary = None
        self.profile = None

        if ACT_CHKPT in param_dict.keys():
            act_chkpt_config_dict = param_dict[ACT_CHKPT]
        else:
            act_chkpt_config_dict = ACT_CHKPT_DEFAULT

        self._initialize(act_chkpt_config_dict)

    def _initialize(self, act_chkpt_config_dict):
        self.partition_activations = get_scalar_param(act_chkpt_config_dict, ACT_CHKPT_PARTITION_ACTIVATIONS,
                                                      ACT_CHKPT_PARTITION_ACTIVATIONS_DEFAULT)

        self.contiguous_memory_optimization = get_scalar_param(act_chkpt_config_dict,
                                                               ACT_CHKPT_CONTIGUOUS_MEMORY_OPTIMIZATION,
                                                               ACT_CHKPT_CONTIGUOUS_MEMORY_OPTIMIZATION_DEFAULT)

        self.cpu_checkpointing = get_scalar_param(act_chkpt_config_dict, ACT_CHKPT_CPU_CHECKPOINTING,
                                                  ACT_CHKPT_CPU_CHECKPOINTING_DEFAULT)

        self.number_checkpoints = get_scalar_param(act_chkpt_config_dict, ACT_CHKPT_NUMBER_CHECKPOINTS,
                                                   ACT_CHKPT_NUMBER_CHECKPOINTS_DEFAULT)

        self.profile = get_scalar_param(act_chkpt_config_dict, ACT_CHKPT_PROFILE, ACT_CHKPT_PROFILE_DEFAULT)

        self.synchronize_checkpoint_boundary = get_scalar_param(act_chkpt_config_dict,
                                                                ACT_CHKPT_SYNCHRONIZE_CHECKPOINT_BOUNDARY,
                                                                ACT_CHKPT_SYNCHRONIZE_CHECKPOINT_BOUNDARY_DEFAULT)
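# Minimal usage sketch for the config parser above, under the assumption that this file
# lives at deepspeed.runtime.activation_checkpointing.config as in upstream DeepSpeed;
# the config values below are made up for illustration.
from deepspeed.runtime.activation_checkpointing.config import DeepSpeedActivationCheckpointingConfig

ds_config = {
    "activation_checkpointing": {
        "partition_activations": True,  # key must match ACT_CHKPT_PARTITION_ACTIVATIONS
        "number_checkpoints": 24,
        "contiguous_memory_optimization": True,
        "cpu_checkpointing": False,
        "profile": False,
        "synchronize_checkpoint_boundary": False,
    }
}

act_ckpt_config = DeepSpeedActivationCheckpointingConfig(ds_config)
assert act_ckpt_config.partition_activations is True
assert act_ckpt_config.number_checkpoints == 24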
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team '''Copyright The Microsoft DeepSpeed Team'''
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team """ Use to partition the activations stored for backward propagation Therefore reduces the memory consumption Also implements CPU checkpointing and contiguous memory checkpointing Reduces memory consumption and memory fragmentation Code for rng checkpointing taken from NVIDIA Megatron-LM mpu/random.py b886b7bb972afe72bac0f5de4f42a4a7bae8ebef """ # Parts of the code here are adapted from PyTorch # repo: https://github.com/pytorch/pytorch import copy import torch import contextlib from deepspeed import comm as dist import mmap from torch import _C from deepspeed.runtime.config import DeepSpeedConfig from deepspeed.utils import logger from deepspeed.runtime.utils import copy_to_device, move_to_device, see_memory_usage, bwc_tensor_model_parallel_rank from deepspeed.utils.timer import SynchronizedWallClockTimer as Timers from deepspeed.accelerator import get_accelerator # DeepSpeed Checkpointing Enabled or Disabled deepspeed_checkpointing_enabled = False # MP parameters mpu = None mp_rank = None mp_size = None mp_group = None # Model Parameters num_layers = None # Checkpointing buffers contiguous_data_buffers = [] data_offsets = [] contiguous_size_buffers = [] size_offsets = [] timers = None # optimization flags PARTITION_ACTIVATIONS = False CPU_CHECKPOINT = False CONTIGUOUS_CHECKPOINTING = False SYNCHRONIZE = False PROFILE_TIME = False # Default name for the model parallel rng tracker. _MODEL_PARALLEL_RNG_TRACKER_NAME = 'model-parallel-rng' transport_stream = None cuda_device = None def detach_variable(inputs, device=None): if isinstance(inputs, tuple): out = [] for inp in inputs: if not isinstance(inp, torch.Tensor): out.append(inp) continue requires_grad = inp.requires_grad if device is not None: x = inp.to(device=device) else: x = inp x = x.detach() x.requires_grad = requires_grad out.append(x) return tuple(out) else: raise RuntimeError("Only tuple of tensors is supported. Got Unsupported input type: ", type(inputs).__name__) def _set_cuda_rng_state(new_state, device=-1): """Sets the random number generator state of the current GPU. Arguments: new_state (torch.ByteTensor): The desired state This function is adapted from PyTorch repo (torch.cuda.set_rng_state) #ignore-cuda with a single change: the input state is not cloned. Cloning caused major performance issues for +4 GPU cases. """ if hasattr(_C, '_cuda_setRNGState') and callable(_C._cuda_setRNGState): # older PyTorch def cb(): with get_accelerator().device(device): _C._cuda_setRNGState(new_state) else: # newer PyTorch if device == -1: device = torch.device(get_accelerator().device_name()) elif isinstance(device, str): device = torch.device(device) elif isinstance(device, int): device = torch.device(get_accelerator().device_name(), device) def cb(): idx = device.index if idx is None: idx = get_accelerator().current_device() default_generator = get_accelerator().default_generator(idx) default_generator.set_state(new_state) get_accelerator().lazy_call(cb) class CudaRNGStatesTracker: """Tracker for the cuda RNG states. Using the `add` method, a cuda rng state is initialized based on the input `seed` and is assigned to `name`. Later, by forking the rng state, we can perform operations and return to our starting cuda state. """ def __init__(self): # Map from a string name to the cuda rng state. self.states_ = {} # Seeds are just for book keeping and ensure no seed is set twice. 
self.seeds_ = set() def reset(self): """Set to the initial state (no tracker).""" self.states_ = {} self.seeds_ = set() def get_states(self): """Get rng states. Copy the dictionary so we have direct pointers to the states, not just a pointer to the dictionary.""" return copy.copy(self.states_) def set_states(self, states): """Set the rng states. For efficiency purposes, we do not check the size of seed for compatibility.""" self.states_ = states def add(self, name, seed): """Track the rng state.""" # Check seed is not already used. if seed in self.seeds_: raise Exception('seed {} already exists'.format(seed)) self.seeds_.add(seed) # Check that state is not already defined. if name in self.states_: raise Exception('cuda rng state {} already exists'.format(name)) # Get the current rng state. orig_rng_state = get_accelerator().get_rng_state() # Set the new state and store it. get_accelerator().manual_seed(seed) self.states_[name] = get_accelerator().get_rng_state() # Reset rng state to what it was. _set_cuda_rng_state(orig_rng_state) @contextlib.contextmanager def fork(self, name=_MODEL_PARALLEL_RNG_TRACKER_NAME): """Fork the cuda rng state, perform operations, and exit with the original state.""" # Check if we have added the state if name not in self.states_: raise Exception('cuda rng state {} is not added'.format(name)) # Store current rng state. orig_cuda_rng_state = get_accelerator().get_rng_state() # Set rng state to the desired one _set_cuda_rng_state(self.states_[name]) # Do the stuff we wanted to do. try: yield finally: # Update the current rng state for later use. self.states_[name] = get_accelerator().get_rng_state() # And set the state to the original state we started with. _set_cuda_rng_state(orig_cuda_rng_state) # RNG tracker object. _CUDA_RNG_STATE_TRACKER = CudaRNGStatesTracker() def get_cuda_rng_tracker(): """Get cuda rng tracker.""" return _CUDA_RNG_STATE_TRACKER def model_parallel_cuda_manual_seed(seed): """Initialize model parallel cuda seed. This function should be called after the model parallel is initialized. Also, no get_accelerator().manual_seed should be called after this function. Basically, this is replacement for that function. Two set of RNG states are tracked: default state: This is for data parallelism and is the same among a set of model parallel GPUs but different across different model parallel groups. This is used for example for dropout in the non-model-parallel regions. model-parallel state: This state is different among a set of model parallel GPUs, but the same across data parallel groups. This is used for example for dropout in model parallel regions. """ global mpu tp_rank = bwc_tensor_model_parallel_rank(mpu) # 2718 is just for fun and any POSITIVE value will work. offset = seed + 2718 model_parallel_seed = offset + tp_rank # Data parallel gets the original seed. data_parallel_seed = seed if dist.get_rank() == 0: logger.info( '> initializing model parallel cuda seeds on global rank {}, ' 'model parallel rank {}, and data parallel rank {} with ' 'model parallel seed: {} and data parallel seed: {}'.format(dist.get_rank(), tp_rank, mpu.get_data_parallel_rank(), model_parallel_seed, data_parallel_seed), ) _CUDA_RNG_STATE_TRACKER.reset() # Set the default state. get_accelerator().manual_seed(data_parallel_seed) # and model parallel state. 
_CUDA_RNG_STATE_TRACKER.add(_MODEL_PARALLEL_RNG_TRACKER_NAME, model_parallel_seed) def get_partition_start(item): global mp_rank, mp_size, mp_group size = item.numel() partition_size = size / mp_size start = partition_size * mp_rank return int(start) def get_partition_size(item): global mp_rank, mp_size, mp_group size = item.numel() assert size % mp_size == 0, "Doesn't handle if partition activation if item is not divisible by mp size" partition_size = size / mp_size return int(partition_size) def gather_partitioned_activations(tensors, device=None): global mp_rank, mp_size, mp_group assert len(tensors) % 2 == 0, f'Expected even count of tensors, instead got {len(tensors)}' inputs = [] num_args = int(len(tensors) / 2) for i in range(num_args): item = tensors[2 * i] size = tensors[2 * i + 1] if not is_activation_to_checkpoint(item): inputs.append(item) continue # don't need to do all_gather if model parallel is not enabled if mp_group is None or mp_size == 1: item = item.view(list(size.numpy())) inputs.append(item) continue partition_size = item.numel() tensor_size = partition_size * mp_size if device is not None: flat_tensor = torch.zeros([tensor_size], dtype=item.dtype, device=device) else: flat_tensor = torch.zeros([tensor_size], dtype=item.dtype, device=item.device) partitions = [] for i in range(mp_size): part_i = flat_tensor.narrow(0, partition_size * i, partition_size) if i == mp_rank: part_i.copy_(item) partitions.append(part_i) dist.all_gather(partitions, partitions[mp_rank], group=mp_group) input_tensor = flat_tensor.view(list(size.numpy())) item.data = input_tensor.data inputs.append(item) return tuple(inputs) def extract_tensors(all_objects): """ Separate objects in list/tuple into tensors and non-tensors and create a mapping to enable re-aggregation. The order of tensors and non-tensors is preserved in their respective output groups. Parameters: all_objects (list/tuple): Objects containing tensors and non-tensors to be split. Returns: tuple: Containing tensors, non-tensors, and bools of whether each position in original list/tuple was a tensor. """ tensor_objects = [v for v in all_objects if torch.is_tensor(v)] non_tensor_objects = [v for v in all_objects if not torch.is_tensor(v)] tensor_flags = [torch.is_tensor(v) for v in all_objects] if type(all_objects) is tuple: return tuple(tensor_objects), tuple(non_tensor_objects), tuple(tensor_flags) return tensor_objects, non_tensor_objects, tensor_flags def merge_tensors(tensor_objects, non_tensor_objects, tensor_flags): """ Merge two lists (or tuples) of tensors and non-tensors using a mapping of positions in merged list (or tuple). Parameters: tensor_objects (list/tuple): Tensors to merge. non_tensor_objects (list/tuple): Non-tensors to merge. tensor_flags (list/tuple): Indicates whether each position in output is a tensor. 
Returns: tuple: Merge of tensors and non-tensors """ merged_objects = [] tensor_idx = 0 non_tensor_idx = 0 real_tensor_flags = None # remove the flags that are assigned to the size of the flattened tensors if PARTITION_ACTIVATIONS: real_tensor_flags = [] previous_flag = False for flag in tensor_flags: if previous_flag: previous_flag = False continue previous_flag = flag real_tensor_flags.append(flag) else: real_tensor_flags = tensor_flags for is_tensor in real_tensor_flags: if is_tensor: merged_objects.append(tensor_objects[tensor_idx]) tensor_idx += 1 else: merged_objects.append(non_tensor_objects[non_tensor_idx]) non_tensor_idx += 1 return tuple(merged_objects) def is_activation_to_checkpoint(item): """ Is an activation to be checkpointed """ global mp_size return torch.is_tensor(item) and item.is_floating_point() and item.numel() >= mp_size def partition_activations(args, cpu_checkpoint, contiguous_checkpoint): global contiguous_data_buffers, data_offsets inputs = [] num_non_fp_tensors = 0 for arg_index, item in enumerate(args): if not is_activation_to_checkpoint(item): inputs.append(item) num_non_fp_tensors += 1 continue i = arg_index - num_non_fp_tensors partition_size = get_partition_size(item) partition = item.detach().contiguous().view(-1).narrow(0, get_partition_start(item), partition_size).clone() buffer_device = torch.device('cpu') if cpu_checkpoint else partition.device if contiguous_checkpoint: if i >= len(contiguous_data_buffers): tensor_list = [ torch.tensor(()).new_empty([partition_size], dtype=partition.dtype, device=buffer_device) for _ in range(num_layers) ] contiguous_data_buffers.append(tensor_list) data_offsets.append(0) elif contiguous_data_buffers[i] is None: tensor_list = [ torch.tensor(()).new_empty([partition_size], dtype=partition.dtype, device=buffer_device) for _ in range(num_layers) ] contiguous_data_buffers[i] = tensor_list data_offsets[i] = 0 # Because the 'new_empty' returns uninitialized pages, # the pages need to be populated during the cudaMemcpy time # which increases the data copy time. To avoid this, we # pre-populate these pages by simply writing 0 ahead of # the actual cudaMemcpy operation time. Due to the # previously launched GPU kernels, there is a small # window of time here for CPUs to populate pages asynchronously. 
contiguous_data_buffers[i][data_offsets[i]].data[range( 0, contiguous_data_buffers[i][data_offsets[i]].data.shape[0], int(mmap.PAGESIZE / contiguous_data_buffers[i][data_offsets[i]].data.element_size()))] = 0 contiguous_partition = contiguous_data_buffers[i][data_offsets[i]].data.copy_(partition.data) data_offsets[i] = data_offsets[i] + 1 inputs.append(contiguous_partition) else: partition = partition.cpu() if CPU_CHECKPOINT else partition inputs.append(partition) return inputs def get_partitioned_activations_for_backward(args, inputs, contiguous_checkpoint): global contiguous_size_buffers, size_offsets new_args = [] num_non_fp_tensors = 0 for arg_index, (arg, inp) in enumerate(zip(args, inputs)): size = torch.tensor(arg.size()) if torch.is_tensor(arg) else None if not is_activation_to_checkpoint(arg): new_args.append(arg) new_args.append(size) num_non_fp_tensors += 1 continue arg.data = inp.data new_args.append(arg) i = arg_index - num_non_fp_tensors if contiguous_checkpoint: numel = size.numel() if i >= len(contiguous_size_buffers): tmp = torch.tensor(()) contiguous_size_buffers.append( tmp.new_empty([numel * num_layers], dtype=size.dtype, device=size.device)) size_offsets.append(0) elif contiguous_size_buffers[i] is None: tmp = torch.tensor(()) contiguous_size_buffers[i] = tmp.new_empty([numel * num_layers], dtype=size.dtype, device=size.device) size_offsets[i] = 0 contiguous_size = contiguous_size_buffers[i].narrow(0, size_offsets[i], numel).data.copy_(size.data) contiguous_size = contiguous_size.view_as(size) size_offsets[i] = size_offsets[i] + numel new_args.append(contiguous_size) else: new_args.append(size) return new_args def get_cpu_activations_for_backward(args, inputs): new_args = [] for i, (arg, inp) in enumerate(zip(args, inputs)): if not is_activation_to_checkpoint(arg): new_args.append(arg) continue arg.data = inp.data new_args.append(arg) return new_args class CheckpointFunction(torch.autograd.Function): """This function is adapted from torch.utils.checkpoint with two main changes: 1) torch.cuda.set_rng_state is replaced with `_set_cuda_rng_state` #ignore-cuda 2) the states in the model parallel tracker are also properly tracked/set/reset. 
3) Performance activation partitioning, contiguous memory optimization 4) CPU Checkpointing 5) Profile forward and backward functions """ @staticmethod def forward(ctx, run_function, all_outputs, *args): global mpu, timers, SYNCHRONIZE, PROFILE_TIME def save_args_for_backward(*all_args): tensor_args, non_tensor_args, tensor_flags = extract_tensors(all_objects=all_args) ctx.deepspeed_saved_tensors = tensor_args ctx.non_tensor_args = non_tensor_args ctx.tensor_flags = tensor_flags if SYNCHRONIZE: get_accelerator().synchronize() if timers is None and PROFILE_TIME: timers = Timers() if PROFILE_TIME: timers('forward').start() ctx.run_function = run_function global num_layers global mp_rank, mp_size, mp_group global contiguous_data_buffers, contiguous_size_buffers global data_offsets, size_offsets if mp_rank is None: if mpu is not None: if hasattr(mpu, 'get_tensor_model_parallel_rank'): mp_rank = mpu.get_tensor_model_parallel_rank() mp_size = mpu.get_tensor_model_parallel_world_size() mp_group = mpu.get_tensor_model_parallel_group() else: mp_rank = mpu.get_model_parallel_rank() mp_size = mpu.get_model_parallel_world_size() mp_group = mpu.get_model_parallel_group() else: mp_rank = 0 mp_size = 1 mp_group = None global cuda_device, transport_stream, PARTITION_ACTIVATIONS, buffer_0, buffer_1, buffer_0_offset, buffer_1_offset if cuda_device is None: see_memory_usage("First Forward Beginning", force=False) if dist.get_rank() == 0: logger.info(f"Activation Checkpointing Information") logger.info(f"----Partition Activations {PARTITION_ACTIVATIONS}, CPU CHECKPOINTING {CPU_CHECKPOINT}") logger.info( f"----contiguous Memory Checkpointing {CONTIGUOUS_CHECKPOINTING} with {num_layers} total layers") logger.info(f"----Synchronization {SYNCHRONIZE}") logger.info(f"----Profiling time in checkpointing {PROFILE_TIME}") cuda_device = get_accelerator().current_device_name() transport_stream = get_accelerator().Stream(device=cuda_device) if PARTITION_ACTIVATIONS: inputs = partition_activations(args, CPU_CHECKPOINT, CONTIGUOUS_CHECKPOINTING) elif CPU_CHECKPOINT: inputs = copy_to_device(args, device=torch.device('cpu'), criterion_func=is_activation_to_checkpoint) # just in case something funky is happening such as reuse of inputs inputs_cuda = copy_to_device(args, device=cuda_device, criterion_func=is_activation_to_checkpoint) # Copy the rng states. ctx.fwd_cpu_rng_state = torch.get_rng_state() ctx.fwd_cuda_rng_state = get_accelerator().get_rng_state() ctx.fwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states() see_memory_usage("Before running forward on the layer", force=False) # ctx.save_for_backward(*args) with torch.no_grad(): outputs = run_function(*inputs_cuda) see_memory_usage("After running forward on the layer", force=False) del inputs_cuda if PARTITION_ACTIVATIONS: new_args = get_partitioned_activations_for_backward(args, inputs, CONTIGUOUS_CHECKPOINTING) assert len(new_args) % 2 == 0, f'save_for_backward called with odd number of args, {len(new_args)}' save_args_for_backward(*new_args) elif CPU_CHECKPOINT: new_args = get_cpu_activations_for_backward(args, inputs) save_args_for_backward(*new_args) else: save_args_for_backward(*args) if PROFILE_TIME: timers('forward').stop() timers.log(['forward']) if SYNCHRONIZE: get_accelerator().synchronize() # Tensors returned from forward() may not be differentiable. 
if torch.is_tensor(outputs): non_grad_outputs = [outputs] if not outputs.is_floating_point() else [] else: non_grad_outputs = [o for o in outputs if torch.is_tensor(o) and not o.is_floating_point()] ctx.mark_non_differentiable(*non_grad_outputs) if torch.is_tensor(outputs): all_outputs += [outputs] return outputs else: all_outputs += outputs outputs, _, _ = extract_tensors(all_objects=outputs) return tuple(outputs) @staticmethod def backward(ctx, *grads): global timers see_memory_usage("In backward", force=False) # removing pointers to the contiguous buffer memory # so that they can be garbage collected once the checkpoints # have been used if SYNCHRONIZE: get_accelerator().synchronize() if PROFILE_TIME: timers('backward').start() if CONTIGUOUS_CHECKPOINTING: global data_offsets, size_offsets global contiguous_data_buffers, contiguous_size_buffers for buffers in contiguous_data_buffers: buffers = [] # frees up all the pointers to the checkpoints except for the ones # stored by save for backward contiguous_data_buffers = [] contiguous_size_buffers = [] data_offsets = [] size_offsets = [] see_memory_usage("In backward checkpointing code", force=False) if not torch.autograd._is_checkpoint_valid(): raise RuntimeError("Checkpointing is not compatible with .grad(), " "please use .backward() if possible") global cuda_device, transport_stream, PARTITION_ACTIVATIONS if PARTITION_ACTIVATIONS: # with get_accelerator().stream(transport_stream): inputs = gather_partitioned_activations(ctx.deepspeed_saved_tensors, device=cuda_device if CPU_CHECKPOINT else None) detached_inputs = detach_variable(inputs) elif CPU_CHECKPOINT: inputs = move_to_device(ctx.deepspeed_saved_tensors, cuda_device, is_activation_to_checkpoint) detached_inputs = detach_variable(inputs) else: inputs = ctx.deepspeed_saved_tensors detached_inputs = detach_variable(inputs) # Add non tensor input args detached_inputs = merge_tensors(tensor_objects=detached_inputs, non_tensor_objects=ctx.non_tensor_args, tensor_flags=ctx.tensor_flags) # Store the current states. bwd_cpu_rng_state = torch.get_rng_state() bwd_cuda_rng_state = get_accelerator().get_rng_state() bwd_cuda_rng_state_tracker = get_cuda_rng_tracker().get_states() # Set the states to what it used to be before the forward pass. torch.set_rng_state(ctx.fwd_cpu_rng_state) _set_cuda_rng_state(ctx.fwd_cuda_rng_state) get_cuda_rng_tracker().set_states(ctx.fwd_cuda_rng_state_tracker) # if PARTITION_ACTIVATIONS: # current_stream=get_accelerator().current_stream() # current_stream.wait_stream(transport_stream) see_memory_usage("In backward checkpointing code before forward", force=False) with torch.enable_grad(): outputs = ctx.run_function(*detached_inputs) see_memory_usage("In backward checkpointing code after forward", force=False) # Set the states back to what it was at the start of this function. torch.set_rng_state(bwd_cpu_rng_state) _set_cuda_rng_state(bwd_cuda_rng_state) get_cuda_rng_tracker().set_states(bwd_cuda_rng_state_tracker) if isinstance(outputs, torch.Tensor): outputs = (outputs, ) # Filter out non tensor outputs outputs, _, _ = extract_tensors(all_objects=outputs) # Construct arguments to autograd.backward(). # This is usually just outputs and grads, but forward() can return tensors that # are not differentiable. 
output_tensors = [] grad_tensors = [] for out, grad in zip(outputs, grads): if out.requires_grad: output_tensors.append(out) grad_tensors.append(grad) see_memory_usage("In backward checkpointing code before backward", force=False) torch.autograd.backward(output_tensors, grad_tensors) # Force clear our stashed tensors to prevent a memory leak in certain scenarios ctx.deepspeed_saved_tensors = None ctx.non_tensor_args = None ctx.tensor_flags = None see_memory_usage("After backward checkpointing code after backward", force=False) if PROFILE_TIME: timers('backward').stop() timers.log(['backward']) if SYNCHRONIZE: get_accelerator().synchronize() ret_list = [None, None] # first None for ctx for inp in detached_inputs: if torch.is_tensor(inp): ret_list.append(inp.grad) else: ret_list.append(None) return tuple(ret_list) def checkpoint(function, *args): """Checkpoint a model or part of the model. This has been directly copied from torch.utils.checkpoint. """ all_outputs = [] CheckpointFunction.apply(function, all_outputs, *args) if len(all_outputs) == 1: return all_outputs[0] else: return tuple(all_outputs) def partition_activations_in_checkpoint(partition_activation): global PARTITION_ACTIVATIONS PARTITION_ACTIVATIONS = partition_activation if dist.get_rank() == 0: logger.info(f"**************Partition Activations {PARTITION_ACTIVATIONS}************") def set_num_layers(nlayers): global num_layers num_layers = nlayers def reset(): """Resets memory buffers related to contiguous memory optimizations. Should be called during eval when multiple forward propagations are computed without any backward propagation that usually clears these buffers. Arguments: None Return: None """ if CONTIGUOUS_CHECKPOINTING: global data_offsets, size_offsets global contiguous_data_buffers, contiguous_size_buffers for buffers in contiguous_data_buffers: buffers = [] # frees up all the pointers to the checkpoints except for the ones # stored by save for backward contiguous_data_buffers = [] contiguous_size_buffers = [] data_offsets = [] size_offsets = [] def _configure_using_config_file(config, mpu=None): global num_layers, PARTITION_ACTIVATIONS, CONTIGUOUS_CHECKPOINTING, \ CPU_CHECKPOINT, SYNCHRONIZE, PROFILE_TIME config = DeepSpeedConfig(config, mpu=mpu).activation_checkpointing_config if dist.get_rank() == 0: logger.info(config.repr()) PARTITION_ACTIVATIONS = config.partition_activations CONTIGUOUS_CHECKPOINTING = config.contiguous_memory_optimization num_layers = config.number_checkpoints CPU_CHECKPOINT = config.cpu_checkpointing SYNCHRONIZE = config.synchronize_checkpoint_boundary PROFILE_TIME = config.profile def _configure_defaults(): global mpu, num_layers, deepspeed_checkpointing_enabled global PARTITION_ACTIVATIONS, CONTIGUOUS_CHECKPOINTING, \ CPU_CHECKPOINT, SYNCHRONIZE, PROFILE_TIME PARTITION_ACTIVATIONS = False CONTIGUOUS_CHECKPOINTING = False num_layers = False CPU_CHECKPOINT = False SYNCHRONIZE = False PROFILE_TIME = False deepspeed_checkpointing_enabled = True def configure( mpu_, deepspeed_config=None, partition_activations=None, contiguous_checkpointing=None, num_checkpoints=None, checkpoint_in_cpu=None, synchronize=None, profile=None, ): """Configure DeepSpeed Activation Checkpointing. 
Arguments: mpu_: Optional: An object that implements the following methods get_model_parallel_rank/group/world_size, and get_data_parallel_rank/group/world_size deepspeed_config: Optional: DeepSpeed Config json file when provided will be used to configure DeepSpeed Activation Checkpointing partition_activations: Optional: Partitions activation checkpoint across model parallel GPUs when enabled. By default False. Will overwrite deepspeed_config if provided contiguous_checkpointing: Optional: Copies activation checkpoints to a contiguous memory buffer. Works only with homogeneous checkpoints when partition_activations is enabled. Must provide num_checkpoints. By default False. Will overwrite deepspeed_config if provided num_checkpoints: Optional: Number of activation checkpoints stored during the forward propagation of the model. Used to calculate the buffer size for contiguous_checkpointing Will overwrite deepspeed_config if provided checkpoint_in_cpu: Optional: Moves the activation checkpoint to CPU. Only works with partition_activation. Default is false. Will overwrite deepspeed_config if provided synchronize: Optional: Performs get_accelerator().synchronize() at the beginning and end of each call to deepspeed.checkpointing.checkpoint for both forward and backward pass. By default false. Will overwrite deepspeed_config if provided profile: Optional: Logs the forward and backward time for each deepspeed.checkpointing.checkpoint invocation. Will overwrite deepspeed_config if provided Returns: None """ global mpu, num_layers, deepspeed_checkpointing_enabled global PARTITION_ACTIVATIONS, CONTIGUOUS_CHECKPOINTING, \ CPU_CHECKPOINT, SYNCHRONIZE, PROFILE_TIME _configure_defaults() if mpu_ is not None: mpu = mpu_ if deepspeed_config is not None: _configure_using_config_file(deepspeed_config, mpu=mpu) if partition_activations is not None: PARTITION_ACTIVATIONS = partition_activations if contiguous_checkpointing is not None: CONTIGUOUS_CHECKPOINTING = contiguous_checkpointing if num_checkpoints is not None: num_layers = num_checkpoints if checkpoint_in_cpu is not None: CPU_CHECKPOINT = checkpoint_in_cpu if synchronize is not None: SYNCHRONIZE = synchronize if profile is not None: PROFILE_TIME = profile if CONTIGUOUS_CHECKPOINTING: assert PARTITION_ACTIVATIONS, "Contiguous Checkpointing is only available with partitioned activations. Set partitioned activations to true in deepspeed config" if CONTIGUOUS_CHECKPOINTING: assert num_layers is not None, "Must specify the number of layers with contiguous memory checkpointing" def is_configured(): """True if deepspeed activation checkpointing has been configured by calling deepspeed.checkpointing.configure, else returns false Arguments: None Return: True of configured, else False """ return deepspeed_checkpointing_enabled
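# --- Editor's note: a minimal usage sketch for the configure()/checkpoint()
# entry points above, assuming a DeepSpeed install with a visible accelerator.
# The two-layer block is a hypothetical stand-in for a transformer layer; the
# module path follows the docstrings above (deepspeed.checkpointing).
import torch
import deepspeed

deepspeed.checkpointing.configure(mpu_=None, partition_activations=False, profile=False)

block = torch.nn.Sequential(torch.nn.Linear(16, 16), torch.nn.ReLU())
x = torch.randn(4, 16, requires_grad=True)

# Activations of `block` are dropped in the forward pass and recomputed by
# CheckpointFunction.backward() above when gradients are needed.
y = deepspeed.checkpointing.checkpoint(block, x)
y.sum().backward()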
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team '''Copyright The Microsoft DeepSpeed Team'''
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import cupy
from torch.utils.dlpack import to_dlpack
from torch.utils.dlpack import from_dlpack


class CupyBackend(object):

    def __init__(self):
        pass

    def torch2cupy(self, tensor):
        return cupy.fromDlpack(to_dlpack(tensor))

    def cupy2torch(self, cupy_tensor):
        return from_dlpack(cupy_tensor.toDlpack())

    def compress_by_chunk(self, cupy_bool_tensor, num_chunks):
        packed_sign = cupy.packbits(cupy_bool_tensor)
        sign_list_packed = cupy.split(packed_sign, num_chunks)
        cupy.cuda.get_current_stream().synchronize()
        return sign_list_packed
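# --- Editor's note: a small usage sketch for the CupyBackend above (requires
# cupy and a CUDA device). One-bit compression packs the signs of a gradient
# buffer into bits; the buffer size and chunk count are arbitrary examples.
import torch

backend = CupyBackend()
signs = torch.randn(1024, device='cuda').sign_().add_(1).bool()   # True where grad >= 0
cupy_signs = backend.torch2cupy(signs)                            # zero-copy via DLPack
packed = backend.compress_by_chunk(cupy_signs, num_chunks=4)      # 4 chunks of bit-packed uint8
restored = backend.cupy2torch(packed[0])                          # back to a torch.Tensor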
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import os import torch import torch_nebula from deepspeed.runtime.checkpoint_engine.checkpoint_engine import \ CheckpointEngine from deepspeed.utils import logger, log_dist from deepspeed.nebula.constants import * def _get_tag_from_path(path): return os.path.basename(os.path.dirname(path)) class NebulaCheckpointEngine(CheckpointEngine): def __init__(self, config_params=None): super().__init__(config_params) self.checkpoint = None self.tag_flag = None self.enable_nebula_load = config_params.enable_nebula_load self.nebula_load_path = config_params.load_path if self.nebula_load_path is None: self.nebula_load_path = config_params.persistent_storage_path nebula_config_params = { NEBULA_PERSISTENT_STORAGE_PATH: config_params.persistent_storage_path, NEBULA_PERSISTENT_TIME_INTERVAL: config_params.persistent_time_interval, NEBULA_NUM_OF_VERSION_IN_RETENTION: config_params.num_of_version_in_retention, } torch_nebula.init(**nebula_config_params) def create(self, tag): log_dist(f"[Nebula] Start Checkpoint for tag:{tag}", ranks=[0]) # -2 means: customer needs to explicitly tell nebula # current checkpoint is complete by commit method. self.checkpoint = torch_nebula.Checkpoint(tag, -2) def save(self, state_dict, path: str): log_dist(f"[Nebula] Create dummy files for loading.") torch.save("", path) tag = _get_tag_from_path(path) partititon_name = os.path.basename(path) logger.info(f"[Nebula] Saving {partititon_name} under tag {tag}...") self.checkpoint.save(partititon_name, state_dict) logger.info(f"[Nebula] Saved {partititon_name} under tag {tag}.") return None def load(self, path: str, map_location=None): tag = _get_tag_from_path(path) first_load_flag = self.tag_flag is None or self.tag_flag == tag if not self.enable_nebula_load and first_load_flag: self.tag_flag = tag logger.info(f"[Nebula] Disable nebula load. Loading checkpoint from {path} ...") partition = torch.load(path, map_location=map_location) logger.info(f"[Nebula] Disable nebula load. Loaded checkpoint from {path} .") return partition partititon_name = os.path.basename(path) logger.info(f"[Nebula] Loading {path} under tag {tag} from nebula path {self.nebula_load_path}...") checkpoint = None if tag in (None, 'latest', 'latest_universal'): # In some cases, there is the inconsistent tag between deepspeed metadata (latest file) # and nebula metadata, will lead to the failure on loading with deepspeed tag. Then we # will try to load the valid latest checkpoint from nebula(tier3 > tier1). So, in summary # when met failure loading for given tag, the loading priority would be like: # nebula tier3 latest > nebula tier1 latest. checkpoint = torch_nebula.get_latest_checkpoint(persist_path=self.nebula_load_path) else: checkpoint = torch_nebula.get_checkpoint(tag=tag, persist_path=self.nebula_load_path) if checkpoint is None or (checkpoint is not None and checkpoint.tag == ''): logger.info( f"Unable to find valid checkpoint tag:{tag} from Nebula, try to get latest checkpoint again from nebula {self.nebula_load_path} path!" ) # nebula tier3 latest checkpoint = torch_nebula.get_latest_checkpoint(persist_path=self.nebula_load_path) if checkpoint is None or (checkpoint is not None and checkpoint.tag == ''): logger.info( f"Unable to find latest checkpoint from Nebula tier3, try to get latest checkpoint again from nebula tier1 path!" 
) # nebula tier1 latest checkpoint = torch_nebula.get_latest_checkpoint() if checkpoint is None or (checkpoint is not None and checkpoint.tag == ''): logger.warning(f"Unable to find valid checkpoint from Nebula under tag:{tag}.") return None tag = checkpoint.tag self.tag_flag = -1 partition = checkpoint.load(partititon_name, map_location=map_location) logger.info(f"[Nebula] Loaded {path} under tag {tag} from {self.nebula_load_path}.") return partition def commit(self, tag): # nebula commit is called when all files under a given tag are ready to be persisted in an async way. logger.info(f"[Nebula] all files for {tag} are saved in tier1. It is ready to start persisting") commit_rls = self.checkpoint.commit() if not commit_rls: logger.error(f"[Nebula] failed to commit the checkpoint, please check the log.") return False return commit_rls
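# --- Editor's note: the tag-fallback logic of load() above, distilled into a
# standalone helper for readability. This is an illustrative sketch, not part
# of the engine; it only uses the torch_nebula calls already shown above.
def _resolve_nebula_checkpoint(tag, persist_path):
    # Priority: explicit tag -> nebula tier3 latest under persist_path -> nebula tier1 latest.
    if tag not in (None, 'latest', 'latest_universal'):
        ckpt = torch_nebula.get_checkpoint(tag=tag, persist_path=persist_path)
        if ckpt is not None and ckpt.tag != '':
            return ckpt
    ckpt = torch_nebula.get_latest_checkpoint(persist_path=persist_path)
    if ckpt is not None and ckpt.tag != '':
        return ckpt
    return torch_nebula.get_latest_checkpoint()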
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team '''Copyright The Microsoft DeepSpeed Team'''
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import os


class CheckpointEngine(object):

    # init checkpoint engine for save/load
    def __init__(self, config_params=None):
        pass

    def create(self, tag):
        # create checkpoint on given tag for save/load.
        pass

    def makedirs(self, path, exist_ok=False):
        os.makedirs(path, exist_ok=exist_ok)

    def save(self, state_dict, path: str):
        pass

    def load(self, path: str, map_location=None):
        pass

    def commit(self, tag):
        # to tell checkpoint services that all files are ready.
        pass
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch

from deepspeed.utils import logger, log_dist
from deepspeed.runtime.checkpoint_engine.checkpoint_engine import \
    CheckpointEngine


class TorchCheckpointEngine(CheckpointEngine):

    def __init__(self, config_params=None):
        super().__init__(config_params)

    def create(self, tag):
        log_dist(f"[Torch] Checkpoint {tag} is about to be saved!", ranks=[0])

    def save(self, state_dict, path: str):
        logger.info(f"[Torch] Saving {path}...")
        torch.save(state_dict, path)
        logger.info(f"[Torch] Saved {path}.")
        return None

    def load(self, path: str, map_location=None):
        logger.info(f"[Torch] Loading checkpoint from {path}...")
        partition = torch.load(path, map_location=map_location)
        logger.info(f"[Torch] Loaded checkpoint from {path}.")
        return partition

    def commit(self, tag):
        logger.info(f"[Torch] Checkpoint {tag} is ready now!")
        return True
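# --- Editor's note: a save/load round trip with the TorchCheckpointEngine
# above. The path and toy state dict are illustrative; commit() simply
# acknowledges that all files for the tag have been written.
import os
import tempfile
import torch

engine = TorchCheckpointEngine()
engine.create(tag="global_step1")
ckpt_dir = tempfile.mkdtemp()
path = os.path.join(ckpt_dir, "mp_rank_00_model_states.pt")
engine.save({"weight": torch.zeros(2, 2)}, path)
state = engine.load(path, map_location="cpu")
engine.commit(tag="global_step1")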
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team """ Copyright NVIDIA/apex This file is adapted from FP16_Optimizer in NVIDIA/apex """ import torch from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors from deepspeed.runtime import DeepSpeedOptimizer from deepspeed.runtime.utils import get_global_norm, get_grad_norm, CheckOverflow, get_weight_norm from deepspeed.runtime.fp16.loss_scaler import INITIAL_LOSS_SCALE, SCALE_WINDOW, MIN_LOSS_SCALE from deepspeed.utils import groups, logger, log_dist from deepspeed import comm as dist from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT, CLIP_GRAD from deepspeed.accelerator import get_accelerator class FP16_Optimizer(DeepSpeedOptimizer): """ FP16 Optimizer for training fp16 models. Handles loss scaling. For usage example please see, TODO: DeepSpeed V2 Tutorial """ def __init__(self, init_optimizer, deepspeed=None, static_loss_scale=1.0, dynamic_loss_scale=False, initial_dynamic_scale=2**32, dynamic_loss_args=None, verbose=True, mpu=None, clip_grad=0.0, fused_adam_legacy=False, has_moe_layers=False, timers=None): self.fused_adam_legacy = fused_adam_legacy self.timers = timers self.deepspeed = deepspeed self.has_moe_layers = has_moe_layers self.using_pipeline = self.deepspeed.pipeline_parallelism if not get_accelerator().is_available(): raise SystemError("Cannot use fp16 without accelerator.") self.optimizer = init_optimizer # param flattened by groups self.fp16_groups = [] self.fp16_groups_flat = [] self.fp32_groups_flat = [] self._global_grad_norm = 0. # loop to deal with groups for i, param_group in enumerate(self.optimizer.param_groups): # push this group to list before modify self.fp16_groups.append(param_group['params']) # init fp16 weight buffer, flattened self.fp16_groups_flat.append(_flatten_dense_tensors([p.clone().detach() for p in self.fp16_groups[i]])) # set model fp16 weight to slices of flattened buffer updated_params = _unflatten_dense_tensors(self.fp16_groups_flat[i], self.fp16_groups[i]) for p, q in zip(self.fp16_groups[i], updated_params): p.data = q.data # init master weight, flattened self.fp32_groups_flat.append(self.fp16_groups_flat[i].clone().float().detach()) # modify optimizer of have flat master weight self.fp32_groups_flat[i].requires_grad = True # keep this in case internal optimizer uses it param_group['params'] = [self.fp32_groups_flat[i]] # we may have a way of fusing dynamic scale. 
Do not support for now if dynamic_loss_scale: self.dynamic_loss_scale = True self.cur_iter = 0 self.last_overflow_iter = -1 self.scale_factor = 2 if dynamic_loss_args is None: self.cur_scale = initial_dynamic_scale self.scale_window = 1000 self.min_loss_scale = 1 else: self.cur_scale = dynamic_loss_args[INITIAL_LOSS_SCALE] self.scale_window = dynamic_loss_args[SCALE_WINDOW] self.min_loss_scale = dynamic_loss_args[MIN_LOSS_SCALE] else: self.dynamic_loss_scale = False self.cur_iter = 0 self.cur_scale = static_loss_scale self.verbose = verbose self.custom_loss_scaler = False self.external_loss_scale = None self.clip_grad = clip_grad self.norm_type = 2 self.step_count = 0 TORCH_MAJOR = int(torch.__version__.split('.')[0]) TORCH_MINOR = int(torch.__version__.split('.')[1]) if TORCH_MAJOR == 0 and TORCH_MINOR <= 4: self.clip_grad_norm = torch.nn.utils.clip_grad_norm else: self.clip_grad_norm = torch.nn.utils.clip_grad_norm_ #model parallel object self.mpu = mpu self.overflow = False self.overflow_checker = CheckOverflow(self.fp16_groups, mpu=self.mpu, deepspeed=deepspeed) self.initialize_optimizer_states() def initialize_optimizer_states(self): for i, group in enumerate(self.fp16_groups): self.fp32_groups_flat[i].grad = torch.zeros(self.fp32_groups_flat[i].size(), device=self.fp32_groups_flat[i].device) self.optimizer.step() for i, group in enumerate(self.fp16_groups): self.fp32_groups_flat[i].grad = None return def zero_grad(self, set_to_none=False): """ Zero FP16 parameter grads. """ # For speed, set model fp16 grad to None by default for group in self.fp16_groups: for p in group: if set_to_none: p.grad = None else: if p.grad is not None: p.grad.detach_() p.grad.zero_() def step_fused_adam(self, closure=None): """ Not supporting closure. """ # First compute norm for all group so we know if there is overflow grads_groups_flat = [] norm_groups = [] for i, group in enumerate(self.fp16_groups): grads_groups_flat.append( _flatten_dense_tensors([ torch.zeros(p.size(), dtype=p.dtype, device=p.device) if p.grad is None else p.grad for p in group ])) norm_groups.append(get_weight_norm(grads_groups_flat[i], mpu=self.mpu)) self.overflow = self.overflow_checker.check_using_norm(norm_groups) prev_scale = self.cur_scale self._update_scale(self.overflow) if self.overflow: if self.verbose: logger.info("[deepspeed] fp16 dynamic loss scale overflow! Skipping step. Attempted loss " "scale: {}, reducing to {}".format(prev_scale, self.cur_scale)) return self.overflow scaled_grad_norm = get_global_norm(norm_list=norm_groups) combined_scale = self.unscale_and_clip_grads(grads_groups_flat, scaled_grad_norm, apply_scale=False) # Stash unscaled gradient norm self._global_grad_norm = scaled_grad_norm / self.cur_scale # norm is in fact norm*cur_scale self.optimizer.step(grads=[[g] for g in grads_groups_flat], output_params=[[p] for p in self.fp16_groups_flat], scale=combined_scale, grad_norms=norm_groups) # TODO: we probably don't need this? 
just to be safe for i in range(len(norm_groups)): updated_params = _unflatten_dense_tensors(self.fp16_groups_flat[i], self.fp16_groups[i]) for p, q in zip(self.fp16_groups[i], updated_params): p.data = q.data return self.overflow def start_timers(self, name_list): if self.timers is not None: for name in name_list: self.timers(name).start() def stop_timers(self, name_list): if self.timers is not None: for name in name_list: self.timers(name).stop() def log_timers(self, name_list): if self.timers is not None: self.timers.log(name_list) def set_lr(self, lr): """Set the learning rate.""" for param_group in self.optimizer.param_groups: param_group["lr"] = lr def get_lr(self): """Return the current learning rate.""" return self.optimizer.param_groups[0]["lr"] def override_loss_scale(self, loss_scale): if loss_scale != self.external_loss_scale: logger.info(f'[deepspeed] setting loss scale from {self.external_loss_scale} -> {loss_scale}') self.custom_loss_scaler = True self.external_loss_scale = loss_scale def step(self, closure=None): """ Not supporting closure. """ if self.fused_adam_legacy: return self.step_fused_adam() COMPUTE_NORM = "compute_norm" OVERFLOW_CHECK = 'overflow_check' OVERFLOW_TIMERS = [COMPUTE_NORM, OVERFLOW_CHECK] UNSCALE_AND_CLIP = 'unscale_and_clip' BASIC_STEP = 'basic_step' UPDATE_FP16 = 'update_fp16' STEP_TIMERS = OVERFLOW_TIMERS + [UNSCALE_AND_CLIP, BASIC_STEP, UPDATE_FP16] # First determine if there is overflow. self.start_timers([OVERFLOW_CHECK]) fp16_params = [] for i, group in enumerate(self.fp16_groups): fp16_params.extend([p for p in group if p.grad is not None]) self.overflow = self.overflow_checker.has_overflow(fp16_params) self.stop_timers([OVERFLOW_CHECK]) prev_scale = self.cur_scale self._update_scale(self.overflow) if self.overflow: if self.verbose: log_dist( "Overflow detected. Skipping step. Attempted loss " f"scale: {prev_scale}, reducing to {self.cur_scale}", ranks=[0]) # Clear gradients for i, group in enumerate(self.fp16_groups): for p in group: p.grad = None self.log_timers(OVERFLOW_TIMERS) return self.overflow grads_groups_flat = [] for i, group in enumerate(self.fp16_groups): data_type = self.fp32_groups_flat[i].dtype grads_groups_flat.append( _flatten_dense_tensors([ torch.zeros(p.size(), dtype=data_type, device=p.device) if p.grad is None else p.grad.to(data_type) for p in group ])) for p in group: p.grad = None self.fp32_groups_flat[i].grad = grads_groups_flat[i] self.start_timers([COMPUTE_NORM]) all_groups_norm = get_grad_norm(self.fp32_groups_flat, mpu=self.mpu) self.stop_timers([COMPUTE_NORM]) if self.has_moe_layers: all_groups_norm = self._get_norm_with_moe_layers(all_groups_norm) scaled_global_grad_norm = get_global_norm(norm_list=[all_groups_norm]) # Stash unscaled gradient norm self._global_grad_norm = scaled_global_grad_norm / self.cur_scale self.start_timers([UNSCALE_AND_CLIP]) self.unscale_and_clip_grads(grads_groups_flat, scaled_global_grad_norm) self.stop_timers([UNSCALE_AND_CLIP]) self.start_timers([BASIC_STEP]) self.optimizer.step() self.stop_timers([BASIC_STEP]) #get rid of the fp32 gradients. 
Not needed anymore for group in self.fp32_groups_flat: group.grad = None self.start_timers([UPDATE_FP16]) for i in range(len(self.fp16_groups)): updated_params = _unflatten_dense_tensors(self.fp32_groups_flat[i], self.fp16_groups[i]) for p, q in zip(self.fp16_groups[i], updated_params): p.data.copy_(q.data) self.stop_timers([UPDATE_FP16]) self.log_timers(STEP_TIMERS) self.step_count += 1 return self.overflow def _get_norm_with_moe_layers(self, all_groups_norm): #all_groups_norm_old = all_groups_norm # Need to allreduce (avg) the norms across different ranks because moe params will not be synced during allreduce if self.using_pipeline: pg = self.deepspeed.mpu.get_data_parallel_group() else: pg = groups._get_data_parallel_group() scaled_norm = all_groups_norm * 1.0 / float(dist.get_world_size(group=pg)) scaled_norm_tensor = torch.tensor(scaled_norm, device=self.fp32_groups_flat[0].device, dtype=torch.float) dist.all_reduce(scaled_norm_tensor, group=pg) all_groups_norm = scaled_norm_tensor.item() #print(f"old = {all_groups_norm_old} and new = {all_groups_norm} at rank: {deepspeed.comm.get_rank()}") return all_groups_norm def unscale_and_clip_grads(self, grad_groups_flat, total_norm, apply_scale=True): # compute combined scale factor for this group combined_scale = self.cur_scale if self.clip_grad > 0.: # norm is in fact norm*scale clip = ((total_norm / self.cur_scale) + 1e-6) / self.clip_grad if clip > 1: combined_scale = clip * self.cur_scale if apply_scale: for grad in grad_groups_flat: grad.data.mul_(1. / combined_scale) return combined_scale def backward(self, loss, create_graph=False, retain_graph=False): """ :attr:`backward` performs the following steps: 1. fp32_loss = loss.float() 2. scaled_loss = fp32_loss*loss_scale 3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves """ if self.custom_loss_scaler: scaled_loss = self.external_loss_scale * loss scaled_loss.backward() else: scaled_loss = (loss.float()) * self.cur_scale scaled_loss.backward(create_graph=create_graph, retain_graph=retain_graph) def _update_scale(self, skip): if self.dynamic_loss_scale: prev_scale = self.cur_scale if skip: self.cur_scale = max(self.cur_scale / self.scale_factor, self.min_loss_scale) self.last_overflow_iter = self.cur_iter if self.verbose: logger.info(f"\nGrad overflow on iteration {self.cur_iter}") logger.info(f"Reducing dynamic loss scale from {prev_scale} to {self.cur_scale}") else: # Ensure self.scale_window updates since last overflow stable_interval = (self.cur_iter - self.last_overflow_iter) - 1 if (stable_interval > 0) and (stable_interval % self.scale_window == 0): self.cur_scale *= self.scale_factor if self.verbose: logger.info(f"No Grad overflow for {self.scale_window} iterations") logger.info(f"Increasing dynamic loss scale from {prev_scale} to {self.cur_scale}") else: if skip: logger.info("Grad overflow on iteration: %s", self.cur_iter) logger.info("Using static loss scale of: %s", self.cur_scale) self.cur_iter += 1 return # Promote state so it can be retrieved or set via "fp16_optimizer_instance.state" def _get_state(self): return self.optimizer.state def _set_state(self, value): self.optimizer.state = value state = property(_get_state, _set_state) # Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups" # (for example, to adjust the learning rate) def _get_param_groups(self): return self.optimizer.param_groups def _set_param_groups(self, value): self.optimizer.param_groups = value 
param_groups = property(_get_param_groups, _set_param_groups) def state_dict(self): """ Returns a dict containing the current state of this :class:`FP16_Optimizer` instance. This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict of the contained Pytorch optimizer. Example:: checkpoint = {} checkpoint['model'] = model.state_dict() checkpoint['optimizer'] = optimizer.state_dict() torch.save(checkpoint, "saved.pth") """ state_dict = {} state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale state_dict['cur_scale'] = self.cur_scale state_dict['cur_iter'] = self.cur_iter if state_dict['dynamic_loss_scale']: state_dict['last_overflow_iter'] = self.last_overflow_iter state_dict['scale_factor'] = self.scale_factor state_dict['scale_window'] = self.scale_window state_dict[OPTIMIZER_STATE_DICT] = self.optimizer.state_dict() state_dict['fp32_groups_flat'] = self.fp32_groups_flat state_dict[CLIP_GRAD] = self.clip_grad return state_dict # Refresh fp32 master params from fp16 copies def refresh_fp32_params(self): for current, saved in zip(self.fp32_groups_flat, self.fp16_groups_flat): current.data.copy_(saved.data) def load_state_dict(self, state_dict, load_optimizer_states=True): """ Loads a state_dict created by an earlier call to state_dict(). If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``, whose parameters in turn came from ``model``, it is expected that the user will call ``model.load_state_dict()`` before ``fp16_optimizer_instance.load_state_dict()`` is called. Example:: model = torch.nn.Linear(D_in, D_out).to(get_accelerator().device_name()).half() optimizer = torch.optim.SGD(model.parameters(), lr=1e-3) optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0) ... checkpoint = torch.load("saved.pth") model.load_state_dict(checkpoint['model']) optimizer.load_state_dict(checkpoint['optimizer']) """ # I think it should actually be ok to reload the optimizer before the model. self.dynamic_loss_scale = state_dict['dynamic_loss_scale'] self.cur_scale = state_dict['cur_scale'] self.cur_iter = state_dict['cur_iter'] if state_dict['dynamic_loss_scale']: self.last_overflow_iter = state_dict['last_overflow_iter'] self.scale_factor = state_dict['scale_factor'] self.scale_window = state_dict['scale_window'] if load_optimizer_states: self.optimizer.load_state_dict(state_dict[OPTIMIZER_STATE_DICT]) self.clip_grad = state_dict[CLIP_GRAD] # At this point, the optimizer's references to the model's fp32 parameters are up to date. # The optimizer's hyperparameters and internal buffers are also up to date. # However, the fp32 master copies of the model's fp16 params stored by the optimizer are still # out of date. There are two options. # 1: Refresh the master params from the model's fp16 params. # This requires less storage but incurs precision loss. # 2: Save and restore the fp32 master copies separately. # We choose option 2. # # Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device # of their associated parameters, because it's possible those buffers might not exist yet in # the current optimizer instance. In our case, as long as the current FP16_Optimizer has been # constructed in the same way as the one whose state_dict we are loading, the same master params # are guaranteed to exist, so we can just copy_() from the saved master params. 
for current, saved in zip(self.fp32_groups_flat, state_dict['fp32_groups_flat']): current.data.copy_(saved.data) def __repr__(self): return repr(self.optimizer) # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale" def _get_loss_scale(self): if self.custom_loss_scaler: return self.external_loss_scale else: return self.cur_scale def _set_loss_scale(self, value): self.loss_scaler.cur_scale = value loss_scale = property(_get_loss_scale, _set_loss_scale)
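# --- Editor's note: the combined-scale arithmetic used by
# unscale_and_clip_grads() above, shown standalone. Gradients are divided once
# by loss_scale * max(1, unscaled_norm / clip_grad), which removes the loss
# scale and applies gradient clipping in a single pass. Values are arbitrary.
import torch

cur_scale, clip_grad = 128.0, 1.0
grads = [torch.randn(16) * cur_scale]        # pretend these are loss-scaled fp32 grads
total_norm = grads[0].norm()                 # norm of the *scaled* gradients

combined_scale = cur_scale
if clip_grad > 0.:
    clip = ((total_norm / cur_scale) + 1e-6) / clip_grad
    if clip > 1:
        combined_scale = clip * cur_scale
for g in grads:
    g.mul_(1. / combined_scale)              # unscaled and clipped in one step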
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team """ Copyright NVIDIA/apex This file is adapted from FP16_Optimizer in NVIDIA/apex """ from deepspeed.moe.utils import split_params_grads_into_shared_and_expert_params import torch from torch._utils import _flatten_dense_tensors from deepspeed.runtime import DeepSpeedOptimizer from deepspeed.runtime.utils import get_global_norm, CheckOverflow, get_weight_norm from deepspeed.runtime.fp16.loss_scaler import INITIAL_LOSS_SCALE, SCALE_WINDOW, MIN_LOSS_SCALE from deepspeed.utils import logger from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT from deepspeed.accelerator import get_accelerator from deepspeed import comm as dist class FP16_UnfusedOptimizer(DeepSpeedOptimizer): """ FP16 Optimizer without weight fusion to support LAMB optimizer For usage example please see, TODO: DeepSpeed V2 Tutorial """ def __init__(self, init_optimizer, deepspeed=None, static_loss_scale=1.0, dynamic_loss_scale=False, dynamic_loss_args=None, verbose=True, mpu=None, clip_grad=0.0, fused_lamb_legacy=False): self.fused_lamb_legacy = fused_lamb_legacy self._global_grad_norm = 0. if dist.get_rank() == 0: logger.info(f'Fused Lamb Legacy : {self.fused_lamb_legacy} ') if not get_accelerator().is_available(): raise SystemError("Cannot use fp16 without accelerator.") self.optimizer = init_optimizer # param groups self.fp16_groups = [] self.fp32_groups = [] # loop to deal with groups for i, param_group in enumerate(self.optimizer.param_groups): #fp16 weights that represents the actual model weights self.fp16_groups.append(param_group['params']) #creating a fp32 copy of the weights that will be updated first then #copied to fp16 weights fp32_group = [p.clone().float().detach() for p in param_group['params']] #in case the internal optimizer needs it for p in fp32_group: p.requires_grad = True #setting the param groups in the optimizer to point to fp32 #note these are not the weights used by the model #the model uses the fp16 version that we added to fp16_group self.fp32_groups.append(fp32_group) param_group['params'] = self.fp32_groups[i] # we may have a way of fusing dynamic scale. Do not support for now if dynamic_loss_scale: self.dynamic_loss_scale = True self.cur_iter = 0 self.last_overflow_iter = -1 self.scale_factor = 2.0 if dynamic_loss_args is None: self.cur_scale = 1.0 * 2**16 self.scale_window = 1000 self.min_loss_scale = 0.25 else: self.cur_scale = dynamic_loss_args[INITIAL_LOSS_SCALE] self.scale_window = dynamic_loss_args[SCALE_WINDOW] self.min_loss_scale = dynamic_loss_args[MIN_LOSS_SCALE] else: self.dynamic_loss_scale = False self.cur_iter = 0 self.cur_scale = static_loss_scale self.custom_loss_scaler = False self.external_loss_scale = None self.verbose = verbose self.clip_grad = clip_grad self.norm_type = 2 TORCH_MAJOR = int(torch.__version__.split('.')[0]) TORCH_MINOR = int(torch.__version__.split('.')[1]) if TORCH_MAJOR == 0 and TORCH_MINOR <= 4: self.clip_grad_norm = torch.nn.utils.clip_grad_norm else: self.clip_grad_norm = torch.nn.utils.clip_grad_norm_ self.mpu = mpu self.overflow = False self.overflow_checker = CheckOverflow(self.fp16_groups, mpu=self.mpu, deepspeed=deepspeed) self.initialize_optimizer_states() def zero_grad(self, set_to_none=False): """ Zero FP16 parameter grads. 
""" # FP32 grad should never exist outside of the step function # For speed, set model fp16 grad to None by default for group in self.fp16_groups: for p in group: if set_to_none: p.grad = None else: if p.grad is not None: p.grad.detach_() p.grad.zero_() def step_fused_lamb(self, closure=None): """ Not supporting closure. """ # First compute norm for all group so we know if there is overflow grads_groups_flat = [] grads_groups = [] norm_groups = [] expert_norm_groups = [] for i, group in enumerate(self.fp16_groups): grads = [ torch.zeros(p.size(), dtype=p.dtype, device=p.device) if p.grad is None else p.grad for p in group ] grads_groups.append(grads) grads_groups_flat.append(_flatten_dense_tensors(grads)) grads_for_norm, expert_grads_for_norm = split_params_grads_into_shared_and_expert_params(group) norm_group_value = 0.0 if len(grads_for_norm) > 0: norm_group_value = get_weight_norm(_flatten_dense_tensors(grads_for_norm), mpu=self.mpu) norm_groups.append(norm_group_value) expert_norm_group_value = 0.0 if len(expert_grads_for_norm) > 0: expert_norm_group_value = get_weight_norm(_flatten_dense_tensors(expert_grads_for_norm), mpu=self.mpu) expert_norm_groups.append(expert_norm_group_value) self.overflow = self.overflow_checker.check_using_norm(norm_groups + expert_norm_groups) prev_scale = self.cur_scale self._update_scale(self.overflow) if self.overflow: if self.verbose: logger.info("[deepspeed] fp16 dynamic loss scale overflow! Skipping step. Attempted loss " "scale: {}, reducing to {}".format(prev_scale, self.cur_scale)) return self.overflow self._global_grad_norm = get_global_norm(norm_list=norm_groups) combined_scale = self.unscale_and_clip_grads(self._global_grad_norm, apply_scale=False) self.optimizer.step(grads=grads_groups, output_params=self.fp16_groups, scale=combined_scale) for fp32_group, fp16_group in zip(self.fp32_groups, self.fp16_groups): for idx, (fp32_param, fp16_param) in enumerate(zip(fp32_group, fp16_group)): #remove the fp32 grad fp32_param.grad = None #copy data from fp32 to fp16 fp16_param.data.copy_(fp32_param.data) return self.overflow def set_lr(self, lr): """Set the learning rate.""" for param_group in self.optimizer.param_groups: param_group["lr"] = lr def get_lr(self): """Return the current learning rate.""" return self.optimizer.param_groups[0]["lr"] def override_loss_scale(self, loss_scale): if loss_scale != self.external_loss_scale: logger.info(f'[deepspeed] setting loss scale from {self.external_loss_scale} -> {loss_scale}') self.custom_loss_scaler = True self.external_loss_scale = loss_scale def step(self, closure=None): """ Not supporting closure. """ if self.fused_lamb_legacy: return self.step_fused_lamb() self.overflow = self.overflow_checker.check() prev_scale = self.cur_scale self._update_scale(self.overflow) if self.overflow: if self.verbose: logger.info("[deepspeed] fp16 dynamic loss scale overflow! Skipping step. 
Attempted loss " "scale: {}, reducing to {}".format(prev_scale, self.cur_scale)) return self.overflow norm_groups = [] for i, group in enumerate(self.fp16_groups): grads_for_norm, _ = split_params_grads_into_shared_and_expert_params(group) norm_group_value = 0.0 if len(grads_for_norm) > 0: norm_group_value = get_weight_norm(grads_for_norm, mpu=self.mpu) norm_groups.append(norm_group_value) # copying gradients to fp32 to wor k with fp32 parameters for fp32_param, fp16_param in zip(self.fp32_groups[i], self.fp16_groups[i]): if fp16_param.grad is None: fp32_param.grad = torch.zeros(fp16_param.size(), dtype=fp32_param.dtype, device=fp32_param.device) else: fp32_param.grad = fp16_param.grad.to(fp32_param.dtype) self._global_grad_norm = get_global_norm(norm_list=norm_groups) self.unscale_and_clip_grads(self._global_grad_norm) self.optimizer.step() for fp32_group, fp16_group in zip(self.fp32_groups, self.fp16_groups): for idx, (fp32_param, fp16_param) in enumerate(zip(fp32_group, fp16_group)): #remove the fp32 grad fp32_param.grad = None #copy data from fp32 to fp16 fp16_param.data.copy_(fp32_param.data) return self.overflow def unscale_and_clip_grads(self, total_norm, apply_scale=True): # compute combined scale factor for this group combined_scale = self.cur_scale if self.clip_grad > 0.: # norm is in fact norm*scale clip = ((total_norm / self.cur_scale) + 1e-6) / self.clip_grad if clip > 1: combined_scale = clip * self.cur_scale if apply_scale: for group in self.fp32_groups: for param in group: if param.grad is not None: param.grad.data.mul_(1. / combined_scale) return combined_scale def backward(self, loss, create_graph=False, retain_graph=False): """ :attr:`backward` performs the following steps: 1. fp32_loss = loss.float() 2. scaled_loss = fp32_loss*loss_scale 3. 
scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves """ if self.custom_loss_scaler: scaled_loss = self.external_loss_scale * loss scaled_loss.backward() else: scaled_loss = (loss.float()) * self.cur_scale scaled_loss.backward(create_graph=create_graph, retain_graph=retain_graph) def _update_scale(self, skip): if self.dynamic_loss_scale: prev_scale = self.cur_scale if skip: self.cur_scale = max(self.cur_scale / self.scale_factor, self.min_loss_scale) self.last_overflow_iter = self.cur_iter if self.verbose: logger.info("Grad overflow on iteration: %s", self.cur_iter) logger.info(f"Reducing dynamic loss scale from {prev_scale} to {self.cur_scale}") else: # Ensure self.scale_window updates since last overflow stable_interval = (self.cur_iter - self.last_overflow_iter) - 1 if (stable_interval > 0) and (stable_interval % self.scale_window == 0): self.cur_scale *= self.scale_factor if self.verbose: logger.info(f"No Grad overflow for {self.scale_window} iterations") logger.info(f"Increasing dynamic loss scale from {prev_scale} to {self.cur_scale}") else: if skip: logger.info("Grad overflow on iteration %s", self.cur_iter) logger.info("Using static loss scale of %s", self.cur_scale) self.cur_iter += 1 return # Promote state so it can be retrieved or set via "fp16_optimizer_instance.state" def _get_state(self): return self.optimizer.state def _set_state(self, value): self.optimizer.state = value state = property(_get_state, _set_state) # Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups" # (for example, to adjust the learning rate) def _get_param_groups(self): return self.optimizer.param_groups def _set_param_groups(self, value): self.optimizer.param_groups = value param_groups = property(_get_param_groups, _set_param_groups) # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale" def _get_loss_scale(self): if self.custom_loss_scaler: return self.external_loss_scale else: return self.cur_scale def _set_loss_scale(self, value): self.loss_scaler.cur_scale = value loss_scale = property(_get_loss_scale, _set_loss_scale) def state_dict(self): """ Returns a dict containing the current state of this :class:`FP16_Optimizer` instance. This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict of the contained Pytorch optimizer. Example:: checkpoint = {} checkpoint['model'] = model.state_dict() checkpoint['optimizer'] = optimizer.state_dict() torch.save(checkpoint, "saved.pth") """ state_dict = {} state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale state_dict['cur_scale'] = self.cur_scale state_dict['cur_iter'] = self.cur_iter if state_dict['dynamic_loss_scale']: state_dict['last_overflow_iter'] = self.last_overflow_iter state_dict['scale_factor'] = self.scale_factor state_dict['scale_window'] = self.scale_window state_dict[OPTIMIZER_STATE_DICT] = self.optimizer.state_dict() state_dict['fp32_groups'] = self.fp32_groups return state_dict # Refresh fp32 master params from fp16 copies def refresh_fp32_params(self): for current_group, saved_group in zip(self.fp32_groups, self.fp16_groups): for current, saved in zip(current_group, saved_group): current.data.copy_(saved.data) def load_state_dict(self, state_dict, load_optimizer_states=True): """ Loads a state_dict created by an earlier call to state_dict(). 
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``, whose parameters in turn came from ``model``, it is expected that the user will call ``model.load_state_dict()`` before ``fp16_optimizer_instance.load_state_dict()`` is called. Example:: model = torch.nn.Linear(D_in, D_out).to(get_accelerator().device_name()).half() optimizer = torch.optim.SGD(model.parameters(), lr=1e-3) optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0) ... checkpoint = torch.load("saved.pth") model.load_state_dict(checkpoint['model']) optimizer.load_state_dict(checkpoint['optimizer']) """ # I think it should actually be ok to reload the optimizer before the model. self.dynamic_loss_scale = state_dict['dynamic_loss_scale'] self.cur_scale = state_dict['cur_scale'] self.cur_iter = state_dict['cur_iter'] if state_dict['dynamic_loss_scale']: self.last_overflow_iter = state_dict['last_overflow_iter'] self.scale_factor = state_dict['scale_factor'] self.scale_window = state_dict['scale_window'] if load_optimizer_states: self.optimizer.load_state_dict(state_dict[OPTIMIZER_STATE_DICT]) # At this point, the optimizer's references to the model's fp32 parameters are up to date. # The optimizer's hyperparameters and internal buffers are also up to date. # However, the fp32 master copies of the model's fp16 params stored by the optimizer are still # out of date. There are two options. # 1: Refresh the master params from the model's fp16 params. # This requires less storage but incurs precision loss. # 2: Save and restore the fp32 master copies separately. # We choose option 2. # # Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device # of their associated parameters, because it's possible those buffers might not exist yet in # the current optimizer instance. In our case, as long as the current FP16_Optimizer has been # constructed in the same way as the one whose state_dict we are loading, the same master params # are guaranteed to exist, so we can just copy_() from the saved master params. for current_group, saved_group in zip(self.fp32_groups, state_dict['fp32_groups']): for current, saved in zip(current_group, saved_group): current.data.copy_(saved.data) def __repr__(self): return repr(self.optimizer) def initialize_optimizer_states(self): for i, group in enumerate(self.fp16_groups): for param in group: param.grad = torch.zeros(param.size(), dtype=param.dtype, device=get_accelerator().current_device_name()) for i, group in enumerate(self.fp32_groups): for param in group: param.grad = torch.zeros(param.size(), dtype=param.dtype, device=get_accelerator().current_device_name()) self.optimizer.step() for i, group in enumerate(self.fp16_groups): for param in group: param.grad = None for i, group in enumerate(self.fp32_groups): for param in group: param.grad = None
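# --- Editor's note: the fp16/fp32 master-weight pattern both optimizers above
# implement, reduced to a standalone sketch: keep fp32 copies, step in fp32,
# then copy the result back into the fp16 model weights. Plain SGD stands in
# for the wrapped optimizer; shapes and the learning rate are arbitrary.
import torch

fp16_params = [torch.nn.Parameter(torch.randn(8).half())]
fp32_params = [p.clone().float().detach().requires_grad_(True) for p in fp16_params]
opt = torch.optim.SGD(fp32_params, lr=0.1)

# After backward(): move the (unscaled) fp16 grads onto the fp32 master copies.
for p16, p32 in zip(fp16_params, fp32_params):
    p32.grad = p16.grad.float() if p16.grad is not None else torch.zeros_like(p32)

opt.step()                                   # optimizer state and update live in fp32
for p16, p32 in zip(fp16_params, fp32_params):
    p16.data.copy_(p32.data)                 # fp16 weights mirror the fp32 result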
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team '''Copyright The Microsoft DeepSpeed Team'''
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team """ Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Taken and modified for DeepSpeed from: https://github.com/NVIDIA/Megatron-LM/blob/master/fp16/loss_scaler.py Commit: 93ab4bea59dc5cbf97c079d313741866af4deac9 """ import torch from deepspeed import comm as dist from deepspeed.utils import logger INITIAL_LOSS_SCALE = 'init_scale' SCALE_WINDOW = 'scale_window' DELAYED_SHIFT = 'delayed_shift' MIN_LOSS_SCALE = 'min_scale' # item() is a recent addition, so this helps with backward compatibility. def to_python_float(t): if hasattr(t, 'item'): return t.item() return t[0] class LossScalerBase: """LossScalarBase Base class for a loss scaler """ def __init__(self, cur_scale): self.cur_scale = cur_scale self.dynamic = False @property def loss_scale(self): return self.cur_scale def scale_gradient(self, module, grad_in, grad_out): return tuple(self.loss_scale * g for g in grad_in) def update_scale(self, overflow): pass def backward(self, loss, retain_graph=False): scaled_loss = loss * self.loss_scale scaled_loss.backward(retain_graph=retain_graph) # print(f'LossScalerBackward: {scaled_loss=}') class LossScaler(LossScalerBase): """ Class that manages a static loss scale. This class is intended to interact with :class:`FP16_Optimizer`, and should not be directly manipulated by the user. Use of :class:`LossScaler` is enabled via the ``static_loss_scale`` argument to :class:`FP16_Optimizer`'s constructor. Args: scale (float, optional, default=1.0): The loss scale. """ def __init__(self, scale=1): super(LossScaler, self).__init__(scale) # `params` is a list / generator of torch.Variable def has_overflow(self, params): return False # `x` is a torch.Tensor def _has_inf_or_nan(x): return False class DynamicLossScaler(LossScalerBase): """ Class that manages dynamic loss scaling. It is recommended to use :class:`DynamicLossScaler` indirectly, by supplying ``dynamic_loss_scale=True`` to the constructor of :class:`FP16_Optimizer`. However, it's important to understand how :class:`DynamicLossScaler` operates, because the default options can be changed using the the ``dynamic_loss_args`` argument to :class:`FP16_Optimizer`'s constructor. Loss scaling is designed to combat the problem of underflowing gradients encountered at long times when training fp16 networks. Dynamic loss scaling begins by attempting a very high loss scale. Ironically, this may result in OVERflowing gradients. If overflowing gradients are encountered, :class:`DynamicLossScaler` informs :class:`FP16_Optimizer` that an overflow has occurred. :class:`FP16_Optimizer` then skips the update step for this particular iteration/minibatch, and :class:`DynamicLossScaler` adjusts the loss scale to a lower value. If a certain number of iterations occur without overflowing gradients detected, :class:`DynamicLossScaler` increases the loss scale once more. 
In this way :class:`DynamicLossScaler` attempts to "ride the edge" of always using the highest loss scale possible without incurring overflow. Args: init_scale (float, optional, default=2**32): Initial loss scale attempted by :class:`DynamicLossScaler.` scale_factor (float, optional, default=2.0): Factor used when adjusting the loss scale. If an overflow is encountered, the loss scale is readjusted to loss scale/``scale_factor``. If ``scale_window`` consecutive iterations take place without an overflow, the loss scale is readjusted to loss_scale*``scale_factor``. scale_window (int, optional, default=1000): Number of consecutive iterations without an overflow to wait before increasing the loss scale. """ def __init__(self, init_scale=2**32, scale_factor=2., scale_window=1000, min_scale=1, delayed_shift=1, consecutive_hysteresis=False, raise_error_at_min_scale=True, dtype=torch.half): super(DynamicLossScaler, self).__init__(init_scale) self.cur_iter = 0 self.last_overflow_iter = -1 self.scale_factor = scale_factor self.scale_window = scale_window self.min_scale = min_scale self.delayed_shift = delayed_shift self.cur_hysteresis = delayed_shift self.consecutive_hysteresis = consecutive_hysteresis self.raise_error_at_min_scale = raise_error_at_min_scale self.dynamic = True self.dtype = dtype # `params` is a list / generator of torch.Variable def has_overflow_serial(self, params): for p in params: if p.grad is not None and self._has_inf_or_nan(p.grad.data): return True return False # `x` is a torch.Tensor def _has_inf_or_nan(x): try: # if x is half, the .float() incurs an additional deep copy, but it's necessary if # Pytorch's .sum() creates a one-element tensor of the same type as x # (which is true for some recent version of pytorch). cpu_sum = float(x.float().sum()) # More efficient version that can be used if .sum() returns a Python scalar # cpu_sum = float(x.sum()) except RuntimeError as instance: # We want to check if inst is actually an overflow exception. # RuntimeError could come from a different error. # If so, we still want the exception to propagate. if "value cannot be converted" not in instance.args[0]: raise return True else: if cpu_sum in [float('inf'), -float('inf')] or cpu_sum != cpu_sum: return True return False # `overflow` is boolean indicating whether the gradient overflowed def update_scale(self, overflow): if overflow: # self.cur_scale /= self.scale_factor if self.delayed_shift == 1 or self.cur_hysteresis == 1: if (self.cur_scale == self.min_scale) and self.raise_error_at_min_scale: raise Exception( "Current loss scale already at minimum - cannot decrease scale anymore. Exiting run.") else: next_scale = max(self.cur_scale / self.scale_factor, self.min_scale) if dist.get_rank() == 0: overflow_msg = f"[deepspeed] OVERFLOW! Rank {dist.get_rank()} Skipping step." if self.dtype == torch.half: overflow_msg += f" Attempted loss scale: {int(self.cur_scale)}, reducing to {int(next_scale)}" logger.info(overflow_msg) self.cur_scale = next_scale else: if dist.get_rank() == 0: overflow_msg = f"[deepspeed] OVERFLOW! Rank {dist.get_rank()} Skipping step." if self.dtype == torch.half: overflow_msg += f" Attempted loss scale: {int(self.cur_scale)}, but hysteresis is {self.cur_hysteresis}. 
Reducing hysteresis to {self.cur_hysteresis-1}" logger.info(overflow_msg) self.cur_hysteresis -= 1 self.last_overflow_iter = self.cur_iter else: if self.consecutive_hysteresis: self.cur_hysteresis = self.delayed_shift if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0: if not self.consecutive_hysteresis: self.cur_hysteresis = self.delayed_shift self.cur_scale *= self.scale_factor self.cur_iter += 1 # Although loss scaling is only defined for fp16, yet for backwards compatibility # we still create a scaler for other dtypes (fp32, bf16) which does not perform any scaling. def CreateLossScaler(dtype, static_loss_scale, dynamic_scaling, dynamic_loss_args): if dtype == torch.half and dynamic_scaling: if dynamic_loss_args is None: return DynamicLossScaler(dtype=dtype) return DynamicLossScaler(dtype=dtype, **dynamic_loss_args) loss_scale_value = static_loss_scale if dtype == torch.half else 1.0 return LossScaler(scale=loss_scale_value) ############################################################## # Example usage below here -- assuming it's in a separate file ############################################################## """ TO-DO separate out into an example. if __name__ == "__main__": import torch from torch.autograd import Variable from dynamic_loss_scaler import DynamicLossScaler # N is batch size; D_in is input dimension; # H is hidden dimension; D_out is output dimension. N, D_in, H, D_out = 64, 1000, 100, 10 # Create random Tensors to hold inputs and outputs, and wrap them in Variables. x = Variable(torch.randn(N, D_in), requires_grad=False) y = Variable(torch.randn(N, D_out), requires_grad=False) w1 = Variable(torch.randn(D_in, H), requires_grad=True) w2 = Variable(torch.randn(H, D_out), requires_grad=True) parameters = [w1, w2] learning_rate = 1e-6 optimizer = torch.optim.SGD(parameters, lr=learning_rate) loss_scaler = DynamicLossScaler() for t in range(500): y_pred = x.mm(w1).clamp(min=0).mm(w2) loss = (y_pred - y).pow(2).sum() * loss_scaler.loss_scale print('Iter {} loss scale: {}'.format(t, loss_scaler.loss_scale)) print('Iter {} scaled loss: {}'.format(t, loss.data[0])) print('Iter {} unscaled loss: {}'.format(t, loss.data[0] / loss_scaler.loss_scale)) # Run backprop optimizer.zero_grad() loss.backward() # Check for overflow has_overflow = DynamicLossScaler.has_overflow(parameters) # If no overflow, unscale grad and update as usual if not has_overflow: for param in parameters: param.grad.data.mul_(1. / loss_scaler.loss_scale) optimizer.step() # Otherwise, don't do anything -- ie, skip iteration else: print('fp16 dynamic loss scale overflow!') # Update loss scale for next iteration loss_scaler.update_scale(has_overflow) """
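# --- Editor's note: a minimal sketch of the scaling step the classes above
# provide. CreateLossScaler() picks a DynamicLossScaler for fp16; the tiny
# parameter and the torch.isfinite overflow check are illustrative choices
# (update_scale() is normally driven by FP16_Optimizer inside an initialized
# DeepSpeed run, so it is not called directly here).
import torch

scaler = CreateLossScaler(dtype=torch.half, static_loss_scale=128.0,
                          dynamic_scaling=True, dynamic_loss_args=None)
p = torch.nn.Parameter(torch.randn(4, 4).half())
loss = (p.float() ** 2).sum()

scaler.backward(loss)          # multiplies by loss_scale (2**32 initially), then backward()
overflow = not torch.isfinite(p.grad.float()).all().item()
# FP16_Optimizer would now skip the weight update and call
# scaler.update_scale(overflow), halving the scale on overflow and doubling it
# again after scale_window consecutive clean iterations.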
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import types import torch import numpy as np from deepspeed.accelerator import get_accelerator from deepspeed import comm as dist class ZeroOneAdam(torch.optim.Optimizer): """Implements the 0/1 Adam algorithm. Currently GPU-only. For usage example please see https://www.deepspeed.ai/tutorials/zero-one-adam/ For technical details please read https://arxiv.org/abs/2202.06009 Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups. lr (float, optional): learning rate. (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square. (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability. (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) var_freeze_step (int, optional): The latest step to update the variance, using the notation from https://arxiv.org/abs/2202.06009, it denotes the max{i|i in T_v}. Note that this is different from the freeze step from the 1-bit Adam. The var_freeze_step is usually the end of the learning rate warmup and thus does not require tuning. (default: 100000) var_update_scaler (int, optional): The interval to update the variance. Note that the update policy for variance follows an exponential rule, where var_update_scaler denotes the kappa in the 0/1 Adam paper. (default: 16) local_step_scaler (int, optional): The interval to scale the local steps interval according to the learning rate policy. (default: 32678) local_step_clipper (int, optional): The largest interval for local steps with learning rate policy. This corresponds to the variable H in the 0/1 Adam paper. (default: 16) amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ (default: False) NOT SUPPORTED in 0/1 Adam! eps_inside_sqrt (boolean, optional): in the 'update parameters' step, adds eps to the bias-corrected second moment estimate before evaluating square root instead of adding it to the square root of second moment estimate as in the original paper. (default: False) cuda_aware (boolean, required): Set True if the underlying MPI implementation supports CUDA-Aware communication. (default: False) comm_backend_name (string, optional): Set to 'mpi' if needed. (default: 'nccl') .. _Adam\\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. 
_On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__(self, params, deepspeed=None, lr=1e-3, bias_correction=True, betas=(0.9, 0.999), eps=1e-8, eps_inside_sqrt=False, weight_decay=0., max_grad_norm=0., var_freeze_step=100000, var_update_scaler=16, local_step_scaler=32678, local_step_clipper=16, amsgrad=False, cuda_aware=False, comm_backend_name='nccl'): if amsgrad: raise RuntimeError('0/1 Adam does not support the AMSGrad variant.') defaults = dict(lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay, max_grad_norm=max_grad_norm) super(ZeroOneAdam, self).__init__(params, defaults) self.eps_mode = 0 if eps_inside_sqrt else 1 assert (dist.is_initialized()) self.deepspeed = deepspeed self.initialize = False self.cuda_aware = cuda_aware self.using_pipeline = False self.var_freeze_step = var_freeze_step self.var_update_scaler = var_update_scaler self.local_step_scaler = local_step_scaler self.local_step_clipper = local_step_clipper self.freeze_key = False self.reinitial_error_buffer = False self.comm_backend_name = comm_backend_name # Empty initializer. Set handle based on the comm backend as follows. self.comm_backend_handle = None if self.comm_backend_name == 'nccl': TORCH_MAJOR = int(torch.__version__.split('.')[0]) TORCH_MINOR = int(torch.__version__.split('.')[1]) assert ( (TORCH_MAJOR == 1 and TORCH_MINOR >= 8) or TORCH_MAJOR >= 2 ), "Please use torch 1.8 or greater to enable NCCL backend in 0/1 Adam. Alternatively, please specify 'mpi' as the 'comm_backend_name' in config file to proceed with the MPI backend" assert dist.is_initialized() == True, "Please initialize the torch distributed backend." from deepspeed.runtime.comm.nccl import NcclBackend self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce') self.comm_backend_handle = NcclBackend(self.deepspeed.mpu) elif self.comm_backend_name == 'mpi': from deepspeed.runtime.comm.mpi import MpiBackend self.comm_backend_handle = MpiBackend(cuda_aware) self.size = self.comm_backend_handle.size self.divider = int(self.size * 8 / np.gcd(self.size, 8)) def step(self, closure=None, grads=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. grads (list of tensors, optional): weight gradient to use for the optimizer update. If gradients have type torch.half, parameters are expected to be in type torch.float. (default: None) output params (list of tensors, optional): A reduced precision copy of the updated weights written out in addition to the regular updated weights. Have to be of same type as gradients. (default: None) scale (float, optional): factor to divide gradient tensor values by before applying to weights. 
(default: 1) """ loss = None if closure is not None: loss = closure() if grads is None: grads_group = [None] * len(self.param_groups) # backward compatibility # assuming a list/generator of parameter means single group elif isinstance(grads, types.GeneratorType): grads_group = [grads] elif type(grads[0]) != list: grads_group = [grads] else: grads_group = grads for group, grads_this_group in zip(self.param_groups, grads_group): if grads_this_group is None: grads_this_group = [None] * len(group['params']) bias_correction = 1 if group['bias_correction'] else 0 for p, grad in zip(group['params'], grads_this_group): if p.grad is None and grad is None: continue if grad is None: grad = p.grad.data if grad.is_sparse: raise RuntimeError('0/1 Adam does not support sparse gradients') state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p.data) if not self.initialize or 'worker_error' not in state.keys(): # Some scalars to help scale the variance update/local step policies state['var_interval'] = 1 state['var_counter'] = 0 state['local_step_interval'] = 1 state['local_step_counter'] = 0 state['lrs'] = 0 state['tensor_size'] = torch.numel(p.data) state['corrected_tensor_size'] = state['tensor_size'] if state['tensor_size'] % (self.size * self.divider) != 0: state['corrected_tensor_size'] += ((self.size * self.divider) - (state['tensor_size'] % (self.size * self.divider))) state['server_chunk_size'] = state['corrected_tensor_size'] // self.size get_accelerator().empty_cache() state['worker_error'] = torch.zeros(state['corrected_tensor_size'], device=p.device) state['server_error'] = torch.zeros(state['server_chunk_size'], device=p.device) # Accumulation of momentum, i.e., the u variable in the 0/1 Adam paper state['momentum_accumulator'] = torch.zeros_like(p.data) get_accelerator().empty_cache() # self.freeze_key = True if not self.initialize and dist.get_rank() == 0: print("Cupy Buffers Initialized Successfully.") exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] comm_buffer = state['momentum_accumulator'] beta1, beta2 = group['betas'] state['step'] += 1 if self.initialize: if self.freeze_key is False: if state['step'] % state['var_interval'] == 0: exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) else: if self.size > 1: with torch.no_grad(): grad_onebit = self.comm_backend_handle.compressed_allreduce( grad, state['worker_error'], state['server_error'], self.deepspeed.local_rank) if 'exp_avg_mask' in group: if grad_onebit.device != group['exp_avg_mask'].device: group['exp_avg_mask'] = group['exp_avg_mask'].to(device=grad_onebit.device) grad_onebit.mul_(group['exp_avg_mask']) exp_avg.mul_(beta1).add_(1 - beta1, grad_onebit) else: exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) state['lrs'] += group['lr'] grad = None if not self.initialize: if self.size > 1: comm_buffer.set_( self.comm_backend_handle.compressed_allreduce(comm_buffer, state['worker_error'], state['server_error'], self.deepspeed.local_rank)) if 'exp_avg_mask' in group: if comm_buffer.device != group['exp_avg_mask'].device: group['exp_avg_mask'] = group['exp_avg_mask'].to(device=comm_buffer.device) comm_buffer.mul_(group['exp_avg_mask']) if self.initialize: update = exp_avg / (exp_avg_sq.sqrt() + group['eps']) if group['weight_decay'] > 0.0: update += 
group['weight_decay'] * p.data with torch.no_grad(): p.data.add_(-group['lr'] * update) if self.freeze_key is True: comm_buffer.add_(-group['lr'] * update) if state['step'] % state['local_step_interval'] == 0 and self.freeze_key: with torch.no_grad(): p.data.add_(-1 * comm_buffer) comm_buffer.mul_(exp_avg_sq.sqrt() + group['eps']) if self.size > 1: comm_buffer.copy_( self.comm_backend_handle.compressed_allreduce(comm_buffer, state['worker_error'], state['server_error'], self.deepspeed.local_rank)) if 'exp_avg_mask' in group: if comm_buffer.device != group['exp_avg_mask'].device: group['exp_avg_mask'] = group['exp_avg_mask'].to(device=comm_buffer.device) comm_buffer.mul_(group['exp_avg_mask']) exp_avg.zero_().add_(comm_buffer / state['lrs'], alpha=-1) p.data.add_(comm_buffer / (exp_avg_sq.sqrt() + group['eps'])) comm_buffer.zero_() state['lrs'] = 0 # According to 0/1 Adam theory, a fixed variance would allow more accurate estimation of momentum # However, in practice, we can also disable the manual freezing of variance, since the interval of # updating variance will increase exponentially, so that it has negligible effect on the estimation. if self.freeze_key is False: if state['step'] % state['var_interval'] == 0: state['var_counter'] += 1 if state['var_counter'] == self.var_update_scaler: state['var_counter'] = 0 state['var_interval'] *= 2 if (state['step'] + 1) % state['var_interval'] == 0: if self.using_pipeline: self.deepspeed.pipeline_enable_backward_allreduce = True else: self.deepspeed.enable_backward_allreduce = True else: if self.using_pipeline: self.deepspeed.pipeline_enable_backward_allreduce = False else: self.deepspeed.enable_backward_allreduce = False else: state['local_step_counter'] += 1 if state['local_step_counter'] == self.local_step_scaler: state['local_step_counter'] = 0 state['local_step_interval'] = min(self.local_step_clipper, state['local_step_interval'] * 2) if not self.initialize: print('Pop out errors', flush=True) self.freeze_key = False state.pop('worker_error') state.pop('server_error') if not self.initialize: self.initialize = True print(f"Finished the initialization step at rank {dist.get_rank()}") return loss if self.state[self.param_groups[0]['params'][0]]['step'] > self.var_freeze_step: self.freeze_key = True if self.using_pipeline: self.deepspeed.pipeline_enable_backward_allreduce = False else: self.deepspeed.enable_backward_allreduce = False if self.freeze_key is True and self.reinitial_error_buffer is False: # We need to reinitialize the error buffers when local step > 1 since # the errors will be logged for different metrics (gradient vs. accumulated momentum). for group in self.param_groups: for p in group['params']: self.state[p]['worker_error'].zero_() self.state[p]['server_error'].zero_() self.reinitial_error_buffer = True return loss def load_state_dict(self, state_dict): """ Overrides load_state_dict() to add special handling when loading checkpoints """ # Because at different stage exp_avg_mask may change (e.g., # BERT pre-training seqlen 128 and 512 ), we don't use the exp_avg_mask # in checkpoints but always use the one user provided in training script. # (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.) 
# Thus here we keep the exp_avg_mask unchanged when loading checkpoint for i, group in enumerate(self.param_groups): if 'exp_avg_mask' in group: state_dict['param_groups'][i]['exp_avg_mask'] = group['exp_avg_mask'] elif 'exp_avg_mask' not in group and 'exp_avg_mask' in state_dict['param_groups'][i]: state_dict['param_groups'][i].pop('exp_avg_mask') super().load_state_dict(state_dict) if self.state[self.param_groups[0]['params'][0]]['step'] < self.var_freeze_step: self.var_freeze_key = False if (self.state[self.param_groups[0]['params'][0]]['step'] + 1) % self.state[self.param_groups[0]['params'][0]]['var_interval'] == 0: if self.using_pipeline: self.deepspeed.pipeline_enable_backward_allreduce = True else: self.deepspeed.enable_backward_allreduce = True else: if self.using_pipeline: self.deepspeed.pipeline_enable_backward_allreduce = False else: self.deepspeed.enable_backward_allreduce = False else: self.var_freeze_key = True if self.using_pipeline: self.deepspeed.pipeline_enable_backward_allreduce = False else: self.deepspeed.enable_backward_allreduce = False self.reinitial_error_buffer = False for group in self.param_groups: for p in group['params']: if 'worker_error' in self.state[p]: self.state[p].pop('worker_error') if 'server_error' in self.state[p]: self.state[p].pop('server_error') if 'momentum_accumulator' in self.state[p]: self.state[p].pop('momentum_accumulator')
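# Illustrative sketch (not part of DeepSpeed): a tiny, standalone simulation of the two
# interval policies used by ZeroOneAdam above. The variance-update interval doubles every
# `var_update_scaler` refreshes, and after the variance freeze the local-step interval
# doubles every `local_step_scaler` counts, capped by `local_step_clipper`. All names and
# default values below are local to this sketch, chosen only to make the schedule visible.


def simulate_intervals(total_steps=64, var_update_scaler=4, local_step_scaler=8,
                       local_step_clipper=16, var_freeze_step=32):
    var_interval, var_counter = 1, 0
    local_interval, local_counter = 1, 0
    frozen = False
    schedule = []
    for step in range(1, total_steps + 1):
        if not frozen:
            if step % var_interval == 0:
                var_counter += 1
                if var_counter == var_update_scaler:
                    var_counter = 0
                    var_interval *= 2  # variance refresh interval grows exponentially
            if step > var_freeze_step:
                frozen = True          # stop refreshing the variance estimate
        else:
            local_counter += 1
            if local_counter == local_step_scaler:
                local_counter = 0
                local_interval = min(local_step_clipper, local_interval * 2)
        schedule.append((step, var_interval, local_interval, frozen))
    return schedule


if __name__ == "__main__":
    for step, var_iv, local_iv, frozen in simulate_intervals()[:16]:
        print(f"step={step:3d} var_interval={var_iv:3d} local_interval={local_iv:3d} frozen={frozen}")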
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import types import torch import numpy as np from deepspeed import comm as dist from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors from deepspeed.accelerator import get_accelerator class OnebitLamb(torch.optim.Optimizer): """Implements the 1-bit Lamb algorithm. Currently GPU-only. For usage example please see https://www.deepspeed.ai/tutorials/onebit-lamb/ For technical details please see our paper https://arxiv.org/abs/2104.06069. Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups. lr (float, optional): learning rate. (default: 1e-3) freeze_step (int, optional): Number of steps for warmup (uncompressed) stage before we start using compressed communication. (default 100000) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square. (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability. (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) max_coeff(float, optional): maximum value of the lamb coefficient (default: 10.0) min_coeff(float, optional): minimum value of the lamb coefficient (default: 0.01) amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ (default: False) NOT SUPPORTED in 1-bit Lamb! eps_inside_sqrt (boolean, optional): in the 'update parameters' step, adds eps to the bias-corrected second moment estimate before evaluating square root instead of adding it to the square root of second moment estimate as in the original paper. (default: False) cuda_aware (boolean, required): Set True if the underlying MPI implementation supports CUDA-Aware communication. (default: False) comm_backend_name (string, optional): Set to 'mpi' if needed. (default: 'nccl') coeff_beta (float, optional): coefficient used for computing running averages of lamb coefficient (default: 0.9) note that you may want to increase or decrease this beta depending on the freeze_step you choose, as 1/(1 - coeff_beta) should be smaller than or equal to freeze_step factor_max (float, optional): maximum value of scaling factor to the frozen lamb coefficient during compression stage (default: 4.0) factor_min (float, optional): minimum value of scaling factor to the frozen lamb coefficient during compression stage (default: 0.5) factor_threshold (float, optional): threshold of how much the scaling factor can fluctuate between steps (default: 0.1) .. _Large Batch Optimization for Deep Learning\\: Training BERT in 76 minutes: https://arxiv.org/abs/1904.00962 .. _Adam\\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. 
_On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__(self, params, deepspeed=None, lr=1e-3, freeze_step=100000, bias_correction=True, betas=(0.9, 0.999), eps=1e-8, eps_inside_sqrt=False, weight_decay=0., max_grad_norm=0., max_coeff=10.0, min_coeff=0.01, amsgrad=False, cuda_aware=False, comm_backend_name='nccl', coeff_beta=0.9, factor_max=4.0, factor_min=0.5, factor_threshold=0.1): if amsgrad: raise RuntimeError('1-bit Lamb does not support the AMSGrad variant.') defaults = dict(lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay, max_grad_norm=max_grad_norm, max_coeff=max_coeff, min_coeff=min_coeff) super(OnebitLamb, self).__init__(params, defaults) self.eps_mode = 0 if eps_inside_sqrt else 1 assert (dist.is_initialized()) self.deepspeed = deepspeed self.lamb_freeze_key = False self.initialize = False self.freeze_step = freeze_step self.cuda_aware = cuda_aware self.coeff_beta = coeff_beta self.factor_max = factor_max self.factor_min = factor_min self.factor_threshold = factor_threshold self.using_pipeline = False self.comm_backend_name = comm_backend_name # Empty initializer. Set handle based on the comm backend as follows. self.comm_backend_handle = None if self.comm_backend_name == 'nccl': TORCH_MAJOR = int(torch.__version__.split('.')[0]) TORCH_MINOR = int(torch.__version__.split('.')[1]) assert ( (TORCH_MAJOR == 1 and TORCH_MINOR >= 8) or TORCH_MAJOR >= 2 ), "Please use torch 1.8 or greater to enable NCCL backend in 1-bit Adam. Alternatively, please specify 'mpi' as the 'comm_backend_name' in config file to proceed with the MPI backend" assert dist.is_initialized() == True, "Please initialize the torch distributed backend." from deepspeed.runtime.comm.nccl import NcclBackend self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce') self.comm_backend_handle = NcclBackend(self.deepspeed.mpu) elif self.comm_backend_name == 'mpi': from deepspeed.runtime.comm.mpi import MpiBackend self.comm_backend_handle = MpiBackend(cuda_aware) self.size = self.comm_backend_handle.size self.divider = int(self.size * 8 / np.gcd(self.size, 8)) self.exp_avg_flat = [] self.dummy_exp_avg = {} self.corrected_tensor_sizes = [] self.server_chunk_sizes = [] self.worker_errors = [] self.server_errors = [] self.lamb_coeffs = [] def step(self, closure=None, grads=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. grads (list of tensors, optional): weight gradient to use for the optimizer update. If gradients have type torch.half, parameters are expected to be in type torch.float. (default: None) """ loss = None if closure is not None: loss = closure() if grads is None: grads_group = [None] * len(self.param_groups) # backward compatibility # assuming a list/generator of parameter means single group elif isinstance(grads, types.GeneratorType): grads_group = [grads] elif type(grads[0]) != list: grads_group = [grads] else: grads_group = grads #remove the previous stats del self.lamb_coeffs[:] if self.lamb_freeze_key: exp_avg_last_step = [] for group in self.param_groups: exp_avg_last_step.append([self.state[p]['exp_avg'].detach().clone() for p in group['params']]) if 'scaling_coeff' not in self.state[self.param_groups[0]['params'][0]]: # Compute the scaling_coeff for each momentum at the end of warmup stage. # This is used to reduce compression error during compression stage. 
momentum_scales = [] for group in self.param_groups: momentum_scales.append([ (torch.norm(self.state[p]['exp_avg']) / np.sqrt(torch.numel(self.state[p]['exp_avg']))).item() for p in group['params'] ]) united_scale = sum([sum(x) for x in momentum_scales]) / sum([len(x) for x in momentum_scales]) for i, group in enumerate(self.param_groups): for j, p in enumerate(group['params']): self.state[p]['scaling_coeff'] = united_scale / momentum_scales[i][j] for group, grads_this_group in zip(self.param_groups, grads_group): if grads_this_group is None: grads_this_group = [None] * len(group['params']) bias_correction = 1 if group['bias_correction'] else 0 for p, grad in zip(group['params'], grads_this_group): if p.grad is None and grad is None: continue if grad is None: grad = p.grad.data if grad.is_sparse: raise RuntimeError('1-bit Lamb does not support sparse gradients') state = self.state[p] # State initialization if len(state) == 0 or (len(state) == 1 and 'scaling_coeff' in state.keys()): state['step'] = 0 state['lamb_coeff_freeze'] = 0.0 state['last_factor'] = 1.0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p.data) state['exp_avg_sq_fresh'] = torch.zeros_like(p.data) if not self.initialize: self.lamb_freeze_key = True exp_avg, exp_avg_sq, exp_avg_sq_fresh = state['exp_avg'], state['exp_avg_sq'], state[ 'exp_avg_sq_fresh'] beta1, beta2 = group['betas'] max_coeff = group['max_coeff'] min_coeff = group['min_coeff'] state['step'] += 1 if self.lamb_freeze_key is False: # warmup stage, baseline Lamb optimization exp_avg.mul_(beta1).add_(1 - beta1, grad) exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) if state['step'] == self.freeze_step: exp_avg_sq_fresh.data = exp_avg_sq.detach().clone() grad = None if self.initialize: weight_norm = p.data.pow(2).sum().sqrt() update = exp_avg / (exp_avg_sq.sqrt() + group['eps']) if group['weight_decay'] > 0.0: update += group['weight_decay'] * p.data update_norm = update.pow(2).sum().sqrt() lamb_coeff = 1.0 if weight_norm != 0 and update_norm != 0: lamb_coeff = (weight_norm / update_norm).item() if lamb_coeff > max_coeff: lamb_coeff = max_coeff if lamb_coeff < min_coeff: lamb_coeff = min_coeff if lamb_coeff != 1.0: state['lamb_coeff_freeze'] = self.coeff_beta * state['lamb_coeff_freeze'] + ( 1 - self.coeff_beta) * lamb_coeff self.lamb_coeffs.append(lamb_coeff) with torch.no_grad(): p.add_(-group['lr'] * lamb_coeff * update) else: # compression stage, update each momentum locally, then # communicate based on the compressed_allreduce below if self.initialize: exp_avg.mul_(beta1).add_(1 - beta1, grad) exp_avg.mul_(self.state[p]['scaling_coeff']) grad = None # init fused momentum if len(self.exp_avg_flat) == 0: momentum_groups = [] tensor_size = 0 for group in self.param_groups: for p in group['params']: momentum_groups.append(self.state[p]['exp_avg']) tensor_size += torch.numel(p.data) corrected_tensor_size = tensor_size if tensor_size % (self.size * self.divider) != 0: difference = ((self.size * self.divider) - (tensor_size % (self.size * self.divider))) corrected_tensor_size += difference self.dummy_exp_avg[0] = torch.zeros(difference, device=momentum_groups[0].data.device) momentum_groups.append(self.dummy_exp_avg[0]) self.corrected_tensor_sizes.append(corrected_tensor_size) self.server_chunk_sizes.append(corrected_tensor_size // self.size) self.exp_avg_flat.append(_flatten_dense_tensors([p.detach().clone() for p in 
momentum_groups])) updated_params = _unflatten_dense_tensors(self.exp_avg_flat[0], momentum_groups) for p, q in zip(momentum_groups, updated_params): p.data = q.data if self.initialize and len(self.worker_errors) == 0: get_accelerator().empty_cache() for i in range(len(self.exp_avg_flat)): self.worker_errors.append( torch.zeros(self.corrected_tensor_sizes[i], device=self.exp_avg_flat[i].device)) self.server_errors.append(torch.zeros(self.server_chunk_sizes[i], device=self.exp_avg_flat[i].device)) get_accelerator().empty_cache() if self.lamb_freeze_key: if self.size > 1: for i in range(len(self.exp_avg_flat)): if not self.initialize: get_accelerator().empty_cache() self.worker_errors.append( torch.zeros(self.corrected_tensor_sizes[i], device=self.exp_avg_flat[i].device)) self.server_errors.append( torch.zeros(self.server_chunk_sizes[i], device=self.exp_avg_flat[i].device)) get_accelerator().empty_cache() if dist.get_rank() == 0: print("Cupy Buffers Initialized Successfully.") self.comm_backend_handle.compressed_allreduce(self.exp_avg_flat[i], self.worker_errors[0], self.server_errors[0], self.deepspeed.local_rank) if dist.get_rank() == 0: print('Pop out errors', flush=True) del self.worker_errors[:] del self.server_errors[:] else: self.comm_backend_handle.compressed_allreduce(self.exp_avg_flat[i], self.worker_errors[i], self.server_errors[i], self.deepspeed.local_rank) if self.lamb_freeze_key and self.initialize: for i, group in enumerate(self.param_groups): bias_correction = 1 if group['bias_correction'] else 0 for j, p in enumerate(group['params']): state = self.state[p] exp_avg, exp_avg_sq, exp_avg_sq_fresh = state['exp_avg'], state['exp_avg_sq'], state[ 'exp_avg_sq_fresh'] beta1, beta2 = group['betas'] exp_avg.div_(self.state[p]['scaling_coeff']) # Because 1-bit compression cannot represent exact zero, it is required to # provide a momentum mask for those params that have constant exact zeros in their # momentums, otherwise the compression error would keep accumulating. # For example, for BERT pre-training seq 128, bert.embeddings.position_embeddings.weight # always have exact zeros in its momentum for row 129 to 512, because it only # learns up to seq length 128 while the model supports up to 512 seq length. # (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py about how # to add this exp_avg_mask for BERT pre-training.) 
if 'exp_avg_mask' in group: if exp_avg.device != group['exp_avg_mask'].device: group['exp_avg_mask'] = group['exp_avg_mask'].to(device=exp_avg.device) exp_avg.mul_(group['exp_avg_mask']) grad_reconstruct = ((exp_avg - exp_avg_last_step[i][j] * beta1) / (1 - beta1)) exp_avg_sq_fresh.mul_(beta2).addcmul_(1 - beta2, grad_reconstruct, grad_reconstruct) denom = exp_avg_sq.sqrt() + group['eps'] update_prelim = exp_avg / denom if group['weight_decay'] > 0.0: update = update_prelim + group['weight_decay'] * p.data else: update = update_prelim lamb_coeff = 1.0 update_norm = update.pow(2).sum().sqrt() denom_real = exp_avg_sq_fresh.sqrt() + group['eps'] factor = (denom / denom_real).max().item() if group['weight_decay'] > 0.0: update_ratio = min(1.0, (update_prelim.pow(2).sum().sqrt() / update_norm).item()) factor = factor * update_ratio + (1.0 - update_ratio) if factor > self.factor_max: factor = self.factor_max if factor < self.factor_min: factor = self.factor_min if factor > state['last_factor'] * (1.0 + self.factor_threshold): factor = state['last_factor'] * (1.0 + self.factor_threshold) if factor < state['last_factor'] * (1.0 - self.factor_threshold): factor = state['last_factor'] * (1.0 - self.factor_threshold) state['last_factor'] = factor lamb_coeff = state['lamb_coeff_freeze'] * factor self.lamb_coeffs.append(lamb_coeff) with torch.no_grad(): p.add_(-group['lr'] * lamb_coeff * update) del exp_avg_last_step[:] exp_avg_last_step = None if not self.initialize: self.lamb_freeze_key = False self.initialize = True print(f"Finished the initialization step at rank {dist.get_rank()}") return loss if self.lamb_freeze_key is False: if state['step'] >= self.freeze_step: print('OnebitLamb - starting compressed communication') self.lamb_freeze_key = True if self.using_pipeline: self.deepspeed.pipeline_enable_backward_allreduce = False else: self.deepspeed.enable_backward_allreduce = False return loss def load_state_dict(self, state_dict): """ Overrides load_state_dict() to add special handling when loading checkpoints """ # Because at different stage exp_avg_mask may change (e.g., # BERT pre-training seqlen 128 and 512 ), we don't use the exp_avg_mask # in checkpoints but always use the one user provided in training script. # (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.) 
# Thus here we keep the exp_avg_mask unchanged when loading checkpoint for i, group in enumerate(self.param_groups): if 'exp_avg_mask' in group: state_dict['param_groups'][i]['exp_avg_mask'] = group['exp_avg_mask'] elif 'exp_avg_mask' not in group and 'exp_avg_mask' in state_dict['param_groups'][i]: state_dict['param_groups'][i].pop('exp_avg_mask') super().load_state_dict(state_dict) # need to reset the fused momentum since loading states will break the linking del self.exp_avg_flat[:] self.dummy_exp_avg.clear() del self.corrected_tensor_sizes[:] del self.server_chunk_sizes[:] if self.state[self.param_groups[0]['params'][0]]['step'] < self.freeze_step: if dist.get_rank() == 0: print("Checkpoint loaded and OnebitLamb warmup stage starts/continues.") if self.lamb_freeze_key is True: self.lamb_freeze_key = False if self.using_pipeline: self.deepspeed.pipeline_enable_backward_allreduce = True else: self.deepspeed.enable_backward_allreduce = True for group in self.param_groups: for p in group['params']: self.state[p]['lamb_coeff_freeze'] = 0.0 self.state[p]['last_factor'] = 1.0 if 'scaling_coeff' in self.state[p]: self.state[p].pop('scaling_coeff') else: if dist.get_rank() == 0: print("Checkpoint loaded and OnebitLamb compression stage starts/continues.") if self.lamb_freeze_key is False: self.lamb_freeze_key = True if self.using_pipeline: self.deepspeed.pipeline_enable_backward_allreduce = False else: self.deepspeed.enable_backward_allreduce = False # We reset the compression errors when loading checkpoints for 3 reasons: # 1) The worker and server error at each GPU are distinct, so in current implementation # only rank 0's errors are saved in the checkpoint. Thus we have to reset the errors. # If we want to save them correctly we need O(num_gpu*model_size) memory in order to # gather all the error, which is a very large memory requirement. It's possible to save # them in a distributed way, but it will make the checkpoint saving/loading much more complicated. # 2) Even if we are able to save the compression errors correctly, you need to have the # exact same number of GPUs in order to load them correctly. # 3) We verified on BERT pre-training that occasionally resetting the compression error # at checkpoint loading does not affect the convergence. # However, please avoid frequent checkpoint loading which could break the error # compensation mechanism thus affect the convergence. del self.worker_errors[:] del self.server_errors[:] def get_lamb_coeffs(self): return self.lamb_coeffs
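# Illustrative sketch (not part of DeepSpeed): how the per-tensor `scaling_coeff` used by
# OnebitLamb's compression stage can be derived at the end of warmup. Each momentum tensor
# is rescaled so that all tensors share a comparable RMS magnitude before the fused 1-bit
# allreduce, which reduces compression error. The small epsilon guard is an addition for
# this sketch only.
import math
import torch


def compute_scaling_coeffs(exp_avgs):
    """exp_avgs: list of momentum tensors, one per parameter."""
    # RMS-like scale of each momentum tensor: ||m|| / sqrt(numel(m))
    scales = [(torch.norm(m) / math.sqrt(m.numel())).item() for m in exp_avgs]
    united_scale = sum(scales) / len(scales)
    # Multiplying a momentum by its coeff brings it to the shared scale.
    return [united_scale / max(s, 1e-12) for s in scales]


if __name__ == "__main__":
    momenta = [torch.randn(10) * 0.01, torch.randn(20) * 1.0, torch.randn(5) * 0.1]
    for i, c in enumerate(compute_scaling_coeffs(momenta)):
        print(f"param {i}: scaling_coeff = {c:.4f}")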
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team from .adam import OnebitAdam from .lamb import OnebitLamb from .zoadam import ZeroOneAdam
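# Illustrative sketch (not part of DeepSpeed): the optimizers exported above are normally
# selected through the DeepSpeed JSON config rather than constructed directly. The dict
# below mirrors the shape shown in the 1-bit Adam/Lamb tutorials referenced in the
# docstrings; the optimizer type string and all numeric values are placeholders for this
# example and should be checked against the tutorials for a real run.
ds_config = {
    "train_batch_size": 4096,
    "optimizer": {
        "type": "OneBitAdam",          # or "OneBitLamb" / "ZeroOneAdam"
        "params": {
            "lr": 4e-4,
            "freeze_step": 23000,      # uncompressed warmup steps before the 1-bit stage
            "cuda_aware": False,
            "comm_backend_name": "nccl"
        }
    },
    "fp16": {"enabled": True}
}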
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import types import torch import numpy as np from deepspeed.accelerator import get_accelerator from deepspeed import comm as dist class OnebitAdam(torch.optim.Optimizer): """Implements the 1-bit Adam algorithm. Currently GPU-only. For usage example please see https://www.deepspeed.ai/tutorials/onebit-adam/ For technical details please read https://arxiv.org/abs/2102.02888 Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups. lr (float, optional): learning rate. (default: 1e-3) freeze_step (int, optional): Number of steps for warmup (uncompressed) stage before we start using compressed communication. (default 100000) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square. (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability. (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ (default: False) NOT SUPPORTED in 1-bit Adam! eps_inside_sqrt (boolean, optional): in the 'update parameters' step, adds eps to the bias-corrected second moment estimate before evaluating square root instead of adding it to the square root of second moment estimate as in the original paper. (default: False) cuda_aware (boolean, required): Set True if the underlying MPI implementation supports CUDA-Aware communication. (default: False) comm_backend_name (string, optional): Set to 'mpi' if needed. (default: 'nccl') .. _Adam\\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. _On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__(self, params, deepspeed=None, lr=1e-3, freeze_step=100000, bias_correction=True, betas=(0.9, 0.999), eps=1e-8, eps_inside_sqrt=False, weight_decay=0., max_grad_norm=0., amsgrad=False, cuda_aware=False, comm_backend_name='nccl'): if amsgrad: raise RuntimeError('1-bit Adam does not support the AMSGrad variant.') defaults = dict(lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay, max_grad_norm=max_grad_norm) super(OnebitAdam, self).__init__(params, defaults) self.eps_mode = 0 if eps_inside_sqrt else 1 assert (dist.is_initialized()) self.comm_time = 0.0 self.step_time = 0.0 self.ave_step = 1 self.bk_time = 0.0 self.deepspeed = deepspeed self.adam_freeze_key = False self.initialize = False self.freeze_step = freeze_step self.cuda_aware = cuda_aware self.using_pipeline = False self.comm_backend_name = comm_backend_name # Empty initializer. Set handle based on the comm backend as follows. self.comm_backend_handle = None if self.comm_backend_name == 'nccl': TORCH_MAJOR = int(torch.__version__.split('.')[0]) TORCH_MINOR = int(torch.__version__.split('.')[1]) assert ( (TORCH_MAJOR == 1 and TORCH_MINOR >= 8) or TORCH_MAJOR >= 2 ), "Please use torch 1.8 or greater to enable NCCL backend in 1-bit Adam. Alternatively, please specify 'mpi' as the 'comm_backend_name' in config file to proceed with the MPI backend" assert dist.is_initialized() == True, "Please initialize the torch distributed backend." 
from deepspeed.runtime.comm.nccl import NcclBackend self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce') self.comm_backend_handle = NcclBackend(self.deepspeed.mpu) elif self.comm_backend_name == 'mpi': from deepspeed.runtime.comm.mpi import MpiBackend self.comm_backend_handle = MpiBackend(cuda_aware) self.size = self.comm_backend_handle.size self.divider = int(self.size * 8 / np.gcd(self.size, 8)) def step(self, closure=None, grads=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. grads (list of tensors, optional): weight gradient to use for the optimizer update. If gradients have type torch.half, parameters are expected to be in type torch.float. (default: None) output params (list of tensors, optional): A reduced precision copy of the updated weights written out in addition to the regular updated weights. Have to be of same type as gradients. (default: None) scale (float, optional): factor to divide gradient tensor values by before applying to weights. (default: 1) """ loss = None if closure is not None: loss = closure() gather_time = 0 allgather_time = 0 all_time = 0 if self.adam_freeze_key is False: v_diff_buffer = 0.0 if grads is None: grads_group = [None] * len(self.param_groups) # backward compatibility # assuming a list/generator of parameter means single group elif isinstance(grads, types.GeneratorType): grads_group = [grads] elif type(grads[0]) != list: grads_group = [grads] else: grads_group = grads for group, grads_this_group in zip(self.param_groups, grads_group): if grads_this_group is None: grads_this_group = [None] * len(group['params']) bias_correction = 1 if group['bias_correction'] else 0 for p, grad in zip(group['params'], grads_this_group): if p.grad is None and grad is None: continue if grad is None: grad = p.grad.data if grad.is_sparse: raise RuntimeError('1-bit Adam does not support sparse gradients') state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p.data) if not self.initialize or (self.adam_freeze_key and 'worker_error' not in state.keys()): state['tensor_size'] = torch.numel(p.data) state['corrected_tensor_size'] = state['tensor_size'] if state['tensor_size'] % (self.size * self.divider) != 0: state['corrected_tensor_size'] += ((self.size * self.divider) - (state['tensor_size'] % (self.size * self.divider))) state['server_chunk_size'] = state['corrected_tensor_size'] // self.size get_accelerator().empty_cache() state['worker_error'] = torch.zeros(state['corrected_tensor_size'], device=p.device) state['server_error'] = torch.zeros(state['server_chunk_size'], device=p.device) get_accelerator().empty_cache() self.adam_freeze_key = True if not self.initialize and dist.get_rank() == 0: print("Cupy Buffers Initialized Successfully.") exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] beta1, beta2 = group['betas'] state['step'] += 1 if self.adam_freeze_key is False: exp_avg.mul_(beta1).add_(1 - beta1, grad) exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) grad = None if self.initialize: update = exp_avg / (exp_avg_sq.sqrt() + group['eps']) else: if 'non_freeze' in group.keys() and group['non_freeze'] is True: dist.all_reduce(grad) grad.mul_(1 / dist.get_world_size()) exp_avg.mul_(beta1).add_(1 - beta1, grad) 
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) grad = None else: if self.initialize is True: exp_avg.mul_(beta1).add_(1 - beta1, grad) grad = None if self.size > 1: exp_avg.set_( self.comm_backend_handle.compressed_allreduce(exp_avg, state['worker_error'], state['server_error'], self.deepspeed.local_rank)) # Because 1-bit compression cannot represent exact zero, it is required to # provide a momentum mask for those params that have constant exact zeros in their # momentums, otherwise the compression error would keep accumulating. # For example, for BERT pre-training seq 128, bert.embeddings.position_embeddings.weight # always have exact zeros in its momentum for row 129 to 512, because it only # learns up to seq length 128 while the model supports up to 512 seq length. # (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.) if 'exp_avg_mask' in group: if exp_avg.device != group['exp_avg_mask'].device: group['exp_avg_mask'] = group['exp_avg_mask'].to(device=exp_avg.device) exp_avg.mul_(group['exp_avg_mask']) if self.initialize: update = exp_avg / (exp_avg_sq.sqrt() + group['eps']) if self.initialize: if group['weight_decay'] > 0.0: update += group['weight_decay'] * p.data with torch.no_grad(): p.add_(-group['lr'] * update) if not self.initialize: print('Pop out errors', flush=True) state.pop('worker_error') state.pop('server_error') if not self.initialize: self.adam_freeze_key = False self.initialize = True print(f"Finished the initialization step at rank {dist.get_rank()}") return loss if self.adam_freeze_key is False: if state['step'] >= self.freeze_step: print('OnebitAdam - starting compressed communication') self.adam_freeze_key = True if self.using_pipeline: self.deepspeed.pipeline_enable_backward_allreduce = False else: self.deepspeed.enable_backward_allreduce = False return loss def load_state_dict(self, state_dict): """ Overrides load_state_dict() to add special handling when loading checkpoints """ # Because at different stage exp_avg_mask may change (e.g., # BERT pre-training seqlen 128 and 512 ), we don't use the exp_avg_mask # in checkpoints but always use the one user provided in training script. # (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.) # Thus here we keep the exp_avg_mask unchanged when loading checkpoint for i, group in enumerate(self.param_groups): if 'exp_avg_mask' in group: state_dict['param_groups'][i]['exp_avg_mask'] = group['exp_avg_mask'] elif 'exp_avg_mask' not in group and 'exp_avg_mask' in state_dict['param_groups'][i]: state_dict['param_groups'][i].pop('exp_avg_mask') super().load_state_dict(state_dict) if self.state[self.param_groups[0]['params'][0]]['step'] < self.freeze_step: if dist.get_rank() == 0: print("Checkpoint loaded and OnebitAdam warmup stage starts/continues.") if self.adam_freeze_key is True: self.adam_freeze_key = False if self.using_pipeline: self.deepspeed.pipeline_enable_backward_allreduce = True else: self.deepspeed.enable_backward_allreduce = True else: if dist.get_rank() == 0: print("Checkpoint loaded and OnebitAdam compression stage starts/continues.") if self.adam_freeze_key is False: self.adam_freeze_key = True if self.using_pipeline: self.deepspeed.pipeline_enable_backward_allreduce = False else: self.deepspeed.enable_backward_allreduce = False # We reset the compression errors when loading checkpoints for 3 reasons: # 1) The worker and server error at each GPU are distinct, so in current implementation # only rank 0's errors are saved in the checkpoint. Thus we have to reset the errors. 
# If we want to save them correctly we need O(num_gpu*model_size) memory in order to # gather all the error, which is a very large memory requirement. It's possible to save # them in a distributed way, but it will make the checkpoint saving/loading much more complicated. # 2) Even if we are able to save the compression errors correctly, you need to have the # exact same number of GPUs in order to load them correctly. # 3) We verified on BERT pre-training that occasionally resetting the compression error # at checkpoint loading does not affect the convergence. # However, please avoid frequent checkpoint loading which could break the error # compensation mechanism thus affect the convergence. for group in self.param_groups: for p in group['params']: if 'worker_error' in self.state[p]: self.state[p].pop('worker_error') if 'server_error' in self.state[p]: self.state[p].pop('server_error')
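# Illustrative sketch (not part of DeepSpeed): the essence of the error-feedback
# ("error compensation") idea behind the `worker_error` / `server_error` buffers used by
# compressed_allreduce, shown as a single-process toy. The tensor is quantized to its sign
# (1 bit) scaled by the mean magnitude, and the quantization residual is carried over to
# the next step instead of being discarded. This is a conceptual stand-in, not the actual
# NcclBackend/MpiBackend implementation.
import torch


def onebit_compress_with_error_feedback(tensor, error_buffer):
    compensated = tensor + error_buffer              # fold in residual from the last step
    scale = compensated.abs().mean()                 # one fp scale per tensor
    compressed = compensated.sign() * scale          # 1-bit payload reconstructed locally
    error_buffer.copy_(compensated - compressed)     # remember what compression lost
    return compressed


if __name__ == "__main__":
    torch.manual_seed(0)
    x = torch.randn(8)
    err = torch.zeros_like(x)
    for step in range(3):
        out = onebit_compress_with_error_feedback(x, err)
        print(f"step {step}: residual norm = {err.norm():.4f}")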
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team
"""
DeepSpeed Communication Backend.

In the future, directly use NCCL/MPI/Gloo/etc without requiring torch.distributed. Simply wrap torch.distributed for now.

# Custom DS Backends -- Direct C/Ops
 - NCCL -- [EXPERIMENTAL]
 - MPI -- [EXPERIMENTAL]
 - RCCL -- [EXPERIMENTAL]
 - GLOO -- [EXPERIMENTAL]
# DS backend wrapper for torch.distributed [DEFAULT]
 - T-NCCL -- [DEFAULT]
 - T-GLOO
 - T-MPI

# Backend is the base class
 -- NcclBackend, MpiBackend, and TorchBackend are the main subclasses. TorchBackend is the only officially supported backend for now.
"""


class Backend(object):

    def __init__(self, name='backend', rank=0, size=1):
        self.name = name
        # The world size and rank of the world process group
        self.world_group = None
        self.world_size = size
        self.world_rank = rank
        # Single process group (pg) implementation for now but keep a list for future
        self.process_groups = []
        self.initialized = False

    def is_initialized(self):
        return self.initialized

    def new_group(self):
        # create a new pg and add it to pg list
        pass

    def init_process_group(self):
        # subclasses will initialize them fully
        # - initialize a default world process group and add it to pg list
        self.initialized = True
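# Illustrative sketch (not part of DeepSpeed): a minimal concrete backend built on the
# Backend base class above, assuming the module path deepspeed.comm.backend. Real
# subclasses (TorchBackend, NcclBackend, MpiBackend) do actual process-group setup here;
# this toy only exercises the initialization contract.
from deepspeed.comm.backend import Backend


class ToyBackend(Backend):

    def __init__(self, rank=0, size=1):
        super().__init__(name='toy', rank=rank, size=size)

    def init_process_group(self):
        # a real backend would create and register its default world process group here
        self.initialized = True


if __name__ == "__main__":
    be = ToyBackend(rank=0, size=1)
    assert not be.is_initialized()
    be.init_process_group()
    assert be.is_initialized()
    print("toy backend initialized:", be.is_initialized())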
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team from pydantic import BaseModel from .constants import * class CommsConfig(BaseModel): class Config: validate_all = True validate_assignment = True use_enum_values = True extra = 'forbid' class CommsLoggerConfig(CommsConfig): enabled: bool = COMMS_LOGGER_ENABLED_DEFAULT prof_all: bool = COMMS_LOGGER_PROF_ALL_DEFAULT prof_ops: list = COMMS_LOGGER_PROF_OPS_DEFAULT verbose: bool = COMMS_LOGGER_VERBOSE_DEFAULT debug: bool = COMMS_LOGGER_DEBUG_DEFAULT class DeepSpeedCommsConfig: def __init__(self, ds_config): self.comms_logger_enabled = 'comms_logger' in ds_config if self.comms_logger_enabled: self.comms_logger = CommsLoggerConfig(**ds_config['comms_logger'])
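# Illustrative sketch: the user-facing ds_config fragment that DeepSpeedCommsConfig above
# consumes. The keys mirror CommsLoggerConfig's fields (see also COMMS_LOGGER_FORMAT in
# the comm constants); the op names listed under "prof_ops" are just examples.
ds_config = {
    "comms_logger": {
        "enabled": True,
        "verbose": False,
        "prof_all": False,
        "debug": False,
        "prof_ops": ["all_reduce", "all_gather"]
    }
}
# DeepSpeedCommsConfig(ds_config) would detect the "comms_logger" section and validate it
# into a CommsLoggerConfig, rejecting unknown keys (extra='forbid').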
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team """ DeepSpeed Communication Package: deepspeed.comm deepspeed.comm -- import and use deepspeed.ops.comm -- use torch.distributed directly if both this package and torch.distributed use the same NCCL version -- use custom collectives -- can either use torch.dist or ds.ops.comm? Note: the old 1-bit compressed allreduce variants that resided in deepspeed.runtime.comm will be moved here as well. deepspeed.comm API -- must be kept fully compatible (same signatures) as torch.dist API to ensure backward/cross-framework compatibility. -- e.g. if a client code used from deepspeed import comm as dist instead of import torch.distributed as dist The code should work without breaking any of the public torch.distributed functionality Future: -- deepspeed groups API should be brought into ds.comm """ import torch import os from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT, default_pg_timeout from .constants import * from .reduce_op import * from deepspeed.accelerator import get_accelerator from deepspeed.comm.ccl import CCLBackend from deepspeed.utils.comms_logging import CommsLogger from deepspeed.utils import timer, get_caller_func from deepspeed.comm.torch import TorchBackend from deepspeed import utils from datetime import timedelta # Current deepspeed.comm backend (cdb) global object for simple access by client code cdb = None # Create global timer for ops timers = timer.SynchronizedWallClockTimer() timer_summary = {} comms_logger = CommsLogger() # Maintain objects of all initialized ds backends and assign them using the API functions in this file nccl_backend = None mpi_backend = None ccl_backend = None # This should be set here so all rank/size information from the launcher can be propagated from deepspeed.comm.utils import * class ProcessGroup(): def __init__(self, comm_id, ranks=[]): self.ranks = ranks self.comm_id = comm_id self.size = len(ranks) def _configure_using_config_file(config): if config.comms_logger_enabled: comms_logger.configure(config) def configure( deepspeed_config=None, enabled=None, prof_all=None, prof_ops=None, verbose=None, debug=None, ): if deepspeed_config is not None: _configure_using_config_file(deepspeed_config.comms_config) if enabled is not None: comms_logger.enabled = enabled if prof_all is not None: comms_logger.prof_all = prof_all if prof_ops is not None: comms_logger.prof_ops = prof_ops if verbose is not None: comms_logger.verbose = verbose if debug is not None: comms_logger.debug = debug # Logging wrapper for timing ops def timed_op(func): def log_wrapper(*args, **kwargs): # Add enabled flag so that overhead to each comm op is two if conditions at most if comms_logger.enabled: if ('prof' in kwargs and kwargs['prof']) or comms_logger.prof_all or ('log_name' in kwargs and kwargs['log_name'] in comms_logger.prof_ops): # Need func args for their defaults func_args = get_default_args(func) func_args.update(kwargs) msg_size = get_msg_size_from_args(func, *args, **kwargs) log_name = get_debug_log_name(func_args, comms_logger.debug) timers(log_name).start() # Return the op, then stop the op's timer try: return func(*args, **kwargs) finally: if comms_logger.enabled: # Need to make op blocking for accurate logging get_accelerator().synchronize() # If we're using MPI, we can't simply sync the stream if cdb.using_mpi: cdb.barrier() if ('prof' in kwargs and kwargs['prof']) or comms_logger.prof_all or ( 'log_name' in kwargs and kwargs['log_name'] in comms_logger.prof_ops): 
log_name = get_debug_log_name(func_args, comms_logger.debug) raw_name = func.__name__ timers(log_name).stop() # need temp var since 'elapsed' resets events time_elapsed = timers(log_name).elapsed(reset=False) comms_logger.append(raw_name, log_name, time_elapsed, msg_size) return log_wrapper # For compatibility with torch distributed's init_process_group, we shall retain the signature from PyTorch code. # DeepSpeed NCCL/MPI backend may not need all these params as we will have our own implementation. # Please read full torch.distributed API docs from https://pytorch.org/docs/stable/distributed.html # UNUSED: Future helper function to initialize DS backends def init_deepspeed_backend(ds_backend, timeout, init_method): global cdb global nccl_backend global mpi_backend global ccl_backend rank = int(os.environ["RANK"]) size = int(os.environ["WORLD_SIZE"]) if ds_backend == NCCL_BACKEND: utils.logger.warn("NCCL backend in DeepSpeed not yet implemented") elif ds_backend == MPI_BACKEND: utils.logger.warn("MPI backend in DeepSpeed not yet implemented") elif ds_backend == GLOO_BACKEND: utils.logger.warn("Gloo backend in DeepSpeed not yet implemented") elif ds_backend == CCL_BACKEND: ccl_backend = CCLBackend(rank=rank, world_size=size, timeout=timeout, init_method=init_method) utils.logger.info(f"Initialize {ds_backend} backend") else: utils.logger.warn(f"DeepSpeed does not support {ds_backend} backend") def is_initialized(): #assert cdb is not None, 'DeepSpeed backend not set, please initialize it using init_process_group()' if cdb is None: return False else: return cdb.is_initialized() def destroy_process_group(group=None): global cdb return cdb.destroy_process_group(group=group) def new_group(ranks): global cdb assert cdb is not None and cdb.is_initialized( ), 'DeepSpeed backend not set, please initialize it using init_process_group()' return cdb.new_group(ranks) def is_available() -> bool: # Returns ``True`` if the deepspeed comm package is available. # TODO: load other ops. Clients including deepspeed itself should use deepspeed.comm to import # any communication related primitives from this package. 
# use hasattr(deepspeed.csrc.ops, "_comm") or something return True def set_backend(): global cdb global nccl_backend global mpi_backend global ccl_backend backend_name = get_accelerator().communication_backend_name() if backend_name == NCCL_BACKEND: if nccl_backend is not None and nccl_backend.is_initialized(): cdb = nccl_backend elif backend_name == MPI_BACKEND: if mpi_backend is not None and mpi_backend.is_initialized(): cdb = mpi_backend elif backend_name == CCL_BACKEND: if ccl_backend is not None and ccl_backend.is_initialized(): cdb = ccl_backend @timed_op def broadcast(tensor, src, group=None, async_op=False, prof=False, log_name='broadcast', debug=get_caller_func()): global cdb return cdb.broadcast(tensor=tensor, src=src, group=group, async_op=async_op) @timed_op def all_gather(tensor_list, tensor, group=None, async_op=False, prof=False, log_name='all_gather', debug=get_caller_func()): global cdb return cdb.all_gather(tensor_list=tensor_list, tensor=tensor, group=group, async_op=async_op) def has_reduce_scatter_tensor(): global cdb assert cdb is not None and cdb.is_initialized( ), 'DeepSpeed backend not set, please initialize it using init_process_group()' return cdb.has_reduce_scatter_tensor() def reduce_scatter_fn(output_tensor, tensor, op=ReduceOp.SUM, group=None, async_op=False, prof=False, debug=get_caller_func()): global cdb assert cdb is not None and cdb.is_initialized( ), 'DeepSpeed backend not set, please initialize it using init_process_group()' if cdb.has_reduce_scatter_tensor(): return reduce_scatter_tensor(output_tensor, tensor, op=op, group=group, async_op=async_op, prof=prof, debug=debug) else: if get_rank() == 0: utils.logger.warning_once("unable to find torch.distributed.reduce_scatter_tensor. will fall back to " "torch.distributed.all_gather which will result in suboptimal performance. " "please consider upgrading your pytorch installation.") input_tensor_lst = list(torch.chunk(tensor, cdb.get_world_size(group))) return reduce_scatter(output_tensor, input_tensor_lst, op=op, group=group, async_op=async_op, prof=prof, debug=debug) @timed_op def reduce_scatter_tensor(output_tensor, tensor, op=ReduceOp.SUM, group=None, async_op=False, prof=False, log_name='reduce_scatter_tensor', debug=get_caller_func()): global cdb return cdb.reduce_scatter_tensor(output_tensor=output_tensor, input_tensor=tensor, op=op, group=group, async_op=async_op) @timed_op def all_gather_into_tensor(output_tensor, tensor, group=None, async_op=False, prof=False, log_name='all_gather_into_tensor', debug=get_caller_func()): global cdb return cdb.all_gather_into_tensor(output_tensor=output_tensor, input_tensor=tensor, group=group, async_op=async_op) def has_all_gather_into_tensor(): global cdb assert cdb is not None and cdb.is_initialized( ), 'DeepSpeed backend not set, please initialize it using init_process_group()' return cdb.has_all_gather_into_tensor() def allgather_fn(output_tensor, input_tensor, group=None, async_op=False, debug=get_caller_func()): global cdb assert cdb is not None and cdb.is_initialized( ), 'DeepSpeed backend not set, please initialize it using init_process_group()' if cdb.has_all_gather_into_tensor(): return all_gather_into_tensor(output_tensor, input_tensor, group=group, async_op=async_op, debug=debug) else: if get_rank() == 0: utils.logger.warning_once("unable to find torch.distributed.all_gather_into_tensor. will fall back to " "torch.distributed.all_gather which will result in suboptimal performance. 
" "please consider upgrading your pytorch installation.") output_tensors = list(torch.chunk(output_tensor, cdb.get_world_size(group))) return all_gather(output_tensors, input_tensor, group=group, async_op=async_op, debug=debug) @timed_op def all_to_all_single(output, tensor, output_split_sizes=None, input_split_sizes=None, group=None, async_op=False, prof=False, log_name='all_to_all_single', debug=get_caller_func()): global cdb return cdb.all_to_all_single(output=output, input=tensor, output_split_sizes=output_split_sizes, input_split_sizes=input_split_sizes, group=group, async_op=async_op) @timed_op def send(tensor, dst, group=None, tag=0, prof=False, log_name='send', debug=get_caller_func()): global cdb return cdb.send(tensor=tensor, dst=dst, group=group, tag=tag) @timed_op def recv(tensor, src=None, group=None, tag=0, prof=False, log_name='recv', debug=get_caller_func()): global cdb return cdb.recv(tensor=tensor, src=src, group=group, tag=tag) @timed_op def isend(tensor, dst, group=None, tag=0, prof=False, log_name='isend', debug=get_caller_func()): global cdb return cdb.send(tensor=tensor, dst=dst, group=group, tag=tag) @timed_op def irecv(tensor, src=None, group=None, tag=0, prof=False, log_name='irecv', debug=get_caller_func()): global cdb return cdb.recv(tensor=tensor, src=src, group=group, tag=tag) @timed_op def gather(tensor, gather_list=None, dst=0, group=None, async_op=False, prof=False, log_name='gather', debug=get_caller_func()): global cdb return cdb.gather(tensor=tensor, gather_list=gather_list, dst=dst, group=group, async_op=async_op) @timed_op def scatter(tensor, scatter_list=None, src=0, group=None, async_op=False, prof=False, log_name='scatter', debug=get_caller_func()): global cdb return cdb.scatter(tensor=tensor, scatter_list=scatter_list, src=src, group=group, async_op=async_op) @timed_op def barrier(group=None, async_op=False, device_ids=None, prof=False, log_name='barrier', debug=get_caller_func()): global cdb return cdb.barrier(group=group, async_op=async_op) @timed_op def monitored_barrier(group=None, timeout=None, wait_all_ranks=False, prof=False, log_name='monitored_barrier', debug=get_caller_func()): global cdb return cdb.barrier(group=group, timeout=timeout, wait_all_ranks=wait_all_ranks) def log_summary(): global cdb barrier(log_name='log_summary_barrier') if cdb.get_rank() == 0: comms_logger.log_all() barrier(log_name='log_summary_barrier') @timed_op def reduce(tensor, dst, op=ReduceOp.SUM, group=None, async_op=False, prof=False, log_name='reduce', debug=get_caller_func()): global cdb return cdb.reduce(tensor=tensor, dst=dst, op=op, group=group, async_op=async_op) @timed_op def reduce_scatter(output, input_list, op=ReduceOp.SUM, group=None, async_op=False, prof=False, log_name='reduce_scatter', debug=get_caller_func()): global cdb return cdb.reduce_scatter(output=output, input_list=input_list, op=op, group=group, async_op=async_op) def has_all_reduce_coalesced(): """""" global cdb assert cdb is not None and cdb.is_initialized( ), 'DeepSpeed backend not set, please initialize it using init_process_group()' assert cdb.has_all_reduce_coalesced is not None, 'has_all_reduce_coalesced is not yet defined' return cdb.has_all_reduce_coalesced def has_coalescing_manager(): global cdb assert cdb is not None and cdb.is_initialized( ), 'DeepSpeed backend not set, please initialize it using init_process_group()' assert cdb.has_coalescing_manager is not None, 'has_coalescing_manager is not yet defined' return cdb.has_coalescing_manager def 
all_gather_coalesced(output_tensors, input_tensors, group=None, async_op=False): global cdb assert cdb is not None and cdb.is_initialized( ), 'DeepSpeed backend not set, please initialize it using init_process_group()' return cdb.all_gather_coalesced(output_tensors, input_tensors, group=group, async_op=async_op) @timed_op def all_reduce(tensor, op=ReduceOp.SUM, group=None, async_op=False, prof=False, log_name='all_reduce', debug=get_caller_func()): #if profile_comm: # context of the timers? # timers.start() # TensorBoard logging for comm calls.? global cdb #print(f'op = {op}, cdb= {cdb.name}') return cdb.all_reduce(tensor, op, group, async_op) @timed_op def all_reduce_coalesced(tensors, op=ReduceOp.SUM, group=None, async_op=False, prof=False, log_name='all_reduce', debug=get_caller_func()): global cbd return cdb.all_reduce_coalesced(tensors, op, group, async_op) def get_world_group(): global cdb assert cdb is not None and cdb.is_initialized( ), 'DeepSpeed backend not set, please initialize it using init_process_group()' return cdb.get_world_group() def get_world_size(group=None) -> int: """ Returns the number of processes in the current process group Args: group (ProcessGroup, optional): The process group to work on. If None, the default process group will be used. Returns: The world size of the process group -1, if not part of the group """ global cdb assert cdb is not None and cdb.is_initialized( ), 'DeepSpeed backend not set, please initialize it using init_process_group()' return cdb.get_world_size(group) def get_rank(group=None): """ Returns the rank of the current process in the provided ``group`` or the default group if none was provided. Rank is a unique identifier assigned to each process within a distributed process group. They are always consecutive integers ranging from 0 to ``world_size``. Args: group (ProcessGroup, optional): The process group to work on. If None, the default process group will be used. Returns: The rank of the process group -1, if not part of the group """ global cdb assert cdb is not None and cdb.is_initialized( ), 'DeepSpeed backend not set, please initialize it using init_process_group()' return cdb.get_rank(group) def get_local_rank(): """ Helper function to get local rank after a backend has been set and initialized Args: None Returns: local rank (= GPU device ID) """ global cdb assert cdb is not None and cdb.is_initialized( ), 'DeepSpeed backend not set, please initialize it using init_process_group()' return get_local_rank_from_launcher() def get_global_rank(group=None, group_rank=0): global cdb assert cdb is not None and cdb.is_initialized( ), 'DeepSpeed backend not set, please initialize it using init_process_group()' return cdb.get_global_rank(group, group_rank) # Main DeepSpeed Comms. public API. def init_distributed(dist_backend=None, auto_mpi_discovery=True, distributed_port=TORCH_DISTRIBUTED_DEFAULT_PORT, verbose=True, timeout=default_pg_timeout, init_method=None, dist_init_required=None, config=None, rank=-1, world_size=-1): ''' Initialize dist backend, potentially performing MPI discovery if needed Arguments: dist_backend: Optional (str). torch distributed backend, e.g., nccl, mpi, gloo auto_mpi_discovery Optional (bool). if distributed environment variables are not set, attempt to discover them from MPI distributed_port: Optional (int). torch distributed backend port verbose: Optional (bool). verbose logging timeout: Optional (timedelta). Timeout for operations executed against the process group. Default value equals 30 minutes. 
init_method: Optional (string). Torch distributed, URL specifying how to initialize the process group. Default is “env://” if no init_method or store is specified. config: Optional (dict). DeepSpeed configuration for setting up comms options (e.g. Comms profiling) rank: Optional (int). The current manually specified rank. Some init_method like “tcp://” need the rank and world_size as well (see: https://pytorch.org/docs/stable/distributed.html#tcp-initialization) world_size: Optional (int). Desired world_size for the TCP or Shared file-system initialization. ''' global cdb configure(deepspeed_config=config) if dist_init_required is None: dist_init_required = cdb is None or not cdb.is_initialized() if cdb is None: init_deepspeed_backend(get_accelerator().communication_backend_name(), timeout, init_method) set_backend() utils.logger.info(f'cdb={cdb}') if cdb is None and torch.distributed.is_initialized(): # The user initialized torch.dist themselves, create cdb and short-circuit cdb = TorchBackend(dist_backend, timeout, init_method) return if dist_init_required is False: assert ( cdb is not None and cdb.is_initialized() is True ), "Distributed backend is not initialized. Please set dist_init_required to True or initialize before calling deepspeed.initialize()" else: # Initialize torch distributed if needed required_env = ["RANK", "WORLD_SIZE", "MASTER_ADDR", "MASTER_PORT", "LOCAL_RANK"] if auto_mpi_discovery and not all(map(lambda v: v in os.environ, required_env)): if verbose: utils.logger.info("Not using the DeepSpeed or dist launchers, attempting to detect MPI environment...") if in_aml() and not in_dlts(): patch_aml_env_for_torch_nccl_backend(verbose=verbose) elif in_aws_sm(): patch_aws_sm_env_for_torch_nccl_backend(verbose=verbose) else: mpi_discovery(distributed_port=distributed_port, verbose=verbose) if cdb is not None and cdb.is_initialized(): if int(os.getenv('RANK', '0')) == 0: utils.logger.info('Distributed backend already initialized') else: assert isinstance(timeout, timedelta) if dist_backend == None: dist_backend = get_accelerator().communication_backend_name() if int(os.getenv('RANK', '0')) == 0: utils.logger.info('Initializing TorchBackend in DeepSpeed with backend {}'.format(dist_backend)) # Create a torch backend object, initialize torch distributed, and assign to cdb cdb = TorchBackend(dist_backend, timeout, init_method, rank, world_size) def mpi_discovery(distributed_port=TORCH_DISTRIBUTED_DEFAULT_PORT, verbose=True): ''' Discovery MPI environment via mpi4py and map to relevant dist state ''' from mpi4py import MPI import subprocess comm = MPI.COMM_WORLD rank = comm.Get_rank() world_size = comm.Get_size() master_addr = None if rank == 0: hostname_cmd = ["hostname -I"] result = subprocess.check_output(hostname_cmd, shell=True) master_addr = result.decode('utf-8').split()[0] master_addr = comm.bcast(master_addr, root=0) # Determine local rank by assuming hostnames are unique proc_name = MPI.Get_processor_name() all_procs = comm.allgather(proc_name) local_rank = sum([i == proc_name for i in all_procs[:rank]]) os.environ['RANK'] = str(rank) os.environ['WORLD_SIZE'] = str(world_size) os.environ['LOCAL_RANK'] = str(local_rank) os.environ['MASTER_ADDR'] = master_addr os.environ['MASTER_PORT'] = str(distributed_port) if verbose: utils.logger.info( "Discovered MPI settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}". 
format(os.environ['RANK'], os.environ['LOCAL_RANK'], os.environ['WORLD_SIZE'], os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])) if cdb is not None and cdb.is_initialized(): assert cdb.get_rank() == rank, "MPI rank {} does not match torch rank {}".format(rank, cdb.get_rank()) assert cdb.get_world_size() == world_size, "MPI world size {} does not match torch world size {}".format( world_size, cdb.get_world_size()) def in_aml(): # Are we running inside an Azure Machine Learning (AML) environment? return 'AZUREML_EXPERIMENT_ID' in os.environ def in_aws_sm(): # Are we running inside an AWS SageMaker environment? return 'SM_TRAINING_ENV' in os.environ def in_dlts(): # Are we running on a DLTS cluster? return 'DLTS_JOB_ID' in os.environ def patch_aml_env_for_torch_nccl_backend(master_port=6105, verbose=True): """Helper routine to get and set environment variables. This is adapted from Azure ML's documentation available from: https://azure.github.io/azureml-web/docs/cheatsheet/distributed-training/#environment-variables-from-openmpi """ os.environ["RANK"] = os.environ["OMPI_COMM_WORLD_RANK"] os.environ["WORLD_SIZE"] = os.environ["OMPI_COMM_WORLD_SIZE"] single_node = int(os.environ["OMPI_COMM_WORLD_LOCAL_SIZE"]) == int(os.environ["WORLD_SIZE"]) if not single_node: master_node_params = os.environ["AZ_BATCH_MASTER_NODE"].split(":") os.environ["MASTER_ADDR"] = master_node_params[0] # Do not overwrite master port with that defined in AZ_BATCH_MASTER_NODE if "MASTER_PORT" not in os.environ: os.environ["MASTER_PORT"] = str(master_port) else: os.environ["MASTER_ADDR"] = os.environ["AZ_BATCHAI_MPI_MASTER_NODE"] os.environ["MASTER_PORT"] = DEFAULT_AML_MASTER_PORT if verbose: utils.logger.info("NCCL_SOCKET_IFNAME original value = {}".format(os.environ["NCCL_SOCKET_IFNAME"])) os.environ["NCCL_SOCKET_IFNAME"] = DEFAULT_AML_NCCL_SOCKET_IFNAME os.environ['LOCAL_RANK'] = os.environ["OMPI_COMM_WORLD_LOCAL_RANK"] if verbose: utils.logger.info( "Discovered AzureML settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}" .format(os.environ['RANK'], os.environ['LOCAL_RANK'], os.environ['WORLD_SIZE'], os.environ['MASTER_ADDR'], os.environ['MASTER_PORT'])) def patch_aws_sm_env_for_torch_nccl_backend(verbose=True): """Helper routine to get and set environment variables when running inside an AWS SageMaker environment. """ os.environ["RANK"] = os.environ["OMPI_COMM_WORLD_RANK"] os.environ['LOCAL_RANK'] = os.environ["OMPI_COMM_WORLD_LOCAL_RANK"] os.environ["WORLD_SIZE"] = os.environ["OMPI_COMM_WORLD_SIZE"] if verbose: utils.logger.info( "Discovered AWS SageMaker settings of world_rank={}, local_rank={}, world_size={}, master_addr={}, master_port={}" .format(os.environ['RANK'], os.environ['LOCAL_RANK'], os.environ['WORLD_SIZE'], os.environ['MASTER_ADDR'], os.environ['MASTER_PORT']))
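# Illustrative usage sketch: how client code typically drives the deepspeed.comm API
# defined above. It assumes a launcher has set RANK / WORLD_SIZE / MASTER_ADDR /
# MASTER_PORT / LOCAL_RANK (or an MPI environment that init_distributed can
# auto-discover); the backend and device choices below are placeholders.
import torch
from deepspeed import comm as dist


def main():
    dist.init_distributed(dist_backend="nccl")   # or "gloo" / "mpi", or None for the accelerator default
    device = f"cuda:{dist.get_local_rank()}" if torch.cuda.is_available() else "cpu"
    t = torch.ones(4, device=device)
    dist.all_reduce(t)                           # defaults to ReduceOp.SUM across all ranks
    if dist.get_rank() == 0:
        print(f"world_size={dist.get_world_size()}, allreduced={t.tolist()}")
    dist.barrier()


if __name__ == "__main__":
    main()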
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from enum import Enum


class ReduceOp(Enum):
    SUM = 0
    PRODUCT = 1
    MIN = 2
    MAX = 3
    BAND = 4
    BOR = 5
    BXOR = 6
    AVG = 7
    UNUSED = 8
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

NCCL_BACKEND = 'nccl'
CCL_BACKEND = 'ccl'
MPI_BACKEND = 'mpi'
GLOO_BACKEND = 'gloo'
SCCL_BACKEND = 'sccl'

DEFAULT_AML_MASTER_PORT = "54965"
DEFAULT_AML_NCCL_SOCKET_IFNAME = "^docker0,lo"

#########################################
# Comms Logger
#########################################
# Comms Logger. By default, this feature is not enabled.
# Users can configure in ds_config.json as below example:
COMMS_LOGGER_FORMAT = '''
The Comms Logger can be specified as:
"comms_logger": {
  "enabled": true,
  "verbose": false,
  "prof_all": true,
  "debug": false,
  "prof_ops": ["all_reduce", "custom_all_reduce_name"]
}
'''

COMMS_LOGGER = "comms_logger"

# Comms logger enable signal
COMMS_LOGGER_ENABLED = "enabled"
COMMS_LOGGER_ENABLED_DEFAULT = False

# Comms logger verbose signal
COMMS_LOGGER_VERBOSE = "verbose"
COMMS_LOGGER_VERBOSE_DEFAULT = False

# comms logger profile all ops signal
COMMS_LOGGER_PROF_ALL = "prof_all"
COMMS_LOGGER_PROF_ALL_DEFAULT = True

# comms logger show all ops signal
COMMS_LOGGER_DEBUG = "debug"
COMMS_LOGGER_DEBUG_DEFAULT = False

# comms logger profile specific ops in list
COMMS_LOGGER_PROF_OPS = "prof_ops"
COMMS_LOGGER_PROF_OPS_DEFAULT = []
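# Hedged example: a ds_config fragment assembled from the comms-logger keys and
# defaults defined above. Only the key names and defaults come from this file;
# passing such a dict through the `config` argument of init_distributed (as its
# docstring earlier in this dump describes) is shown for illustration only.
EXAMPLE_COMMS_LOGGER_CONFIG = {
    "comms_logger": {
        "enabled": True,             # COMMS_LOGGER_ENABLED_DEFAULT is False
        "verbose": False,            # COMMS_LOGGER_VERBOSE_DEFAULT
        "prof_all": True,            # COMMS_LOGGER_PROF_ALL_DEFAULT
        "debug": False,              # COMMS_LOGGER_DEBUG_DEFAULT
        "prof_ops": ["all_reduce"],  # COMMS_LOGGER_PROF_OPS_DEFAULT is []
    }
}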
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team ''' Copyright 2021 The Microsoft DeepSpeed Team ''' import torch from deepspeed.accelerator import get_accelerator from .reduce_op import ReduceOp from .torch import TorchBackend def build_ccl_op(): builder = get_accelerator().create_op_builder("CCLCommBuilder") if builder == None: return None ccl_cpp_module = builder.load() print(f'DeepSpeed {builder.absolute_name()} built successfully') return ccl_cpp_module class CCLBackend(TorchBackend): def __init__(self, name='ccl', rank=-1, world_size=-1, mpu=None, timeout=None, init_method=None): super(CCLBackend, self).__init__(backend='ccl', name='torch', rank=rank, world_size=world_size, timeout=timeout, init_method=init_method) self.name = 'ccl' self.ccl_comm_op = build_ccl_op() if self.ccl_comm_op == None: # set CCLBackend to uninitialized state if CCLCommBuilder cannot be loaded self.initialized = False return size = self.get_world_size() rank = self.get_rank() main_kvs = self.ccl_comm_op.get_kvs_addr(rank) main_kvs = torch.tensor(main_kvs).to(torch.uint8) super(CCLBackend, self).broadcast(main_kvs, 0) self.ccl_comm_op.initialize(size, rank, main_kvs) self.initialized = True def is_initialized(self): return self.initialized def broadcast(self, tensor, src, group=None, async_op=False): self.ccl_comm_op.broadcast(tensor, src, group, async_op) def all_reduce(self, tensor, op=ReduceOp.SUM, group=None, async_op=False): use_caching = False if use_caching: match_id = f"{tensor.size()}-{op}" self.ccl_comm_op.all_reduce_caching(tensor, op, match_id, group, async_op) else: self.ccl_comm_op.all_reduce(tensor, op, group, async_op) def barrier(self, group=None, async_op=False): self.ccl_comm_op.barrier(group, async_op)
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .utils import *
from .comm import *
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import os import inspect from deepspeed.utils import get_caller_func def get_local_rank_from_launcher(): # DeepSpeed launcher will set it so get from there rank = os.environ.get('LOCAL_RANK') if rank is None: rank = os.environ.get('OMPI_COMM_WORLD_LOCAL_RANK') # Make it a single process job and set rank to 0 if rank is None: rank = 0 return int(rank) def get_world_rank_from_launcher(): # DeepSpeed launcher will set it so get from there rank = os.environ.get('RANK') if rank is None: rank = os.environ.get('OMPI_COMM_WORLD_RANK') # Make it a single process job and set rank to 0 if rank is None: rank = 0 return int(rank) def get_world_size_from_launcher(): # DeepSpeed launcher will set it so get from there size = os.environ.get('WORLD_SIZE') rank = os.environ.get('RANK') if size is None: size = os.environ.get('OMPI_COMM_WORLD_SIZE') # Make it a single process job and set size to 1 if size is None: size = 1 if rank == 0: print(f"set world size to {size}") return int(size) def get_default_args(func): signature = inspect.signature(func) return {k: v.default for k, v in signature.parameters.items() if v.default is not inspect.Parameter.empty} # We need this hacky function since torch doesn't consistently name or place the input tensor args def get_tensor_position(func): sig_params = inspect.signature(func).parameters arg = None # most colls if 'tensor' in sig_params: arg = 'tensor' # reduce scatter coll elif 'input_list' in sig_params: arg = 'input_list' # all_to_all and torch multiGPU colls elif 'input_tensor_list' in sig_params: arg = 'input_tensor_list' if arg is None: return -1 else: return list(sig_params).index(arg) def get_tensor_kwarg(func, kwargs): func_args = get_default_args(func) func_args.update(kwargs) arg = None if 'tensor' in func_args: arg = func_args['tensor'] elif 'input_list' in func_args: arg = func_args['input_list'] elif 'input_tensor_list' in func_args: arg = func_args['input_tensor_list'] return arg def get_msg_size_from_args(func, *args, **kwargs): # 3 cases: # - tensor arg is in args # - tensor arg is in kwargs # - tensor arg is not present (e.g. barrier) tensor_arg_position = -1 tensor_arg = None # check if tensor arg is in args if len(args) > 0: tensor_arg_position = get_tensor_position(func) if tensor_arg_position > -1: tensor_arg = args[get_tensor_position(func)] # check if tensor arg is in kwargs if tensor_arg is None and len(kwargs) > 0: tensor_arg = get_tensor_kwarg(func, kwargs) # if tensor arg is not present, no data is being transmitted if tensor_arg is None: return 0 else: # Sum of tensor sizes for list colls such as torch's all_to_all # NOTE: msg_size for list colls will not be the actual size transmitted by a given MPI/NCCL call within the coll op. Instead, it's the total amount of data transmitted. if type(tensor_arg) is list: return sum(x.element_size() * x.nelement() for x in tensor_arg) else: return tensor_arg.element_size() * tensor_arg.nelement() def get_debug_log_name(func_args, debug): if debug: return func_args['log_name'] + ' | [Caller Func: ' + get_caller_func() + ']' else: return func_args['log_name']
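# Hedged example: the message-size rule applied by get_msg_size_from_args() above,
# shown standalone. A single tensor contributes element_size() * nelement() bytes;
# list-style collectives (e.g. all_to_all) are logged as the sum over their tensors,
# and ops without a tensor argument (e.g. barrier) count as zero bytes.
import torch


def sketch_msg_size(tensor_arg):
    if tensor_arg is None:
        return 0  # e.g. barrier: nothing is transmitted
    if isinstance(tensor_arg, list):
        return sum(t.element_size() * t.nelement() for t in tensor_arg)
    return tensor_arg.element_size() * tensor_arg.nelement()


if __name__ == "__main__":
    t = torch.zeros(1024, dtype=torch.float16)
    print(sketch_msg_size(t))       # 1024 elements * 2 bytes = 2048
    print(sketch_msg_size([t, t]))  # 4096
    print(sketch_msg_size(None))    # 0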
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team from deepspeed import utils from .utils import * from .backend import * from .comm import * import torch def is_torch_two(): TORCH_MAJOR = int(torch.__version__.split('.')[0]) if TORCH_MAJOR >= 2: return True else: return False def has_coalescing_manager(): has_c10d = hasattr(torch.distributed, 'distributed_c10d') return has_c10d and hasattr(torch.distributed.distributed_c10d, '_coalescing_manager') def has_all_reduce_coalesced(): return hasattr(torch.distributed, "all_reduce_coalesced") def get_coalescing_manager(group, device, reqs): if is_torch_two(): return torch.distributed.distributed_c10d._coalescing_manager(group, device=device, reqs=reqs) else: return torch.distributed.distributed_c10d._coalescing_manager(group, reqs) class TorchBackend(Backend): """ A light-weight wrapper class for torch.distributed API. Only a subset of functions are wrapped. Once the init_process_group is initialized, standard torch.distributed.* can be used directly so no need to wrap all the functions. We can keep adding wrappers as needed. """ def __init__(self, backend, timeout, init_method, rank=-1, world_size=-1, name='torch'): super(TorchBackend, self).__init__() self.has_all_reduce_coalesced = has_all_reduce_coalesced() self.has_coalescing_manager = has_coalescing_manager() self.all_gather_function = self.get_all_gather_function() self.reduce_scatter_function = self.get_reduce_scatter_function() self.initialized = True self.name = name # Future functionality to support ds.initialize() on a single GPU # The idea is to fake that dist backend is initialized even when # it is not so we can run on a single GPU without doing any init_process_group self.single_gpu_mode = True self.init_process_group(backend, timeout, init_method, rank, world_size) @classmethod def get_all_gather_function(self): if hasattr(torch.distributed, "all_gather_into_tensor"): return torch.distributed.all_gather_into_tensor elif hasattr(torch.distributed, "_all_gather_base"): return torch.distributed._all_gather_base return None @classmethod def get_reduce_scatter_function(self): if hasattr(torch.distributed, "reduce_scatter_tensor"): return torch.distributed.reduce_scatter_tensor elif hasattr(torch.distributed, "_reduce_scatter_base"): return torch.distributed._reduce_scatter_base return None def has_all_gather_into_tensor(self): return self.all_gather_function is not None def has_reduce_scatter_tensor(self): return self.reduce_scatter_function is not None def init_process_group(self, backend, timeout, init_method, rank, world_size): if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend, timeout=timeout, init_method=init_method, rank=rank, world_size=world_size) self.using_mpi = torch.distributed.get_backend() == 'mpi' def all_reduce(self, tensor, op=torch.distributed.ReduceOp.SUM, group=None, async_op=False): op = self._reduce_op(op) return torch.distributed.all_reduce(tensor=tensor, op=op, group=group, async_op=async_op) def all_reduce_coalesced(self, tensors, op=torch.distributed.ReduceOp.SUM, group=None, async_op=False): """ proxy func to torch.distributed.all_reduce_coalesced, which is included in PyTorch 1.13 and above """ if not self.has_all_reduce_coalesced: raise RuntimeError(f"Current torch version does not have all_reduce_coalesced " f"api (torch.__version__: {torch.__version__})") op = self._reduce_op(op) return torch.distributed.all_reduce_coalesced(tensors=tensors, op=op, group=group, 
async_op=async_op) def reduce(self, tensor, dst, op=ReduceOp.SUM, group=None, async_op=False): return torch.distributed.reduce(tensor=tensor, dst=dst, op=self._reduce_op(op), group=group, async_op=async_op) def reduce_scatter(self, output, input_list, op=ReduceOp.SUM, group=None, async_op=False): return torch.distributed.reduce_scatter(output=output, input_list=input_list, op=self._reduce_op(op), group=group, async_op=async_op) def broadcast(self, tensor, src, group=None, async_op=False): return torch.distributed.broadcast(tensor=tensor, src=src, group=group, async_op=async_op) def all_gather(self, tensor_list, tensor, group=None, async_op=False): return torch.distributed.all_gather(tensor_list=tensor_list, tensor=tensor, group=group, async_op=async_op) def all_gather_into_tensor(self, output_tensor, input_tensor, group=None, async_op=False): if self.has_all_gather_into_tensor(): return self.all_gather_function(output_tensor=output_tensor, input_tensor=input_tensor, group=group, async_op=async_op) else: utils.logger.warning("unable to find torch.distributed._all_gather_base. will fall back to " "torch.distributed.all_gather which will result in suboptimal performance. " "please consider upgrading your pytorch installation.") pass def all_gather_coalesced(self, output_tensors, input_tensors, group=None, async_op=False): """""" assert len(output_tensors) == len(input_tensors), "" if hasattr(torch.distributed.distributed_c10d, '_all_gather_base_coalesced'): # customized PyTorch return torch.distributed.distributed_c10d._all_gather_base_coalesced(output_tensors, input_tensors, group=group, async_op=async_op) elif has_coalescing_manager(): reqs = [] with get_coalescing_manager(group, input_tensors[0].device, reqs): for output, input in zip(output_tensors, input_tensors): handle = torch.distributed.distributed_c10d.all_gather_into_tensor(output, input, group=group, async_op=True) reqs.append(handle) if async_op: return reqs[-1] else: reqs[-1].wait() def reduce_scatter_tensor(self, output_tensor, input_tensor, op=ReduceOp.SUM, group=None, async_op=False): if self.has_reduce_scatter_tensor(): return self.reduce_scatter_function(output_tensor, input_tensor, op=self._reduce_op(op), group=group, async_op=async_op) else: utils.logger.warning("unable to find torch.distributed.reduce_scatter_tensor. will fall back to " "torch.distributed.reduce_scatter which will result in suboptimal performance. 
" "please consider upgrading your pytorch installation.") pass def all_to_all_single(self, output, input, output_split_sizes=None, input_split_sizes=None, group=None, async_op=False): return torch.distributed.all_to_all_single(output=output, input=input, output_split_sizes=output_split_sizes, input_split_sizes=input_split_sizes, group=group, async_op=async_op) def send(self, tensor, dst, group=None, tag=0): return torch.distributed.send(tensor=tensor, dst=dst, group=group, tag=tag) def recv(self, tensor, src=None, group=None, tag=0): return torch.distributed.recv(tensor=tensor, src=src, group=group, tag=tag) def isend(self, tensor, dst, group=None, tag=0): return torch.distributed.isend(tensor=tensor, dst=dst, group=group, tag=tag) def irecv(self, tensor, src=None, group=None, tag=0): return torch.distributed.irecv(tensor=tensor, src=src, group=group, tag=tag) def gather(self, tensor, gather_list=None, dst=0, group=None, async_op=False): return torch.distributed.gather(tensor=tensor, gather_list=gather_list, dst=dst, group=group, async_op=async_op) def scatter(self, tensor, scatter_list=None, src=0, group=None, async_op=False): return torch.distributed.scatter(tensor=tensor, scatter_list=scatter_list, src=src, group=group, async_op=async_op) def barrier(self, group=torch.distributed.GroupMember.WORLD, async_op=False, device_ids=None): if group is None: group = torch.distributed.GroupMember.WORLD return torch.distributed.barrier(group=group, async_op=async_op, device_ids=device_ids) def monitored_barrier(self, group=torch.distributed.GroupMember.WORLD, timeout=None, wait_all_ranks=False): if group is None: group = torch.distributed.GroupMember.WORLD return torch.distributed.monitored_barrier(group=group, timeout=timeout, wait_all_ranks=wait_all_ranks) def get_rank(self, group=None): return torch.distributed.get_rank(group=group) def get_world_size(self, group=None): return torch.distributed.get_world_size(group=group) def is_initialized(self): return torch.distributed.is_initialized() def get_backend(self, group=None): return torch.distributed.get_backend(group=group) def new_group(self, ranks): return torch.distributed.new_group(ranks) def get_global_rank(self, group, group_rank): if hasattr(torch.distributed.distributed_c10d, "get_global_rank"): from torch.distributed.distributed_c10d import get_global_rank as _get_global_rank else: from torch.distributed.distributed_c10d import _get_global_rank return _get_global_rank(group, group_rank) def get_world_group(self): return torch.distributed.group.WORLD def destroy_process_group(self, group=None): return torch.distributed.destroy_process_group(group=group) def _reduce_op(self, op): ''' Helper function. 
If the op provided is not a torch.dist.ReduceOp, convert it and return ''' if not isinstance(op, torch.distributed.ReduceOp): if op == ReduceOp.SUM: op = torch.distributed.ReduceOp.SUM elif op == ReduceOp.PRODUCT: op = torch.distributed.ReduceOp.PRODUCT elif op == ReduceOp.AVG: op = torch.distributed.ReduceOp.AVG elif op == ReduceOp.MIN: op = torch.distributed.ReduceOp.MIN elif op == ReduceOp.MAX: op = torch.distributed.ReduceOp.MAX elif op == ReduceOp.BAND: op = torch.distributed.ReduceOp.BAND elif op == ReduceOp.BOR: op = torch.distributed.ReduceOp.BOR elif op == ReduceOp.BXOR: op = torch.distributed.ReduceOp.BXOR return op # This will become a light-weight wrapper around torch.distributed functions # TODO: create some example to show how this wrapper can help profile communication # TODO: make sure there is no performance regression with this approach # TODO: explore monkey-patching if this does not work
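# Hedged example: the ReduceOp translation performed by TorchBackend._reduce_op()
# above, expressed as a name-based lookup instead of an if/elif chain. It relies
# on the deepspeed.comm ReduceOp enum (earlier in this dump) sharing member names
# with torch.distributed.ReduceOp; entries missing from older torch builds
# (e.g. AVG before NCCL average support) are simply skipped.
import torch

_TORCH_REDUCE_OPS = {
    name: getattr(torch.distributed.ReduceOp, name)
    for name in ("SUM", "PRODUCT", "MIN", "MAX", "BAND", "BOR", "BXOR", "AVG")
    if hasattr(torch.distributed.ReduceOp, name)
}


def sketch_reduce_op(op):
    # torch-native ops pass through unchanged, mirroring _reduce_op() above
    if isinstance(op, torch.distributed.ReduceOp):
        return op
    return _TORCH_REDUCE_OPS[op.name]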
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import math from deepspeed.utils import log_dist def get_caller_func(frame=3): import sys return sys._getframe(frame).f_code.co_name # Helper function to pretty-print message sizes def convert_size(size_bytes): if size_bytes == 0: return "0B" size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB") i = int(math.floor(math.log(size_bytes, 1024))) p = math.pow(1024, i) s = round(size_bytes / p, 2) return "%s %s" % (s, size_name[i]) # Helper function to calculate algbw and busbw. # See https://gist.github.com/jeffra/b5e80466b4c86be00ea3b6f130fb7a36 and https://github.com/NVIDIA/nccl-tests/blob/master/doc/PERFORMANCE.md def calc_bw_log(comm_op, size, duration): import deepspeed.comm as dist n = dist.get_world_size() tput = 0 busbw = 0 if comm_op == "all_to_all_single": tput = (size / duration) busbw = (size / duration) * ((n - 1) / n) elif comm_op == "all_gather" or comm_op == "all_gather_into_tensor" or comm_op == "reduce_scatter" or comm_op == "reduce_scatter_tensor": size *= n tput = (size / duration) busbw = (size / duration) * ((n - 1) / n) elif comm_op == "all_reduce": tput = (size * 2 / duration) busbw = (size / duration) * (2 * (n - 1) / n) elif comm_op == "send" or comm_op == "recv" or comm_op == "isend" or comm_op == "irecv" or comm_op == "broadcast" or comm_op == "reduce" or comm_op == "gather" or comm_op == "scatter" or comm_op == "barrier": tput = (size / duration) busbw = tput else: print_rank_0("wrong comm_op specified") # noqa: F821 exit(0) # convert to Gbps tput *= 8 busbw *= 8 tput /= 1e6 busbw /= 1e6 return tput, busbw class CommsLogger: def __init__(self): from deepspeed.comm.constants import COMMS_LOGGER_VERBOSE_DEFAULT, COMMS_LOGGER_DEBUG_DEFAULT, COMMS_LOGGER_PROF_OPS_DEFAULT, COMMS_LOGGER_PROF_ALL_DEFAULT, COMMS_LOGGER_ENABLED_DEFAULT self.comms_dict = {} self.verbose = COMMS_LOGGER_VERBOSE_DEFAULT self.debug = COMMS_LOGGER_DEBUG_DEFAULT self.prof_ops = COMMS_LOGGER_PROF_OPS_DEFAULT self.prof_all = COMMS_LOGGER_PROF_ALL_DEFAULT self.enabled = COMMS_LOGGER_ENABLED_DEFAULT def configure(self, comms_config): self.enabled = comms_config.comms_logger_enabled if self.enabled: self.verbose = comms_config.comms_logger.verbose self.debug = comms_config.comms_logger.debug self.prof_ops = comms_config.comms_logger.prof_ops self.prof_all = comms_config.comms_logger.prof_all # There are three settings for the op profiler: # - Global profiling (profile all comms) # - Op-type profiling (e.g. profile all all_reduce comms) # - Op profiling (e.g. profile a specific all_reduce op) def start_profiling_comms(self): self.prof_all = True def stop_profiling_comms(self): self.prof_all = True # E.g. 
start_profiling_op('all_reduce') def start_profiling_op(self, op_name_list): self.prof_ops = list(set(self.prof_ops) | set(op_name_list)) def stop_profiling_op(self, op_name_list): self.prof_ops = [op for op in self.prof_ops if op not in op_name_list] # Add log entry def append(self, raw_name, record_name, latency, msg_size): import deepspeed.comm as dist algbw, busbw = calc_bw_log(raw_name, msg_size, latency) if record_name in self.comms_dict.keys(): # If this comm_op has already been logged with this message size, just add to existing record if msg_size in self.comms_dict[record_name].keys(): self.comms_dict[record_name][msg_size][0] += 1 self.comms_dict[record_name][msg_size][1].append(latency) self.comms_dict[record_name][msg_size][2].append(algbw) self.comms_dict[record_name][msg_size][3].append(busbw) # If this is a new message size for this comm_op, add new record under existing comm_op else: self.comms_dict[record_name][msg_size] = [1, [latency], [algbw], [busbw]] else: # Create entirely new record self.comms_dict[record_name] = {msg_size: [1, [latency], [algbw], [busbw]]} # If verbose, print every comm op # TODO: Add to tensorboard if self.verbose: n = dist.get_world_size() log_str = f"rank={dist.get_rank()} | comm op: " + record_name + " | time (ms): {:.2f}".format(latency) log_str += " | msg size: " + convert_size(msg_size) log_str += " | algbw (Gbps): {:.2f} ".format(algbw) log_str += " | busbw (Gbps): {:.2f} ".format(busbw) log_dist(log_str, [0]) # Print summary at end of iteration, epoch, or training def log_all(self): from deepspeed.utils.timer import trim_mean print( f"{'Comm. Op': <20}{'Message Size': <20}{'Count': <20}{'Total Latency(ms)': <20}{'Avg Latency(ms)': <20}{'tput_avg (Gbps)': <20}{'busbw_avg (Gbps)': <20}" ) for record_name in self.comms_dict.keys(): print(record_name) for msg_size, vals in sorted(self.comms_dict[record_name].items()): # vals[0] is the count for each msg size count = vals[0] # vals[1] is a list of latency records for each msg size total_lat = sum(vals[1]) # vals[2] and vals[3] are the lists of algbw and busbw, respectively # Get rid of outliers when we print avg_lat = trim_mean(vals[1], 0.1) avg_algbw = trim_mean(vals[2], 0.1) avg_busbw = trim_mean(vals[3], 0.1) print( f"{' ': <20}{convert_size(msg_size): <20}{count: <20}{total_lat: <20.2f}{avg_lat: <20.2f}{avg_algbw: <20.2f}{avg_busbw: <20.2f}" )
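# Hedged example: the all_reduce bandwidth arithmetic from calc_bw_log() above,
# worked with made-up numbers. Sizes are in bytes and durations in milliseconds
# (the latency unit recorded by CommsLogger.append()); algbw counts the payload
# twice and busbw scales by 2*(n-1)/n, with *8/1e6 converting bytes/ms to Gbps.
def sketch_all_reduce_bw(size_bytes, duration_ms, world_size):
    algbw = (size_bytes * 2 / duration_ms) * 8 / 1e6
    busbw = (size_bytes / duration_ms) * (2 * (world_size - 1) / world_size) * 8 / 1e6
    return algbw, busbw


if __name__ == "__main__":
    # 1 GiB all_reduce across 8 ranks in 50 ms -> roughly 343.6 Gbps algbw, 300.6 Gbps busbw
    print(sketch_all_reduce_bw(1 << 30, 50, 8))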
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import functools import logging import sys import os log_levels = { "debug": logging.DEBUG, "info": logging.INFO, "warning": logging.WARNING, "error": logging.ERROR, "critical": logging.CRITICAL, } class LoggerFactory: @staticmethod def create_logger(name=None, level=logging.INFO): """create a logger Args: name (str): name of the logger level: level of logger Raises: ValueError is name is None """ if name is None: raise ValueError("name for logger cannot be None") formatter = logging.Formatter("[%(asctime)s] [%(levelname)s] " "[%(filename)s:%(lineno)d:%(funcName)s] %(message)s") logger_ = logging.getLogger(name) logger_.setLevel(level) logger_.propagate = False ch = logging.StreamHandler(stream=sys.stdout) ch.setLevel(level) ch.setFormatter(formatter) logger_.addHandler(ch) return logger_ logger = LoggerFactory.create_logger(name="DeepSpeed", level=logging.INFO) @functools.lru_cache(None) def warning_once(*args, **kwargs): """ This method is identical to `logger.warning()`, but will emit the warning with the same message only once Note: The cache is for the function arguments, so 2 different callers using the same arguments will hit the cache. The assumption here is that all warning messages are unique across the code. If they aren't then need to switch to another type of cache that includes the caller frame information in the hashing function. """ logger.warning(*args, **kwargs) logger.warning_once = warning_once def print_configuration(args, name): logger.info("{}:".format(name)) for arg in sorted(vars(args)): dots = "." * (29 - len(arg)) logger.info(" {} {} {}".format(arg, dots, getattr(args, arg))) def log_dist(message, ranks=None, level=logging.INFO): from deepspeed import comm as dist """Log message when one of following condition meets + not dist.is_initialized() + dist.get_rank() in ranks if ranks is not None or ranks = [-1] Args: message (str) ranks (list) level (int) """ should_log = not dist.is_initialized() ranks = ranks or [] my_rank = dist.get_rank() if dist.is_initialized() else -1 if ranks and not should_log: should_log = ranks[0] == -1 should_log = should_log or (my_rank in set(ranks)) if should_log: final_message = "[Rank {}] {}".format(my_rank, message) logger.log(level, final_message) def print_json_dist(message, ranks=None, path=None): from deepspeed import comm as dist """Print message when one of following condition meets + not dist.is_initialized() + dist.get_rank() in ranks if ranks is not None or ranks = [-1] Args: message (str) ranks (list) path (str) """ should_log = not dist.is_initialized() ranks = ranks or [] my_rank = dist.get_rank() if dist.is_initialized() else -1 if ranks and not should_log: should_log = ranks[0] == -1 should_log = should_log or (my_rank in set(ranks)) if should_log: message['rank'] = my_rank import json with open(path, 'w') as outfile: json.dump(message, outfile) os.fsync(outfile) def get_current_level(): """ Return logger's current log level """ return logger.getEffectiveLevel() def should_log_le(max_log_level_str): """ Args: max_log_level_str: maximum log level as a string Returns ``True`` if the current log_level is less or equal to the specified log level. Otherwise ``False``. 
Example: ``should_log_le("info")`` will return ``True`` if the current log level is either ``logging.INFO`` or ``logging.DEBUG`` """ if not isinstance(max_log_level_str, str): raise ValueError(f"{max_log_level_str} is not a string") max_log_level_str = max_log_level_str.lower() if max_log_level_str not in log_levels: raise ValueError(f"{max_log_level_str} is not one of the `logging` levels") return get_current_level() <= log_levels[max_log_level_str]
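# Hedged example: the level comparison behind should_log_le(), shown standalone
# with a local copy of the log_levels table. With the logger at its default INFO
# level, "debug" fails the check while "info" and "warning" pass.
import logging


def sketch_should_log_le(current_level, max_log_level_str):
    table = {"debug": logging.DEBUG, "info": logging.INFO, "warning": logging.WARNING,
             "error": logging.ERROR, "critical": logging.CRITICAL}
    return current_level <= table[max_log_level_str.lower()]


if __name__ == "__main__":
    print(sketch_should_log_le(logging.INFO, "debug"))    # False
    print(sketch_should_log_le(logging.INFO, "warning"))  # True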
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import torch from typing import Callable from torch import Tensor from packaging import version as pkg_version class OnDevice(object): """ Create modules/tensors w. specific devices and dtypes. Examples: Create MyModule which consists of many different sub-modules and parameters. In this case we can create MyModule as a collection of 'meta' tensors by passing `device='meta'` or we can create the module _directly_ on a CUDA device by passing `device=f'cuda:{local_rank}'` (where `local_rank` is the local GPU id. with OnDevice(dtype=torch.float16, device='meta'): model = MyModel() with OnDevice(dtype=torch.float16, device=f'cuda:{local_rank}'): model = MyModel() """ _orig_torch_empty = torch.empty _orig_torch_zeros = torch.zeros _orig_torch_ones = torch.ones _orig_torch_full = torch.full def __init__(self, dtype, device="meta", enabled=True): self.dtype = dtype self.enabled = enabled self.device = device if device == "meta": if pkg_version.parse('1.10') > pkg_version.parse(torch.__version__): raise NotImplementedError("Meta tensor support is not available, please upgrade to torch 1.10+") def fp_tensor_constructor(self, fn: Callable, target_fp_dtype: torch.dtype) -> Callable: def wrapped_fn(*args, **kwargs) -> Tensor: if kwargs.get("device", None) is None: kwargs['device'] = self.device tensor: Tensor = fn(*args, **kwargs) if tensor.is_floating_point(): tensor = tensor.to(target_fp_dtype) return tensor return wrapped_fn def get_new_tensor_fn_for_dtype(self, dtype: torch.dtype) -> Callable: def new_tensor(cls, *args) -> Tensor: tensor = OnDevice._orig_torch_empty(0, device=self.device).new_empty(*args) if tensor.is_floating_point(): tensor = tensor.to(dtype) return tensor return new_tensor def __enter__(self): if not self.enabled: return torch.Tensor.__old_new__ = torch.Tensor.__new__ torch.Tensor.__new__ = self.get_new_tensor_fn_for_dtype(self.dtype) torch.empty = self.fp_tensor_constructor(self._orig_torch_empty, self.dtype) torch.zeros = self.fp_tensor_constructor(self._orig_torch_zeros, self.dtype) torch.ones = self.fp_tensor_constructor(self._orig_torch_ones, self.dtype) torch.full = self.fp_tensor_constructor(self._orig_torch_full, self.dtype) def __exit__(self, exc_type, exc_value, traceback): if not self.enabled: return torch.Tensor.__new__ = torch.Tensor.__old_new__ torch.empty = self._orig_torch_empty torch.zeros = self._orig_torch_zeros torch.ones = self._orig_torch_ones torch.full = self._orig_torch_full
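# Hedged example: exercising the OnDevice context manager defined above with a toy
# torch.nn module. Assumes torch >= 1.10 (meta-tensor support) and that OnDevice is
# importable from deepspeed.utils as its package __init__ later in this dump does;
# the module and shapes are made up.
import torch
from torch import nn


def sketch_on_device_usage():
    from deepspeed.utils import OnDevice
    with OnDevice(dtype=torch.float16, device="meta"):
        t = torch.ones(16)           # patched factory: lands on meta as fp16
        model = nn.Linear(8, 4)      # module parameters follow the same rule
    return t.device.type, t.dtype, model.weight.dtype  # expected: ("meta", fp16, fp16)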
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import time from numpy import mean from deepspeed.utils.logging import log_dist from deepspeed.accelerator import get_accelerator from deepspeed import comm as dist try: import psutil PSUTILS_INSTALLED = True except ImportError: PSUTILS_INSTALLED = False pass class CudaEventTimer(object): def __init__(self, start_event: get_accelerator().Event, end_event: get_accelerator().Event): self.start_event = start_event self.end_event = end_event def get_elapsed_msec(self): get_accelerator().current_stream().wait_event(self.end_event) self.end_event.synchronize() return self.start_event.elapsed_time(self.end_event) class SynchronizedWallClockTimer: """Group of timers. Borrowed from Nvidia Megatron code""" class Timer: """Timer.""" def __init__(self, name): self.name_ = name self.started_ = False self.event_timers = [] self.use_host_timer = get_accelerator().is_synchronized_device() self.start_event = None self.elapsed_records = None self.start_time = 0.0 self.end_time = 0.0 def start(self): """Start the timer.""" assert not self.started_, f"{self.name_} timer has already been started" if self.use_host_timer: self.start_time = time.time() else: event_class = get_accelerator().Event self.start_event = event_class(enable_timing=True) self.start_event.record() self.started_ = True def stop(self, reset=False, record=False): """Stop the timer.""" assert self.started_, "timer is not started" event_class = get_accelerator().Event if self.use_host_timer: self.end_time = time.time() self.event_timers.append(self.end_time - self.start_time) else: event_class = get_accelerator().Event end_event = event_class(enable_timing=True) end_event.record() self.event_timers.append(CudaEventTimer(self.start_event, end_event)) self.start_event = None self.started_ = False def _get_elapsed_msec(self): if self.use_host_timer: self.elapsed_records = [et * 1000.0 for et in self.event_timers] else: self.elapsed_records = [et.get_elapsed_msec() for et in self.event_timers] self.event_timers.clear() return sum(self.elapsed_records) def reset(self): """Reset timer.""" self.started_ = False self.start_event = None self.elapsed_records = None self.event_timers.clear() def elapsed(self, reset=True): """Calculate the elapsed time.""" started_ = self.started_ # If the timing in progress, end it first. if self.started_: self.stop() # Get the elapsed time. elapsed_ = self._get_elapsed_msec() # Reset the elapsed time if reset: self.reset() # If timing was in progress, set it back. 
if started_: self.start() return elapsed_ def mean(self): self.elapsed(reset=False) return trim_mean(self.elapsed_records, 0.1) def __init__(self): self.timers = {} def get_timers(self): return self.timers def __call__(self, name): if name not in self.timers: self.timers[name] = self.Timer(name) return self.timers[name] @staticmethod def memory_usage(): alloc = "mem_allocated: {:.4f} GB".format(get_accelerator().memory_allocated() / (1024 * 1024 * 1024)) max_alloc = "max_mem_allocated: {:.4f} GB".format(get_accelerator().max_memory_allocated() / (1024 * 1024 * 1024)) cache = "cache_allocated: {:.4f} GB".format(get_accelerator().memory_cached() / (1024 * 1024 * 1024)) max_cache = "max_cache_allocated: {:.4f} GB".format(get_accelerator().max_memory_cached() / (1024 * 1024 * 1024)) return " | {} | {} | {} | {}".format(alloc, max_alloc, cache, max_cache) def log(self, names, normalizer=1.0, reset=True, memory_breakdown=False, ranks=None): """Log a group of timers.""" assert normalizer > 0.0 string = f"rank={dist.get_rank()} time (ms)" for name in names: if name in self.timers: elapsed_time = (self.timers[name].elapsed(reset=reset) / normalizer) string += " | {}: {:.2f}".format(name, elapsed_time) log_dist(string, ranks=ranks or [0]) def get_mean(self, names, normalizer=1.0, reset=True): """Get the mean of a group of timers.""" assert normalizer > 0.0 means = {} for name in names: if name in self.timers: elapsed_time = (self.timers[name].mean() * 1000.0 / normalizer) means[name] = elapsed_time return means class ThroughputTimer: def __init__( self, batch_size, start_step=2, steps_per_output=50, monitor_memory=False, logging_fn=None, ): from deepspeed.utils import logger self.start_time = 0 self.end_time = 0 self.started = False self.batch_size = 1 if batch_size is None else batch_size self.start_step = start_step self.epoch_count = 0 self.micro_step_count = 0 self.global_step_count = 0 self.total_elapsed_time = 0 self.step_elapsed_time = 0 self.steps_per_output = steps_per_output self.monitor_memory = monitor_memory self.logging = logging_fn if self.logging is None: self.logging = logger.info self.initialized = False if self.monitor_memory and not PSUTILS_INSTALLED: raise ImportError("Unable to import 'psutils', please install package") def update_epoch_count(self): self.epoch_count += 1 self.micro_step_count = 0 def _init_timer(self): self.initialized = True def start(self): self._init_timer() self.started = True if self.global_step_count >= self.start_step: get_accelerator().synchronize() self.start_time = time.time() def stop(self, global_step=False, report_speed=True): if not self.started: return self.started = False self.micro_step_count += 1 if global_step: self.global_step_count += 1 if self.start_time > 0: get_accelerator().synchronize() self.end_time = time.time() duration = self.end_time - self.start_time self.total_elapsed_time += duration self.step_elapsed_time += duration if global_step: if report_speed and self.global_step_count % self.steps_per_output == 0: self.logging( "epoch={}/micro_step={}/global_step={}, RunningAvgSamplesPerSec={}, CurrSamplesPerSec={}, " "MemAllocated={}GB, MaxMemAllocated={}GB".format( self.epoch_count, self.micro_step_count, self.global_step_count, self.avg_samples_per_sec(), self.batch_size / self.step_elapsed_time, round(get_accelerator().memory_allocated() / 1024**3, 2), round(get_accelerator().max_memory_allocated() / 1024**3, 2), )) if self.monitor_memory: virt_mem = psutil.virtual_memory() swap = psutil.swap_memory() 
self.logging("epoch={}/micro_step={}/global_step={}, vm %: {}, swap %: {}".format( self.epoch_count, self.micro_step_count, self.global_step_count, virt_mem.percent, swap.percent, )) self.step_elapsed_time = 0 def avg_samples_per_sec(self): if self.global_step_count > 0: total_step_offset = self.global_step_count - self.start_step avg_time_per_step = self.total_elapsed_time / total_step_offset # training samples per second return self.batch_size / avg_time_per_step return float("-inf") def trim_mean(data, trim_percent): """Compute the trimmed mean of a list of numbers. Args: data (list): List of numbers. trim_percent (float): Percentage of data to trim. Returns: float: Trimmed mean. """ assert trim_percent >= 0.0 and trim_percent <= 1.0 n = len(data) # Account for edge case of empty list if len(data) == 0: return 0 data.sort() k = int(round(n * (trim_percent))) return mean(data[k:n - k])
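# Hedged example: the trimming rule implemented by trim_mean() above (sort, drop
# round(n * trim_percent) entries from each end, average the rest), reproduced
# without numpy on made-up latency samples.
def sketch_trim_mean(data, trim_percent):
    if not data:
        return 0
    data = sorted(data)
    k = int(round(len(data) * trim_percent))
    kept = data[k:len(data) - k]
    return sum(kept) / len(kept)


if __name__ == "__main__":
    # ten samples with one outlier; a 10% trim drops the smallest and largest value
    print(sketch_trim_mean([5, 5, 5, 5, 5, 5, 5, 5, 5, 500], 0.1))  # 5.0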
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .logging import logger, log_dist
from .comms_logging import get_caller_func
#from .distributed import init_distributed
from .init_on_device import OnDevice
from .groups import *
from .nvtx import instrument_w_nvtx
# TODO: Move tensor fragment and mixed precision to zero utils
from .tensor_fragment import tensor_fragment, get_full_hp_param, get_hp_fragment_mapping, fragment_address, get_full_hp_grad
from .tensor_fragment import safe_get_full_fp32_param, safe_get_full_grad, safe_get_full_optimizer_state
from .mixed_precision_linkage import link_hp_params
from deepspeed.runtime.dataloader import RepeatingLoader
from .numa import get_numactl_cmd
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from enum import IntEnum


class ActivationFuncType(IntEnum):
    UNKNOWN = 0
    GELU = 1
    ReLU = 2
    GATED_GELU = 3
    GATED_SILU = 4


GATED_ACTIVATION_TYPES = [
    ActivationFuncType.GATED_GELU,
    ActivationFuncType.GATED_SILU,
]


class NormType(IntEnum):
    UNKNOWN = 0
    LayerNorm = 1
    GroupNorm = 2
    RMSNorm = 3
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team # The file has been adapted from https://github.com/NVIDIA/Megatron-LM and retains the following license from the original file # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Support different forms of parallelism in DeepSpeed using multiple process groups. Given that there are multiple scenarios and use-cases, this file is going to be updated frequently. For now, the group creation needed for the training scenario is being implemented. For inference and other new scenarios, the code will be either reused or added to this file. """ from deepspeed import comm as dist from deepspeed.utils import log_dist from deepspeed.utils.exceptions import DeprecatedException # Expert parallel group that the current rank belongs to. _EXPERT_PARALLEL_GROUP = {} # Expert data parallel group that the current rank belongs to. _EXPERT_DATA_PARALLEL_GROUP = {} # dist world group needs to be cloned for some cases _WORLD_GROUP = None # global object to maintain mpu object if passed by a Megatron client mpu = None # global object that stores tensor parallel world size for experts expert_tensor_parallel_world_size = 1 # Deprecated groups initialize function. def initialize(ep_size=1, mpu=None): """ Deprecated function. Retained to inform the users.""" raise DeprecatedException( "Please do not use the groups.initialize() API as it is deprecated. Instead, pass the desired ep_size to deepspeed.moe.layer.MoE(..,ep_size,..)" ) def _ensure_divisibility(numerator, denominator): """Ensure that numerator is divisible by the denominator.""" assert numerator % denominator == 0, '{} is not divisible by {}'.format(numerator, denominator) # Not currently used. Helper function to create a model (tensor) parallel group. def _create_model_parallel(model_parallel_size_): """ Initialize model data parallel groups. Arguments: model_parallel_size: number of GPUs used to parallelize model. Returns: Tuple of data parallel group and model parallel group Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we use 2 GPUs to parallelize the model. The present function will create 4 model parallel groups and 2 data parallel groups as: 4 model parallel groups: [g0, g1], [g2, g3], [g4, g5], [g6, g7] 2 data parallel groups: [g0, g2, g4, g6], [g1, g3, g5, g7] Note that for efficiency, the caller should make sure adjacent ranks are on the same DGX box. For example if we are using 2 DGX-1 boxes with a total of 16 GPUs, rank 0 to 7 belong to the first box and ranks 8 to 15 belong to the second box. """ log_dist(f'Creating model parallel group with size {model_parallel_size_}', ranks=[0]) # Get world size and rank. Ensure some consistencies. 
assert dist.is_initialized() world_size = dist.get_world_size() model_parallel_size = min(model_parallel_size_, world_size) _ensure_divisibility(world_size, model_parallel_size) rank = dist.get_rank() _DATA_PARALLEL_GROUP = None _MODEL_PARALLEL_GROUP = None # Build the data parallel groups. for i in range(model_parallel_size): ranks = range(i, world_size, model_parallel_size) group = dist.new_group(ranks) if i == (rank % model_parallel_size): _DATA_PARALLEL_GROUP = group # Build the model parallel groups. for i in range(world_size // model_parallel_size): ranks = range(i * model_parallel_size, (i + 1) * model_parallel_size) group = dist.new_group(ranks) if i == (rank // model_parallel_size): _MODEL_PARALLEL_GROUP = group return _DATA_PARALLEL_GROUP, _MODEL_PARALLEL_GROUP def _create_expert_and_data_parallel(expert_parallel_size_): """ Create expert and data parallel groups. Note: Caller of this function is responsible to check if the groups already exist. Example - E + D parallel world_size = 16 expert_parallel_size = 2 # number of experts in same group expert_data_parallel_group = [0,2,4,6,8,10,12,14], [1,3,5,7,9,11,13,15] - all reduce is only on MoE params expert_parallel_group = [0, 1], [2,3], [4,5], [6,7], [8,9] - no all reduce, but all to all data_parallel_group = [0,1,...,15] - all reduce is only on non-MoE """ assert dist.is_initialized() log_dist(f'Creating expert and data parallel groups with size {expert_parallel_size_}', ranks=[0]) world_size = dist.get_world_size() rank = dist.get_rank() _ensure_divisibility(world_size, expert_parallel_size_) group_name = f"ep_size_{expert_parallel_size_}" # Build the expert data parallel groups. global _EXPERT_DATA_PARALLEL_GROUP # Only create group if it does not already exist if group_name not in _EXPERT_DATA_PARALLEL_GROUP: for i in range(expert_parallel_size_): ranks = range(i, world_size, expert_parallel_size_) group = dist.new_group(ranks) log_dist(f'Creating expert data parallel process group named {group_name} with ranks: {list(ranks)}', [0]) if i == (rank % expert_parallel_size_): _EXPERT_DATA_PARALLEL_GROUP[group_name] = group # Build the expert parallel groups. global _EXPERT_PARALLEL_GROUP # Only create group if it does not already exist if group_name not in _EXPERT_PARALLEL_GROUP: for i in range(world_size // expert_parallel_size_): ranks = range(i * expert_parallel_size_, (i + 1) * expert_parallel_size_) group = dist.new_group(ranks) log_dist(f'creating expert parallel process group named {group_name} with ranks: {list(ranks)}', [0]) if i == (rank // expert_parallel_size_): _EXPERT_PARALLEL_GROUP[group_name] = group def _get_expert_parallel_ranks(world_size, model_parallel_size_, expert_parallel_size_): """Generate expert parallel and expert data parallel group ranks list. Example - E + M + D parallel world_size = 16 model_degree = 2 expert_degree = 4 # number of experts in same group mp_group = [0, 1], [2,3], [4,5] ... data_parallel_group =[0,2,4,6,8,10, 12,14], [1,3,5,7,9,11,13,15] expert_parallel_group = [0,2,4,6], [8,10,12,14] [1,3,5,7], [9,11,13,15] expert_data_parallel_group = [0,8],[2,10],[4,12],[6,14], [1,9],[3,11],[5,13],[7,15] Args: world_size (int): Distributed world size. model_parallel_size_ (int): Model parallel group size. expert_parallel_size_ (int): Expert parallel group size. Returns: Expert parallel group ranks and Expert data parallel group ranks list. 
""" _ensure_divisibility(world_size, model_parallel_size_) dp_world_size = world_size // model_parallel_size_ _ensure_divisibility(dp_world_size, expert_parallel_size_) # Generate data parallel groups data_parallel_groups = [] dp_group_size = model_parallel_size_ for i in range(dp_group_size): data_parallel_groups.append(list(range(i, world_size, dp_group_size))) expert_parallel_groups = [] expert_data_parallel_groups = [] for dp_ranks in data_parallel_groups: # partition of expert parallel groups, e.g. [0,2,4,6], [8,10,12,14] part_ep_groups = [] for i in range(0, dp_world_size, expert_parallel_size_): part_ep_groups.append(dp_ranks[i:i + expert_parallel_size_]) expert_parallel_groups.extend(part_ep_groups) # zip part_ep_groups get expert data parallel ranks, e.g [0,8],[2,10],[4,12],[6,14] for expert_dp_ranks in zip(*part_ep_groups): expert_data_parallel_groups.append(list(expert_dp_ranks)) return expert_parallel_groups, expert_data_parallel_groups def _create_expert_data_and_model_parallel(expert_parallel_size_, mpu): """ Create expert and data parallel groups based on MPU (model parallel) group. Note: Caller of this function is responsible to check if the groups already exist. Example - E + M + D parallel world_size = 16 model_degree = 2 expert_degree = 4 # number of experts in same group mp_group = [0, 1], [2,3], [4,5] ... data_parallel_group =[0,2,4,6,8,10, 12,14], [1,3,5,7,9,11,13,15] expert_parallel_group = [0,2,4,6], [8,10,12,14] [1,3,5,7], [9,11,13,15] expert_data_parallel_group = [0,8],[2,10],[4,12],[6,14], [1,9],[3,11],[5,13],[7,15] """ assert dist.is_initialized(), "dist is not initialized" model_parallel_size_ = mpu.get_model_parallel_world_size() global expert_tensor_parallel_world_size expert_tensor_parallel_world_size = model_parallel_size_ world_size = dist.get_world_size() rank = dist.get_rank() dp_world_size = mpu.get_data_parallel_world_size() dp_rank = mpu.get_data_parallel_rank() _ensure_divisibility(world_size, model_parallel_size_) _ensure_divisibility(dp_world_size, expert_parallel_size_) log_dist( f"Creating deepspeed groups with model parallel size {model_parallel_size_}, expert parallel size {expert_parallel_size_}, world size {world_size}, dp world size {dp_world_size}", [0]) global _EXPERT_PARALLEL_GROUP, _EXPERT_DATA_PARALLEL_GROUP # Get world size and rank. Ensure some consistencies. _DATA_PARALLEL_GROUP = mpu.get_data_parallel_group() _MODEL_PARALLEL_GROUP = mpu.get_model_parallel_group() group_name = f"ep_size_{expert_parallel_size_}" # Only create groups if they don't already exist # Need to check conditions outside the group creation loop because of the way torch.dist group creation works if group_name not in _EXPERT_DATA_PARALLEL_GROUP and group_name not in _EXPERT_PARALLEL_GROUP: expert_parallel_groups, expert_data_parallel_groups = _get_expert_parallel_ranks( world_size, model_parallel_size_, expert_parallel_size_) for ranks in expert_parallel_groups: group = dist.new_group(ranks) if rank in list(ranks): _EXPERT_PARALLEL_GROUP[group_name] = group for ranks in expert_data_parallel_groups: group = dist.new_group(ranks) if rank in list(ranks): _EXPERT_DATA_PARALLEL_GROUP[group_name] = group def _get_max_expert_size(): """Get the maximum ep_size from all the created groups.""" assert _EXPERT_PARALLEL_GROUP is not None, "Warning! 
Process group not initialized" keylist = [] for key in _EXPERT_PARALLEL_GROUP.keys(): # index 2 is ep_size in the group name: ep_size_<ep_size> index = 2 keylist.append(int(key.split('_')[index])) return max(keylist) if len(keylist) > 0 else None def _get_max_expert_size_name(): """Get the name of the group with max. ep_size""" return f'ep_size_{_get_max_expert_size()}' def _get_max_expert_parallel_group(): """Get the max expert parallel size.""" return _get_expert_parallel_group(_get_max_expert_size_name()) def _get_expert_parallel_group(group_name): """Get the expert parallel group the caller rank belongs to.""" assert group_name in _EXPERT_PARALLEL_GROUP, \ 'expert parallel group is not initialized' return _EXPERT_PARALLEL_GROUP[group_name] def _get_expert_parallel_group_dict(): """Get the expert parallel group dict.""" return _EXPERT_PARALLEL_GROUP def _get_expert_data_parallel_group(group_name): """Get the expert data parallel group the caller rank belongs to.""" assert group_name in _EXPERT_DATA_PARALLEL_GROUP, \ 'expert data parallel group is not initialized' return _EXPERT_DATA_PARALLEL_GROUP[group_name] def _get_expert_data_parallel_group_dict(): """Get the expert data parallel group dict.""" return _EXPERT_DATA_PARALLEL_GROUP def _clone_world_group(): """Create a clone of the world group Note: We need to clone the dist world group because we use dist.get_global_rank() utility function in DeepSpeed at many places. As that function does not work on dist.group.WORLD, we need to keep a clone of it. """ assert dist.is_initialized(), "dist is not initialized" global _WORLD_GROUP if _WORLD_GROUP is None: # If not cloned already, clone the world group _WORLD_GROUP = dist.new_group(ranks=range(dist.get_world_size())) return _WORLD_GROUP def _get_data_parallel_group(): """Get the data parallel group the caller rank belongs to.""" assert dist.is_initialized(), \ 'dist is not initialized' global mpu if mpu is not None: return mpu.get_data_parallel_group() # Return the clone of dist world group return _clone_world_group() def _get_broadcast_src_rank(): return dist.get_global_rank(_get_data_parallel_group(), 0) def _get_expert_broadcast_src_rank(group_name): return dist.get_global_rank(_get_expert_data_parallel_group(group_name), 0) def _get_expert_parallel_world_size(group_name): """Return world size for the expert parallel group.""" return dist.get_world_size(group=_get_expert_parallel_group(group_name)) def _get_expert_data_parallel_world_size(group_name): """Return world size for the expert data parallel group.""" return dist.get_world_size(group=_get_expert_data_parallel_group(group_name)) def _get_expert_parallel_rank(group_name): """Return my rank for the expert parallel group.""" return dist.get_rank(group=_get_expert_parallel_group(group_name)) def _get_expert_parallel_src_rank(group_name): """Calculate the global rank corresponding to a local rank zero in the expert parallel group.""" global_rank = dist.get_rank() local_world_size = _get_expert_parallel_world_size(group_name) return (global_rank // local_world_size) * local_world_size def _get_expert_data_parallel_rank(group_name): """Return my rank for the expert data parallel group.""" return dist.get_rank(group=_get_expert_data_parallel_group(group_name)) def _get_data_parallel_world_size(): """Return world size for the data parallel group.""" global mpu if mpu is not None: return mpu.get_data_parallel_world_size() return dist.get_world_size(group=_get_data_parallel_group()) def _get_model_parallel_world_size(): """Return world 
size for the model parallel group.""" global mpu if mpu is not None: return mpu.get_model_parallel_world_size() return 1 def _get_data_parallel_rank(): """Return my rank for the data parallel group.""" global mpu if mpu is not None: return mpu.get_data_parallel_rank() return dist.get_rank(group=_get_data_parallel_group()) def _get_expert_model_parallel_world_size(): global expert_tensor_parallel_world_size return expert_tensor_parallel_world_size
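# Hedged example: a dist-free restatement of the rank bookkeeping behind
# _get_expert_parallel_ranks(), reproducing the E + M + D example from its
# docstring (world_size=16, model parallel 2, expert parallel 4). Only plain
# arithmetic is used, so no process group is required to run it.
def sketch_expert_ranks(world_size, model_parallel_size, expert_parallel_size):
    dp_world_size = world_size // model_parallel_size
    data_parallel_groups = [list(range(i, world_size, model_parallel_size)) for i in range(model_parallel_size)]
    expert_parallel_groups, expert_data_parallel_groups = [], []
    for dp_ranks in data_parallel_groups:
        part = [dp_ranks[i:i + expert_parallel_size] for i in range(0, dp_world_size, expert_parallel_size)]
        expert_parallel_groups.extend(part)
        expert_data_parallel_groups.extend(list(r) for r in zip(*part))
    return expert_parallel_groups, expert_data_parallel_groups


if __name__ == "__main__":
    ep, edp = sketch_expert_ranks(16, 2, 4)
    print(ep)   # [[0, 2, 4, 6], [8, 10, 12, 14], [1, 3, 5, 7], [9, 11, 13, 15]]
    print(edp)  # [[0, 8], [2, 10], [4, 12], [6, 14], [1, 9], [3, 11], [5, 13], [7, 15]]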
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team # For lazy import with printflock() fcntl = None # for debug purposes map module and param objects to their fully qualified names module_names = {} param_names = {} def debug_extract_module_and_param_names(model): # extract the fully qualified names as soon as the model is acquired global module_names global param_names # XXX: can probably make a map of param2module and vice-versa module_names = {module: name for name, module in model.named_modules()} param_names = {param: name for name, param in model.named_parameters()} def debug_module2name(module): if module in module_names: return module_names[module] else: return "unknown" def debug_module2name_id(module): return f"name={debug_module2name(module)} id={module.id}" def debug_module2name_class(module): return f"name={debug_module2name(module)} {module.__class__.__name__}" def debug_param2name(param): if param in param_names: return param_names[param] else: return "unknown" def debug_param2name_id(param): return f"name={debug_param2name(param)} id={param.ds_id}" def debug_param2name_id_shape(param): return f"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape}" def debug_param2name_id_shape_device(param): return f"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape} device={param.device}" def debug_param2name_id_numel(param): return f"name={debug_param2name(param)} id={param.ds_id} numel={param.numel()}" def debug_param2name_id_shape_status(param): return f"name={debug_param2name(param)} id={param.ds_id} shape={param.data.shape} status={param.ds_status}" def printflock(*msgs): """ For printing messages for all concurrent gpus w/o getting interleaved text. This is useful when debugging issues where multi-gpus don't sync. 1. Enable the force debug in say partitioning and zero3 files 2. Override the usual versions with :: def print_rank_0(message, debug=False, force=False): rank = deepspeed.comm.get_rank() printflock(f"[{rank}] {message}") 3. run the program and you get both logs non-interleaved But this makes it very difficult to make sense of the output, so the ``log_rank_file`` helper function might be more useful, as it's easier to send each log stream into a separate file and then compare those. """ global fcntl if fcntl == None: import fcntl with open(__file__, "r") as fh: fcntl.flock(fh, fcntl.LOCK_EX) try: print(*msgs) finally: fcntl.flock(fh, fcntl.LOCK_UN) fh = None def log_rank_file(rank, *msgs): """ Print to a log file of the given rank This is useful for debugging hanging in sync processes. Here is a possible workflow: 1. Enable the force debug in say partitioning and zero3 files 2. Override the usual versions of print_rank_0 in those files with :: def print_rank_0(message, debug=False, force=False): rank = deepspeed.comm.get_rank() log_rank_file(rank, message) 3. run the program 4. fix up the expected differences, e.g. different cuda numbers :: perl -pi -e 's|cuda:1|cuda:0|' log_rank_* 5. now diff and see where names and ids diverge - you will find where the gpus don't do the same work (e.g. 
when some layers get conditionally skipped on one gpu but not all) diff -u log_rank_0.txt log_rank_1.txt | less """ global fh if fh is None: fh = open(f"log_rank_{rank}.txt", "w") for m in msgs: fh.write(f"{m}\n") fh.flush() def print_backward_tensors(tensor): def _print_bwd_tensors(grad_fn): print(f"Backward tensors in {grad_fn}") for funcs in grad_fn.next_functions: if funcs[0]: try: tensor = getattr(funcs[0], 'variable') print(funcs[0]) print(f"Tensor - id: {id(tensor)}, shape: {tensor.shape}, data: {tensor}, grad: {tensor.grad}") except AttributeError as e: _print_bwd_tensors(funcs[0]) if hasattr(tensor, 'grad_fn'): _print_bwd_tensors(tensor.grad_fn)
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team


class DeprecatedException(Exception):
    pass
#!/usr/bin/env python # Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team # This script extracts fp32 consolidated weights from a zero 2 and 3 DeepSpeed checkpoints. It gets # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in # the future. Once extracted, the weights don't require DeepSpeed and can be used in any # application. # # example: python zero_to_fp32.py . pytorch_model.bin import argparse import torch import glob import math import os import re from collections import OrderedDict from dataclasses import dataclass # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with # DeepSpeed data structures it has to be available in the current python environment. from deepspeed.utils import logger from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES, FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS) @dataclass class zero_model_state: buffers: dict() param_shapes: dict() shared_params: list ds_version: int frozen_param_shapes: dict() frozen_param_fragments: dict() debug = 0 # load to cpu device = torch.device('cpu') def atoi(text): return int(text) if text.isdigit() else text def natural_keys(text): ''' alist.sort(key=natural_keys) sorts in human order http://nedbatchelder.com/blog/200712/human_sorting.html (See Toothy's implementation in the comments) ''' return [atoi(c) for c in re.split(r'(\d+)', text)] def get_model_state_file(checkpoint_dir, zero_stage): if not os.path.isdir(checkpoint_dir): raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist") # there should be only one file if zero_stage == 2: file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt") elif zero_stage == 3: file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt") if not os.path.exists(file): raise FileNotFoundError(f"can't find model states file at '{file}'") return file def get_checkpoint_files(checkpoint_dir, glob_pattern): # XXX: need to test that this simple glob rule works for multi-node setup too ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys) if len(ckpt_files) == 0: raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'") return ckpt_files def get_optim_files(checkpoint_dir): return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt") def get_model_state_files(checkpoint_dir): return get_checkpoint_files(checkpoint_dir, "*_model_states.pt") def parse_model_states(files): zero_model_states = [] for file in files: state_dict = torch.load(file, map_location=device) if BUFFER_NAMES not in state_dict: raise ValueError(f"{file} is not a model state checkpoint") buffer_names = state_dict[BUFFER_NAMES] if debug: print("Found buffers:", buffer_names) # recover just the buffers while restoring them to fp32 if they were saved in fp16 buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names} param_shapes = state_dict[PARAM_SHAPES] # collect parameters that are included in param_shapes param_names = [] for s in param_shapes: for name in s.keys(): param_names.append(name) # update with frozen parameters frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None) if frozen_param_shapes is not None: if debug: print(f"Found frozen_param_shapes: {frozen_param_shapes}") param_names += 
list(frozen_param_shapes.keys()) # handle shared params shared_params = [[k, v] for k, v in state_dict["shared_params"].items()] ds_version = state_dict.get(DS_VERSION, None) frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None) z_model_state = zero_model_state(buffers=buffers, param_shapes=param_shapes, shared_params=shared_params, ds_version=ds_version, frozen_param_shapes=frozen_param_shapes, frozen_param_fragments=frozen_param_fragments) zero_model_states.append(z_model_state) return zero_model_states def parse_optim_states(files, ds_checkpoint_dir): total_files = len(files) state_dicts = [] for f in files: state_dicts.append(torch.load(f, map_location=device)) if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]: raise ValueError(f"{files[0]} is not a zero checkpoint") zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE] world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT] # For ZeRO-2 each param group can have different partition_count as data parallelism for expert # parameters can be different from data parallelism for non-expert parameters. So we can just # use the max of the partition_count to get the dp world_size. if type(world_size) is list: world_size = max(world_size) if world_size != total_files: raise ValueError( f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. " "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes." ) # the groups are named differently in each stage if zero_stage == 2: fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS elif zero_stage == 3: fp32_groups_key = FP32_FLAT_GROUPS else: raise ValueError(f"unknown zero stage {zero_stage}") if zero_stage == 2: fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))] elif zero_stage == 3: # if there is more than one param group, there will be multiple flattened tensors - one # flattened tensor per group - for simplicity merge them into a single tensor # # XXX: could make the script more memory efficient for when there are multiple groups - it # will require matching the sub-lists of param_shapes for each param group flattened tensor fp32_flat_groups = [ torch.cat(state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key], 0) for i in range(len(state_dicts)) ] return zero_stage, world_size, fp32_flat_groups def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir): """ Returns fp32 state_dict reconstructed from ds checkpoint Args: - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are) """ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'") optim_files = get_optim_files(ds_checkpoint_dir) zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}") model_files = get_model_state_files(ds_checkpoint_dir) zero_model_states = parse_model_states(model_files) print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}') if zero_stage == 2: return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states) elif zero_stage == 3: return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states) def _zero2_merge_frozen_params(state_dict, zero_model_states): if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: 
return frozen_param_shapes = zero_model_states[0].frozen_param_shapes frozen_param_fragments = zero_model_states[0].frozen_param_fragments if debug: num_elem = sum(s.numel() for s in frozen_param_shapes.values()) print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') wanted_params = len(frozen_param_shapes) wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) avail_numel = sum([p.numel() for p in frozen_param_fragments.values()]) print(f'Frozen params: Have {avail_numel} numels to process.') print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') total_params = 0 total_numel = 0 for name, shape in frozen_param_shapes.items(): total_params += 1 unpartitioned_numel = shape.numel() total_numel += unpartitioned_numel state_dict[name] = frozen_param_fragments[name] if debug: print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): param_shapes = zero_model_states[0].param_shapes # Reconstruction protocol: # # XXX: document this if debug: for i in range(world_size): for j in range(len(fp32_flat_groups[0])): print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}") # XXX: memory usage doubles here (zero2) num_param_groups = len(fp32_flat_groups[0]) merged_single_partition_of_fp32_groups = [] for i in range(num_param_groups): merged_partitions = [sd[i] for sd in fp32_flat_groups] full_single_fp32_vector = torch.cat(merged_partitions, 0) merged_single_partition_of_fp32_groups.append(full_single_fp32_vector) avail_numel = sum( [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups]) if debug: wanted_params = sum([len(shapes) for shapes in param_shapes]) wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes]) # not asserting if there is a mismatch due to possible padding print(f"Have {avail_numel} numels to process.") print(f"Need {wanted_numel} numels in {wanted_params} params.") # params # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support # out-of-core computing solution total_numel = 0 total_params = 0 for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups): offset = 0 avail_numel = full_single_fp32_vector.numel() for name, shape in shapes.items(): unpartitioned_numel = shape.numel() total_numel += unpartitioned_numel total_params += 1 if debug: print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape) offset += unpartitioned_numel # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and # avail_numel can differ by anywhere between 0..2*world_size. 
Due to two unrelated complex # paddings performed in the code it's almost impossible to predict the exact numbers w/o the # live optimizer object, so we are checking that the numbers are within the right range align_to = 2 * world_size def zero2_align(x): return align_to * math.ceil(x / align_to) if debug: print(f"original offset={offset}, avail_numel={avail_numel}") offset = zero2_align(offset) avail_numel = zero2_align(avail_numel) if debug: print(f"aligned offset={offset}, avail_numel={avail_numel}") # Sanity check if offset != avail_numel: raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements") def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states): state_dict = OrderedDict() # buffers buffers = zero_model_states[0].buffers state_dict.update(buffers) if debug: print(f"added {len(buffers)} buffers") _zero2_merge_frozen_params(state_dict, zero_model_states) _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) # recover shared parameters for pair in zero_model_states[0].shared_params: if pair[1] in state_dict: state_dict[pair[0]] = state_dict[pair[1]] return state_dict def zero3_partitioned_param_info(unpartitioned_numel, world_size): remainder = unpartitioned_numel % world_size padding_numel = (world_size - remainder) if remainder else 0 partitioned_numel = math.ceil(unpartitioned_numel / world_size) return partitioned_numel, padding_numel def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states): if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: return if debug: for i in range(world_size): num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values()) print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') frozen_param_shapes = zero_model_states[0].frozen_param_shapes wanted_params = len(frozen_param_shapes) wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size print(f'Frozen params: Have {avail_numel} numels to process.') print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') total_params = 0 total_numel = 0 for name, shape in zero_model_states[0].frozen_param_shapes.items(): total_params += 1 unpartitioned_numel = shape.numel() total_numel += unpartitioned_numel param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states) state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape) partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) if debug: print( f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" ) print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): param_shapes = zero_model_states[0].param_shapes avail_numel = fp32_flat_groups[0].numel() * world_size # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each # param, re-consolidating each param, while dealing with padding if any # merge list of dicts, preserving order param_shapes = {k: v for d in 
param_shapes for k, v in d.items()} if debug: for i in range(world_size): print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}") wanted_params = len(param_shapes) wanted_numel = sum(shape.numel() for shape in param_shapes.values()) # not asserting if there is a mismatch due to possible padding avail_numel = fp32_flat_groups[0].numel() * world_size print(f"Trainable params: Have {avail_numel} numels to process.") print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.") # params # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support # out-of-core computing solution offset = 0 total_numel = 0 total_params = 0 for name, shape in param_shapes.items(): unpartitioned_numel = shape.numel() total_numel += unpartitioned_numel total_params += 1 partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) if debug: print( f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" ) # XXX: memory usage doubles here state_dict[name] = torch.cat( tuple(fp32_flat_groups[i].narrow(0, offset, partitioned_numel) for i in range(world_size)), 0).narrow(0, 0, unpartitioned_numel).view(shape) offset += partitioned_numel offset *= world_size # Sanity check if offset != avail_numel: raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements") def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states): state_dict = OrderedDict() # buffers buffers = zero_model_states[0].buffers state_dict.update(buffers) if debug: print(f"added {len(buffers)} buffers") _zero3_merge_frozen_params(state_dict, world_size, zero_model_states) _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) # recover shared parameters for pair in zero_model_states[0].shared_params: if pair[1] in state_dict: state_dict[pair[0]] = state_dict[pair[1]] return state_dict def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag=None): """ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example via a model hub. Args: - ``checkpoint_dir``: path to the desired checkpoint folder - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14`` Returns: - pytorch ``state_dict`` Note: this approach may not work if your application doesn't have sufficient free CPU memory and you may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with the checkpoint. A typical usage might be :: from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint # do the training and checkpoint saving state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu model = model.cpu() # move to cpu model.load_state_dict(state_dict) # submit to model hub or save the model to share with others In this example the ``model`` will no longer be usable in the deepspeed context of the same application. i.e. you will need to re-initialize the deepspeed engine, since ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. 
If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. """ if tag is None: latest_path = os.path.join(checkpoint_dir, 'latest') if os.path.isfile(latest_path): with open(latest_path, 'r') as fd: tag = fd.read().strip() else: raise ValueError(f"Unable to find 'latest' file at {latest_path}") ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) if not os.path.isdir(ds_checkpoint_dir): raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") return _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir) def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, output_file, tag=None): """ Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. Args: - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) - ``output_file``: path to the pytorch fp32 state_dict output file (e.g. path/pytorch_model.bin) - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` """ state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) print(f"Saving fp32 state dict to {output_file}") torch.save(state_dict, output_file) def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None): """ 1. Put the provided model to cpu 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` 3. Load it into the provided model Args: - ``model``: the model object to update - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` Returns: - ``model`: modified model Make sure you have plenty of CPU memory available before you call this function. If you don't have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it conveniently placed for you in the checkpoint folder. A typical usage might be :: from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) # submit to model hub or save the model to share with others Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context of the same application. i.e. you will need to re-initialize the deepspeed engine, since ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. """ logger.info(f"Extracting fp32 weights") state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) logger.info(f"Overwriting model with fp32 weights") model = model.cpu() model.load_state_dict(state_dict, strict=False) return model if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("checkpoint_dir", type=str, help="path to the desired checkpoint folder, e.g., path/checkpoint-12") parser.add_argument( "output_file", type=str, help="path to the pytorch fp32 state_dict output file (e.g. 
path/checkpoint-12/pytorch_model.bin)") parser.add_argument("-d", "--debug", action='store_true', help="enable debug") args = parser.parse_args() debug = args.debug convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, args.output_file)
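# Usage sketch (illustrative, not part of the original script): the checkpoint path and tag below are
# hypothetical. The same helpers are also importable as deepspeed.utils.zero_to_fp32, as the docstrings note.
from deepspeed.utils.zero_to_fp32 import (get_fp32_state_dict_from_zero_checkpoint,
                                          convert_zero_checkpoint_to_fp32_state_dict)

# Write a standalone fp32 checkpoint next to the DeepSpeed one
# (CLI equivalent: python zero_to_fp32.py path/checkpoint-12 path/checkpoint-12/pytorch_model.bin).
convert_zero_checkpoint_to_fp32_state_dict("path/checkpoint-12", "path/checkpoint-12/pytorch_model.bin")

# Or reconstruct the state_dict in memory; this needs enough free CPU RAM to hold the full fp32 model.
state_dict = get_fp32_state_dict_from_zero_checkpoint("path/checkpoint-12", tag="global_step14")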
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team import torch from dataclasses import dataclass from deepspeed import comm as dist @dataclass class fragment_address: numel: int start: int @dataclass class tensor_fragment: lp_fragment: torch.Tensor lp_fragment_address: fragment_address hp_fragment: torch.Tensor hp_fragment_address: fragment_address optim_fragment: {} gradient_dict: {} offload_gradient_dict: {} use_offload: bool param_group_index: int def update_hp(self): self.hp_fragment.data.copy_(self.lp_fragment.data) def update_lp(self): self.lp_fragment.data.copy_(self.hp_fragment.data) def get_optim_state_fragment(self, key): if key in self.optim_fragment: return self.optim_fragment[key] else: raise ValueError(f'{key} not found in optimizer state fragment') def get_hp_fragment_address(self): return self.hp_fragment_address def get_optim_state_keys(self): return list(self.optim_fragment.keys()) def get_full_hp_param(self, optim_state_key=None): reduce_buffer = torch.zeros_like(self, dtype=torch.float32).flatten() if self._hp_mapping is not None: lp_frag_address = self._hp_mapping.lp_fragment_address reduce_fragment = torch.narrow(reduce_buffer, 0, lp_frag_address.start, lp_frag_address.numel) if optim_state_key is None: hp_fragment = self._hp_mapping.hp_fragment else: hp_fragment = self._hp_mapping.get_optim_state_fragment(optim_state_key) reduce_fragment.data.copy_(hp_fragment.data) dist.all_reduce(reduce_buffer, group=self._dp_group) return reduce_buffer.reshape_as(self) def get_full_hp_grad(self): reduce_buffer = torch.zeros_like(self, dtype=torch.float32).flatten() if self._hp_mapping is not None: hp_mapping = self._hp_mapping if hp_mapping.use_offload: gradient_dict = hp_mapping.offload_gradient_dict else: gradient_dict = hp_mapping.gradient_dict if hp_mapping.param_group_index not in gradient_dict or gradient_dict[hp_mapping.param_group_index] is None: raise ValueError("Gradients are only available immediately after backward and before engine step") lp_grad_fragment = gradient_dict[hp_mapping.param_group_index][self._index_in_param_group] hp_grad_fragment = lp_grad_fragment.to(torch.float32).flatten() lp_frag_address = self._hp_mapping.lp_fragment_address reduce_fragment = torch.narrow(reduce_buffer, 0, lp_frag_address.start, lp_frag_address.numel) if self.view(-1).shape == hp_grad_fragment.shape: reduce_buffer.data.copy_(hp_grad_fragment.data) else: reduce_fragment.data.copy_(hp_grad_fragment.data) dist.all_reduce(reduce_buffer, group=self._dp_group) return reduce_buffer.reshape_as(self) def safe_get_full_fp32_param(param): """Assemble and return the fp32 parameter of a low-precision (e.g., fp16) parameter. Args: param (``torch.nn.Parameter``): A model parameter """ # ZeRO stage 3 param if hasattr(param, 'ds_id'): return param._z3_optimizer.get_full_hp_param(param) # ZeRO stage 1, 2, and bf16_optimizer params if hasattr(param, '_hp_mapping'): return param.get_full_hp_param() return None def safe_get_full_optimizer_state(param, optim_state_key): """Assemble and return the fp32 optimizer state of a low-precision (e.g., fp16) parameter. 
Args: param (``torch.nn.Parameter``): A model parameter """ # ZeRO stage 3 param if hasattr(param, 'ds_id'): return param._z3_optimizer.get_full_hp_param(param, optim_state_key) # ZeRO stage 1, 2, and bf16_optimizer params if hasattr(param, '_hp_mapping'): return param.get_full_hp_param(optim_state_key) return None # TODO: Figure out the correct return dtype def safe_get_full_grad(param): """Assemble and return the fp32 gradient of a low-precision (e.g., fp16) parameter. Args: param (``torch.nn.Parameter``): A model parameter """ if param.grad is not None: return param.grad # ZeRO stage 3 param if hasattr(param, 'ds_id'): return param._z3_optimizer.get_fp32_grad_for_param(param) # ZeRO stage 1, 2, and bf16_optimizer params if hasattr(param, '_hp_mapping'): return param.get_full_hp_grad() return None def get_hp_fragment_mapping(lp_param, lp_start, flat_hp_partition, gradient_dict, offload_gradient_dict, use_offload, param_group_index, partition_start, partition_size, optimizer_state_dict): lp_end = lp_param.numel() + lp_start hp_start = partition_start hp_end = partition_start + partition_size fragment_start = max(lp_start, hp_start) fragment_end = min(lp_end, hp_end) assert fragment_start < fragment_end, \ f'fragment start {fragment_start} should be < fragment_end {fragment_end}' fragment_numel = fragment_end - fragment_start hp_frag_address = fragment_address(start=fragment_start - hp_start, numel=fragment_numel) hp_fragment_tensor = flat_hp_partition.narrow(0, hp_frag_address.start, hp_frag_address.numel) optim_fragment = { key: value.narrow(0, hp_frag_address.start, hp_frag_address.numel) for key, value in optimizer_state_dict.items() if torch.is_tensor(value) and value.shape == flat_hp_partition.shape } lp_frag_address = fragment_address(start=fragment_start - lp_start, numel=fragment_numel) lp_fragment_tensor = lp_param.flatten().narrow(0, lp_frag_address.start, lp_frag_address.numel) return tensor_fragment(lp_fragment=lp_fragment_tensor, lp_fragment_address=lp_frag_address, hp_fragment=hp_fragment_tensor, hp_fragment_address=hp_frag_address, optim_fragment=optim_fragment, gradient_dict=gradient_dict, offload_gradient_dict=offload_gradient_dict, use_offload=use_offload, param_group_index=param_group_index) ''' Logic for lp_param to hp_param mapping lp lp0 lp1 lp2 lp3 lp4 <------- indices/names lp [ ][ ][ ][ ][ ] <-------- tensors flat_lp [ ] <-------- flat lp params flat_hp [ ] <------------------ flat hp partition on current rank full_hp [ ] <------- full flat hp params lp2 full numel = 16 lp_frag numel = 12 frag_start = 3 frag_end = 15 hp_frag numel = 12 frag_start = 0 frag_end = 11 hp_frag.copy_(lp_frag) lp3: full numel = 4 lp_frag numel = 4 start = 0 end = 3 hp_frag numel = 4 start = 12 end = 15 lp4: full numel = 12 lp_frag numel = 4 start = 0 end = 3 hp_frag numel = 4 start = 16 end = 19 Visual depiction of above lp { } flat_lp [ ] flat_hp ( ) flat_lp [ { ( } ) ] lx hx ly hy ly-hx lp { } flat_lp [ ] flat_hp ( ) flat_lp [ ( { ) } ] hx lx hy ly hy-lx lp { } flat_lp [ ] flat_hp ( ) flat_lp [ ( { } ) ] hx lx ly hy ly-lx lp -> (lx, hy) flat_hp -> (hx, hy) '''
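# Usage sketch (illustrative, not part of the original file): inspect a partitioned parameter's full
# fp32 weight, gradient, and optimizer state from any rank. `model` is a hypothetical module wrapped
# by a DeepSpeed engine, "exp_avg" assumes an Adam-style optimizer, and the import path assumes these
# helpers are re-exported from the public deepspeed.utils namespace.
from deepspeed.utils import safe_get_full_fp32_param, safe_get_full_grad, safe_get_full_optimizer_state


def dump_param_views(model):
    for name, param in model.named_parameters():
        hp_param = safe_get_full_fp32_param(param)                 # full fp32 weights (None if not mapped)
        hp_grad = safe_get_full_grad(param)                        # only valid between backward() and step()
        exp_avg = safe_get_full_optimizer_state(param, "exp_avg")  # "exp_avg" assumes an Adam-style optimizer
        print(name, getattr(hp_param, "shape", None), hp_grad is not None, exp_avg is not None)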
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from deepspeed.accelerator import get_accelerator


def instrument_w_nvtx(func):
    """decorator that causes an NVTX range to be recorded for the duration of the function call."""

    def wrapped_fn(*args, **kwargs):
        get_accelerator().range_push(func.__qualname__)
        ret_val = func(*args, **kwargs)
        get_accelerator().range_pop()
        return ret_val

    return wrapped_fn
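# Usage sketch (illustrative, not part of the original file): wrap a hot function so it shows up as a
# named NVTX range in a profiler timeline. `fused_norm` is a hypothetical function, and the import
# path assumes this module lives at deepspeed.utils.nvtx.
import torch

from deepspeed.utils.nvtx import instrument_w_nvtx


@instrument_w_nvtx
def fused_norm(t: torch.Tensor) -> torch.Tensor:
    return t.float().norm()


# fused_norm(torch.randn(1024)) now pushes/pops a range named "fused_norm" around the call.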
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import types

from deepspeed.utils import get_full_hp_param, get_full_hp_grad, get_hp_fragment_mapping


def link_hp_params(lp_param_list, flat_hp_partition, gradient_dict, offload_gradient_dict, use_offload,
                   param_group_index, partition_start, partition_size, partition_optimizer_state, dp_group):
    local_lp_param_and_offset = _init_lp_to_hp_mapping(lp_param_list, partition_start, partition_size, dp_group)

    for lp_param, lp_start in local_lp_param_and_offset:
        lp_param._hp_mapping = get_hp_fragment_mapping(lp_param, lp_start, flat_hp_partition, gradient_dict,
                                                       offload_gradient_dict, use_offload, param_group_index,
                                                       partition_start, partition_size, partition_optimizer_state)


def _init_lp_to_hp_mapping(lp_param_list, partition_start, partition_size, dp_group):
    current_offset = 0
    param_and_offset_list = []
    partition_end = partition_start + partition_size
    index_in_param_group = 0
    for i, lp_param in enumerate(lp_param_list):
        lp_param._hp_mapping = None
        lp_param._dp_group = dp_group
        lp_param.get_full_hp_param = types.MethodType(get_full_hp_param, lp_param)
        lp_param.get_full_hp_grad = types.MethodType(get_full_hp_grad, lp_param)

        # lp_param overlaps with this partition only if both are true:
        # 1) current_offset < partition_end
        # 2) current_offset + lp_param.numel() > partition_start
        lp_param_end = current_offset + lp_param.numel()
        if current_offset < partition_end and lp_param_end > partition_start:
            param_and_offset_list.append((lp_param, current_offset))
            lp_param._index_in_param_group = index_in_param_group  # indices for params in this partition/GPU
            index_in_param_group += 1
        current_offset += lp_param.numel()

    return param_and_offset_list
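# Worked example (illustrative, not part of the original file) of the overlap test used in
# _init_lp_to_hp_mapping: with lp params of numel [4, 6, 5] laid out flat and this rank owning the
# hp partition [5, 10), only params whose flat span intersects [5, 10) receive an _hp_mapping.
def params_overlapping_partition(numels, partition_start, partition_size):
    partition_end = partition_start + partition_size
    selected, offset = [], 0
    for idx, numel in enumerate(numels):
        # same condition as in _init_lp_to_hp_mapping, expressed on raw numels
        if offset < partition_end and offset + numel > partition_start:
            selected.append((idx, offset))
        offset += numel
    return selected


# param 0 spans [0, 4), param 1 spans [4, 10), param 2 spans [10, 15); only param 1 overlaps [5, 10)
assert params_overlapping_partition([4, 6, 5], partition_start=5, partition_size=5) == [(1, 4)]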
# Copyright (c) Microsoft Corporation. # SPDX-License-Identifier: Apache-2.0 # DeepSpeed Team # return a list of list for cores to numa mapping # [ # [ cores for numa 0 ] # [ cores belong to numa 1 ] # ... # ] import distutils import os import psutil import subprocess # return a list of list for cores to numa mapping # [ # [ cores for numa 0 ] # [ cores belong to numa 1 ] # ... # ] def get_numa_cores(): ret = [] output = subprocess.check_output(['numactl', '--hardware']).decode("utf-8") lines = output.split('\n') for line in lines: if line.startswith('available:'): num_numas = int(line.split(' ')[1]) break for numa in range(num_numas): for line in lines: if line.startswith(f'node {numa} cpus:'): cores = line.split(' ')[3:] ret.append([int(core) for core in cores]) return ret def check_for_numactl_pkg(): libs = dict( dpkg=["-l", "numactl", "apt"], pacman=["-Q", "numactl", "pacman"], rpm=["-q", "numactl", "yum"], ) found = False for pkgmgr, data in libs.items(): flag, lib, tool = data path = distutils.spawn.find_executable(pkgmgr) if path is not None: cmd = f"{pkgmgr} {flag} {lib}" result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) if result.wait() == 0: found = True else: print(f"please install the {lib} package with {tool}") break return found def parse_range(rng): try: value = int(rng) return range(value, value + 1) except ValueError: # value is not a single number parts = rng.split('-') if len(parts) != 2: raise ValueError("Bad range: '%s', range must be either a number or two number separated by dash" % (rng, )) start = int(parts[0]) end = int(parts[1]) if start > end: raise ValueError("Bad range: '%s', range end must larger than or equal to start" % (rng, )) return range(start, end + 1) # parse comma and dash separated range list into list # i.e. "0,2-4,6" --> [0, 2, 3, 4, 6] # rules: # 1. Range list number be comma separated, each item are either a single number, # or a range marked by two numbers (both number are included in the range) # 2. Sub ranges must be in ascend order and not overlap with each other # 3. No space in the range expression def parse_range_list(range_str): number_list = [] last = -1 range_list = range_str.split(',') for sub_range in range_list: sub_number_list = parse_range(sub_range) if sub_number_list[0] <= last: raise ValueError( "Bad range: '%s', sub ranges must not overlap with each other and should be in ascend order" % (range_str, )) last = sub_number_list[-1] number_list.extend(sub_number_list) return number_list def get_numactl_cmd(bind_core_list, num_local_procs, local_rank): numactl_cmd = [] check_for_numactl_pkg() if 'KMP_AFFINITY' in os.environ.keys(): raise ValueError("Environment variable KMP_AFFINITY conflicts with numactl " "because it interfere with how many CPU cores numactl can set. 
" "Unset KMP_AFFINITY before launching deepspeed.\n\n" "\t$ unset KMP_AFFINITY\n" "\t$ deepspeed <deepspeed command parameters>") if bind_core_list != None: core_list = parse_range_list(bind_core_list) total_cores = len(core_list) else: total_cores = psutil.cpu_count(logical=False) core_list = range(total_cores) cores_per_rank = total_cores // num_local_procs assert cores_per_rank >= 1, "At least one core needs to be assigned to each rank" core_list_for_rank = core_list[cores_per_rank * local_rank:cores_per_rank * (local_rank + 1)] numactl_cmd.append("numactl") # check if all cores belong to same numa, if true, bind process to that numa domain with -m parameter numa_cores = get_numa_cores() num_numas = len(numa_cores) for i in range(num_numas): if set(core_list_for_rank) <= set(numa_cores[i]): numactl_cmd.append("-m") numactl_cmd.append(f"{i}") break numactl_cmd.append("-C") last_core = core_list_for_rank[0] first_core = last_core core_list_str = f"{last_core}" for core_id in core_list_for_rank[1:]: if core_id == last_core + 1: last_core = core_id continue else: if first_core == last_core: core_list_str = f"{core_list_str},{core_id}" else: core_list_str = f"{core_list_str}-{last_core},{core_id}" first_core = core_id last_core = core_id if first_core != last_core: core_list_str = f"{core_list_str}-{last_core}" numactl_cmd.append(f"{core_list_str}") return cores_per_rank, numactl_cmd
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from deepspeed.runtime.config_utils import get_scalar_param, DeepSpeedConfigObject
from deepspeed.profiling.constants import *


class DeepSpeedFlopsProfilerConfig(DeepSpeedConfigObject):

    def __init__(self, param_dict):
        super(DeepSpeedFlopsProfilerConfig, self).__init__()

        self.enabled = None
        self.profile_step = None
        self.module_depth = None
        self.top_modules = None

        if FLOPS_PROFILER in param_dict.keys():
            flops_profiler_dict = param_dict[FLOPS_PROFILER]
        else:
            flops_profiler_dict = {}

        self._initialize(flops_profiler_dict)

    def _initialize(self, flops_profiler_dict):
        self.enabled = get_scalar_param(flops_profiler_dict, FLOPS_PROFILER_ENABLED, FLOPS_PROFILER_ENABLED_DEFAULT)
        self.profile_step = get_scalar_param(flops_profiler_dict, FLOPS_PROFILER_PROFILE_STEP,
                                             FLOPS_PROFILER_PROFILE_STEP_DEFAULT)
        self.module_depth = get_scalar_param(flops_profiler_dict, FLOPS_PROFILER_MODULE_DEPTH,
                                             FLOPS_PROFILER_MODULE_DEPTH_DEFAULT)
        self.top_modules = get_scalar_param(flops_profiler_dict, FLOPS_PROFILER_TOP_MODULES,
                                            FLOPS_PROFILER_TOP_MODULES_DEFAULT)
        self.detailed = get_scalar_param(flops_profiler_dict, FLOPS_PROFILER_DETAILED, FLOPS_PROFILER_DETAILED_DEFAULT)
        self.output_file = get_scalar_param(flops_profiler_dict, FLOPS_PROFILER_OUTPUT_FILE,
                                            FLOPS_PROFILER_OUTPUT_FILE_DEFAULT)
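# Usage sketch (illustrative, not part of the original file): build the config object directly from a
# ds_config-style dict; any key that is omitted falls back to its *_DEFAULT constant.
ds_config = {
    "flops_profiler": {
        "enabled": True,
        "profile_step": 1,
        "module_depth": -1,
        "top_modules": 3,
        "detailed": True,
        "output_file": None,
    }
}
profiler_config = DeepSpeedFlopsProfilerConfig(ds_config)
assert profiler_config.enabled is True and profiler_config.top_modules == 3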
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

#########################################
# flops profiler
#########################################
# Flops profiler. By default, this feature is not enabled.
# Users can configure it in ds_config.json as in the example below:
FLOPS_PROFILER_FORMAT = '''
flops profiler should be enabled as:
"session_params": {
  "flops_profiler": {
    "enabled": true,
    "profile_step": 1,
    "module_depth": -1,
    "top_modules": 3,
    "detailed": true,
    "output_file": null
  }
}
'''

FLOPS_PROFILER = "flops_profiler"

FLOPS_PROFILER_ENABLED = "enabled"
FLOPS_PROFILER_ENABLED_DEFAULT = False

FLOPS_PROFILER_PROFILE_STEP = "profile_step"
FLOPS_PROFILER_PROFILE_STEP_DEFAULT = 1

FLOPS_PROFILER_MODULE_DEPTH = "module_depth"
FLOPS_PROFILER_MODULE_DEPTH_DEFAULT = -1

FLOPS_PROFILER_TOP_MODULES = "top_modules"
FLOPS_PROFILER_TOP_MODULES_DEFAULT = 1

FLOPS_PROFILER_DETAILED = "detailed"
FLOPS_PROFILER_DETAILED_DEFAULT = True

FLOPS_PROFILER_OUTPUT_FILE = "output_file"
FLOPS_PROFILER_OUTPUT_FILE_DEFAULT = None