diff --git a/.gitattributes b/.gitattributes
index f92b1c0bfac048c6e433ff55cb3af1911ac00466..f4cc696bbfd3a47382b99441add3faa72ff24f04 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -136,3 +136,5 @@ venv/lib/python3.10/site-packages/scipy/sparse/_sparsetools.cpython-310-x86_64-l
 venv/lib/python3.10/site-packages/numpy/core/_simd.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 venv/lib/python3.10/site-packages/aiohttp/_http_parser.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 venv/lib/python3.10/site-packages/scipy.libs/libgfortran-040039e1.so.5.0.0 filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/numpy.libs/libgfortran-040039e1.so.5.0.0 filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/numpy/core/_multiarray_umath.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..35d3de2d96eac55ed1df71b1fd93dd37126117d1
Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/bf16_optimizer.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/bf16_optimizer.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..782f5742f45b11de8b9463c7cc3ff105782dd885
Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/__pycache__/bf16_optimizer.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/base_optimizer.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/base_optimizer.py
new file mode 100644
index 0000000000000000000000000000000000000000..6cfd66f1cc38ae216e75f006eec2e8094b7b0084
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/base_optimizer.py
@@ -0,0 +1,63 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import os
+import torch
+
+from deepspeed.utils import logger
+from deepspeed.utils.tensor_fragment import map_to_flat_opt_states
+from deepspeed.runtime.utils import bwc_tensor_model_parallel_rank
+
+
+class DeepSpeedOptimizer(object):
+    pass
+
+
+class ZeROOptimizer(DeepSpeedOptimizer):
+
+    def load_hp_checkpoint_state_from_checkpoint_dir(self, lp_groups_name: str, checkpoint_dir: str) -> None:
+        checkpoint_dir = os.path.join(checkpoint_dir, "zero")
+        optim_state_path = os.path.join(checkpoint_dir, "optimizer_state.pt")
+        assert os.path.isfile(
+            optim_state_path), f'{optim_state_path} containing optimizer global state is missing! Cannot proceed.'
+        optim_sd = torch.load(optim_state_path)
+
+        self._load_global_state(optim_sd)
+
+        tp_rank = bwc_tensor_model_parallel_rank(mpu=self.mpu)
+        if self.mpu is None:
+            logger.warn("MPU is not provided, setting tp size to 1 in checkpoint loading.")
+            tp_world_size = 1
+        else:
+            tp_world_size = self.mpu.get_slice_parallel_world_size() if hasattr(self.mpu, "get_slice_parallel_world_size") \
+                else self.mpu.get_tensor_model_parallel_world_size()
+
+        for i, (param_group,
+                loaded_param_group) in enumerate(zip(self.optimizer.param_groups, optim_sd['param_groups'])):
+            # We have an assumption that all params in the same param_group have the same keys
+            opt_keys = set()
+            steps = []
+
+            lp_groups = getattr(self, lp_groups_name)
+            for lp in lp_groups[i]:
+                if lp._hp_mapping is not None:
+                    #print(f"Loading {self.param_names[lp]} {tp_rank=} {tp_world_size=}")
+                    step = lp.load_hp_checkpoint_state(os.path.join(checkpoint_dir, self.param_names[lp]), tp_rank,
+                                                       tp_world_size)
+                    for key in lp._hp_mapping.get_optim_state_keys():
+                        opt_keys.add(key)
+                    steps.append(step)
+
+            hp_param = param_group['params'][0]
+            assert all(step == steps[0] for step in steps), f"Steps {steps} are not equal"
+            if steps[0] is not None:
+                self.optimizer.state[hp_param]['step'] = steps[0]
+
+            map_to_flat_opt_states(hp_param, lp_groups[i], self.optimizer.state, opt_keys)
+
+            for key, value in loaded_param_group.items():
+                if key == 'params':
+                    continue
+                param_group[key] = value
diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c5067f71c8faf166bc78e88f9b62e8627dda7c7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57e22aefc42383a85f2daa1cd5291222327f59eb Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/coalesced_collectives.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/coalesced_collectives.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb3fe1a465aeaede7b0310400f0530f7d9f11b10 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/coalesced_collectives.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/hccl.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/hccl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2532465f02a7e01c15323923ec415fcd69b1fbd Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/hccl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/mpi.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/mpi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dad95cc66e2799052415ff5c61a9e398348d141b Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/mpi.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/nccl.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/nccl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fffb6875c3ab23392c2ce0df63df44c125950f4f Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/__pycache__/nccl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/coalesced_collectives.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/coalesced_collectives.py new file mode 100644 index 0000000000000000000000000000000000000000..543795126fabe2dc154de317aec6a349c92fdf2d --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/coalesced_collectives.py @@ -0,0 +1,141 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +batched collective operations for overhead amortization and better +bandwidth utilization +""" + +import math +from typing import List +import torch +from torch import Tensor +from deepspeed import comm as dist +from deepspeed.comm import ProcessGroup, all_to_all_single +from deepspeed.accelerator import get_accelerator +from deepspeed.utils import instrument_w_nvtx +from deepspeed.ops import op_builder +from deepspeed.utils import logger + + +def _torch_reduce_scatter_fn(input_tensor: Tensor, output_tensor: Tensor, group=None, async_op=False, prof=False): + return instrument_w_nvtx(dist.reduce_scatter_fn)(output_tensor, input_tensor, group=group, async_op=False) + + +quantizer_module = None + + +@instrument_w_nvtx +@torch.no_grad() +def all_to_all_quant_reduce(tensors: List[Tensor], groups: {}) -> List[Tensor]: + global quantizer_module + if quantizer_module is None: + quantizer_module = op_builder.QuantizerBuilder().load() + local_world_size = get_accelerator().device_count() + global_world_size = dist.get_world_size() + num_nodes = global_world_size // local_world_size + this_rank = dist.get_rank() + intra_idx = int(this_rank / local_world_size) + inter_idx = this_rank % local_world_size + output_lst: List[Tensor] = [None] * len(tensors) + for idx, tensor in enumerate(tensors): + if tensor.dim() == 1: + output_lst[idx] = reduce_scatter_coalesced([tensor])[0] + elif tensor.numel() % (2 * global_world_size) != 0: + # Due to the constraint of 2-stage all-to-all, the input tensor must be divisible by 2 * global_world_size + # Otherwise, all-to-all cannot be performed because of shape mismatch. + # See more at https://github.com/microsoft/DeepSpeed/pull/5056 + logger.warning( + f"qgZ falls back to reduce_scatter because tensor size = {tensor.numel()} is not divisible by (2 * global_world_size) = {2 * global_world_size}. 
Please consider allocating a new world to enable qgZ" + ) + output_lst[idx] = reduce_scatter_coalesced([tensor])[0] + else: + intra_quant_group = max(tensor.shape[0], tensor.shape[1], global_world_size) + + inter_quant_group = intra_quant_group // local_world_size + intra_quant_int4, intra_q_scales = quantizer_module.swizzle_quant(tensor, intra_quant_group, 4, + quantizer_module.Symmetric, 1, num_nodes, + local_world_size) + local_output = torch.empty_like(intra_quant_int4) + scale_output = torch.empty_like(intra_q_scales) + all_to_all_single(local_output, intra_quant_int4, group=groups[f'local_{intra_idx}']) + all_to_all_single(scale_output, intra_q_scales, group=groups[f'local_{intra_idx}']) + global_input_tensor, global_scales = quantizer_module.quantized_reduction( + local_output, scale_output, intra_quant_group, inter_quant_group, 4, quantizer_module.Symmetric, + local_world_size) + global_output = torch.empty_like(global_input_tensor) + global_scale_output = torch.empty_like(global_scales) + all_to_all_single(global_output, global_input_tensor, group=groups[f'global_{inter_idx}']) + all_to_all_single(global_scale_output, global_scales, group=groups[f'global_{inter_idx}']) + final_output = quantizer_module.dequantize(global_output, global_scale_output, global_scale_output.numel(), + 4, quantizer_module.Symmetric) + assert final_output.numel( + ) % num_nodes == 0, f"final_output.numel()={final_output.numel()} is not divisible by num_nodes={num_nodes}" + output_lst[idx] = (sum(list(final_output.chunk(num_nodes))) / num_nodes).view(-1) + return output_lst + + +@instrument_w_nvtx +@torch.no_grad() +def reduce_scatter_coalesced( + tensors: List[Tensor], + group: ProcessGroup = None, +) -> List[Tensor]: + """simultaneously reduce-scatter a list of tensors - this can be done more + efficiently than individual reduce scatter calls + TODO. 
see if PyTorch team wants a c++ version of this for ProcessGroupNCCL + """ + this_rank = dist.get_rank(group) + world_sz = dist.get_world_size(group) + + partition_lst_for_each_tensor = [None] * len(tensors) + for tensor_idx, tensor in enumerate(tensors): + flattened_tensor = tensor.view(-1) + chunk_sz = math.ceil(tensor.numel() / world_sz) + partition_lst_for_each_tensor[tensor_idx] = [ + flattened_tensor[rank * chunk_sz:rank * chunk_sz + chunk_sz] for rank in range(0, world_sz) + ] + + padded_partition_sz_for_each_tensor = tuple(math.ceil(t.numel() / world_sz) for t in tensors) + + if len(tensors) == 1 and tensors[0].numel() % world_sz == 0: + # if there's only one tensor being reduced and we don't need to pad + # we have an opportunity to avoid a memory allocation + tensor_partition_flat_buffer = tensors[0].view(-1) + else: + # interleave tensor partitions such that the correct reduced partitions of each tensor + # end up at each rank + tensor_partitions_lst_with_padding = [] + for rank in range(world_sz): + for tensor_idx in range(len(tensors)): + # add tensor content + tensor_chunk = partition_lst_for_each_tensor[tensor_idx][rank] + tensor_partitions_lst_with_padding.append(tensor_chunk) + + # add padding if necessary + padding_sz = padded_partition_sz_for_each_tensor[tensor_idx] - tensor_chunk.numel() + if padding_sz > 0: + tensor_partitions_lst_with_padding.append( + torch.empty(padding_sz, dtype=tensor_chunk.dtype, device=tensor_chunk.device)) + + tensor_partition_flat_buffer = instrument_w_nvtx(torch.cat)(tensor_partitions_lst_with_padding) + + tensor_partition_flat_buffer.div_(world_sz) # pre-divide + tensor_partition_buffer_for_each_rank: List[Tensor] = torch.chunk(tensor_partition_flat_buffer, world_sz) + + # batched reduce-scatter call + _torch_reduce_scatter_fn(tensor_partition_flat_buffer, + tensor_partition_buffer_for_each_rank[this_rank], + group=group) + + # reverse procedure of the interleaving done previously, done on the + # result of the batched reduce-scatter + output_lst: List[Tensor] = [None] * len(tensors) + offset = 0 + for tensor_idx in range(len(tensors)): + output_lst[tensor_idx] = tensor_partition_buffer_for_each_rank[this_rank].narrow( + 0, offset, partition_lst_for_each_tensor[tensor_idx][this_rank].numel()) + + offset += padded_partition_sz_for_each_tensor[tensor_idx] + return output_lst diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/hccl.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/hccl.py new file mode 100644 index 0000000000000000000000000000000000000000..09fb11a731b8f4d4e9993d9caa1af45abb4611fd --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/hccl.py @@ -0,0 +1,124 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import numpy as np +import torch +import torch_npu +import deepspeed.comm as dist + + +class HcclBackend(object): + + def __init__(self, mpu=None): + if mpu is None: + self.world_group = dist.new_group(ranks=range(dist.get_world_size())) + else: + self.mpu = mpu + self.world_group = self.mpu.get_data_parallel_group() + self.size = dist.get_world_size(group=self.world_group) + self.rank = dist.get_rank(group=self.world_group) + + def my_igather(self, rank, size, group, sendbuf, recvbuf, root): + req = [] + if rank == root: + for idx in range(size): + if idx != rank: + req.append(dist.irecv(recvbuf[idx], src=idx, group=group)) + else: + recvbuf[rank] = sendbuf + else: + req.append(dist.isend(sendbuf, group=group, dst=root)) + return req + + def my_gather(self, rank, size, group, sendbuf, recvbuf, root): + if rank == root: + for idx in range(size): + if idx != rank: + dist.recv(recvbuf[idx], src=idx, group=group) + else: + recvbuf[rank] = sendbuf + else: + dist.send(sendbuf, group=group, dst=root) + + def compressed_allreduce(self, buffer_m: torch.tensor, worker_error, server_error, local_rank): + original_shape = buffer_m.size() + if len(original_shape) > 1: + buffer_m = torch.flatten(buffer_m) + + # align size of original_buffer and error + original_size = buffer_m.numel() + worker_error_size = worker_error.numel() + if original_size != worker_error_size: + empty_tensor = torch.zeros(worker_error_size - original_size, device=buffer_m.device) + buffer_m = torch.cat([buffer_m, empty_tensor]) + + buffer_m.add_(worker_error) + worker_scale = torch.linalg.norm(buffer_m) / np.sqrt(torch.numel(buffer_m)) + + worker_error.set_(buffer_m - worker_scale * buffer_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0)) + + sign_list_packed_tmp = torch_npu.npu_sign_bits_pack(buffer_m, self.size).type(torch.int8) + + recvbuf_sign = torch.zeros([self.size, len(sign_list_packed_tmp[self.rank])], + dtype=sign_list_packed_tmp[0].dtype, + device=sign_list_packed_tmp.device) + + sign_list_packed = [sign_list_packed_tmp[idx] for idx in range(self.size)] + + recvbuf_scale = [ + torch.zeros(1, dtype=worker_scale.dtype, device=torch.device(local_rank)) for _ in range(self.size) + ] + + # communication phase 1 + # all to all for sign + dist.all_to_all_single(recvbuf_sign, torch.stack(sign_list_packed), group=self.world_group) + # all gather for scale + dist.all_gather(recvbuf_scale, worker_scale, group=self.world_group) + + flattened_recvbuf_sign = recvbuf_sign.type(torch.uint8).flatten() + compensated_server_m = torch_npu.npu_sign_bits_unpack(flattened_recvbuf_sign, self.size, torch.float32) \ + .mul_(torch.stack(recvbuf_scale).mul_(1 / self.size)).sum(0) + + compensated_server_m.add_(server_error) + + server_scale = torch.norm(compensated_server_m) / np.sqrt(compensated_server_m.numel()) + + server_error.set_(compensated_server_m - + server_scale * compensated_server_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0)) + + server_sign_packed = torch_npu.npu_sign_bits_pack(compensated_server_m, 1).type(torch.int8) + + # recvbuf_sign_server + recvbuf_sign_server_tmp = torch.zeros([self.size, len(server_sign_packed[0])], + dtype=recvbuf_sign.dtype, + device=server_sign_packed.device) + + recvbuf_sign_server = [recvbuf_sign_server_tmp[idx] for idx in range(self.size)] + + # recvbuf_scale_server + recvbuf_scale_server_tmp = torch.zeros([self.size, 1], + dtype=worker_scale.dtype, + device=server_sign_packed.device) + + recvbuf_scale_server = 
[recvbuf_scale_server_tmp[idx] for idx in range(self.size)] + + # communication Phase 2 + dist.all_gather(recvbuf_sign_server, server_sign_packed[0], group=self.world_group) + dist.all_gather(recvbuf_scale_server, server_scale, group=self.world_group) + + recvbuf_sign_server = torch.stack(recvbuf_sign_server) + + flattened_recvbuf_sign_server = recvbuf_sign_server.type(torch.uint8).flatten() + + buffer_m.data.copy_( + torch_npu.npu_sign_bits_unpack(flattened_recvbuf_sign_server, self.size, + torch.float32).mul_(recvbuf_scale_server_tmp).flatten().data) + + if original_size != worker_error_size: + buffer_m = buffer_m[0:original_size] + if len(original_shape) > 1: + buffer_m = buffer_m.reshape(original_shape) + + return buffer_m diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/mpi.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/mpi.py new file mode 100644 index 0000000000000000000000000000000000000000..bc544787aa7a7de5181d83897e725991f572e5ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/mpi.py @@ -0,0 +1,215 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import cupy +import time +import numpy as np +from mpi4py import MPI + +from deepspeed.runtime.compression.cupy import CupyBackend + + +class MpiBackend(object): + + def __init__(self, cuda_aware): + self.comm = MPI.COMM_WORLD + self.rank = self.comm.Get_rank() + self.size = self.comm.Get_size() + self.cuda_aware = cuda_aware + self.compression_backend = CupyBackend() + + def my_igather(self, rank, size, comm, sendbuf, recbuf, root): + req = [] + if rank == root: + for idx in range(size): + if idx != rank: + req.append(comm.Irecv(recbuf[idx], source=idx)) + else: + recbuf[rank] = sendbuf + else: + req.append(comm.Isend(sendbuf, dest=root)) + return req + + def gather_cuda(self, rank, world_size, comm, cupy_sign_list_packed, cupy_recvbuf_sign, cupy_worker_scale, + cupy_recvbuf_scale): + # We do in-place operations on cupy buffers so we do not return any buffers + requests = [] + for idx in range(world_size): + req_sign = self.my_igather(rank, world_size, comm, cupy_sign_list_packed[idx], cupy_recvbuf_sign, root=idx) + requests += req_sign + + for idx in range(world_size): + req_scale = self.my_igather(rank, world_size, comm, cupy_worker_scale, cupy_recvbuf_scale, root=idx) + requests += req_scale + + MPI.Request.Waitall(requests) + + def gather_host(self, rank, world_size, comm, cupy_sign_list_packed, cupy_recvbuf_sign, cupy_worker_scale, + cupy_recvbuf_scale): + + # In-place operations are not possible for newly created cupy arrays + # so we need to return the new buffers + numpy_recvbuf_sign = np.zeros([world_size, cupy_sign_list_packed[rank].size], + dtype=cupy_sign_list_packed[0].dtype) + numpy_recvbuf_scale = np.zeros([world_size, 1], dtype=cupy_worker_scale.dtype) + + # 1. convert from cupy to numpy + numpy_sign_list_packed = cupy_sign_list_packed + + for idx in range(world_size): + numpy_sign_list_packed[idx] = cupy.asnumpy(cupy_sign_list_packed[idx]) + + numpy_worker_scale = cupy.asnumpy(cupy_worker_scale) + numpy_recvbuf_scale = cupy.asnumpy(cupy_recvbuf_scale) + + cupy.cuda.get_current_stream().synchronize() + + # 2. 
use numpy buffers for communication + requests = [] + + for idx in range(world_size): + req_sign = self.my_igather(rank, + world_size, + comm, + numpy_sign_list_packed[idx], + numpy_recvbuf_sign, + root=idx) + requests += req_sign + + for idx in range(world_size): + req_scale = self.my_igather(rank, world_size, comm, numpy_worker_scale, numpy_recvbuf_scale, root=idx) + requests += req_scale + + MPI.Request.Waitall(requests) + + # 3. Convert back from numpy to cupy + cupy_recvbuf_sign = cupy.asarray(numpy_recvbuf_sign) + for idx in range(world_size): + cupy_sign_list_packed[idx] = cupy.asarray(numpy_sign_list_packed[idx]) + + cupy_worker_scale = cupy.asarray(numpy_worker_scale) + cupy_recvbuf_scale = cupy.asarray(numpy_recvbuf_scale) + cupy.cuda.get_current_stream().synchronize() + + return cupy_sign_list_packed, cupy_recvbuf_sign, cupy_worker_scale, cupy_recvbuf_scale + + def allgather_cuda(self, comm, cupy_server_sign_packed, cupy_recvbuf_sign_server, cupy_server_scale, + cupy_recvbuf_scale_server): + comm.Allgather(cupy_server_sign_packed, cupy_recvbuf_sign_server) + comm.Allgather(cupy_server_scale, cupy_recvbuf_scale_server) + + def allgather_host(self, comm, cupy_server_sign_packed, cupy_recvbuf_sign_server, cupy_server_scale, + cupy_recvbuf_scale_server): + + # 1. Convert cupy to numpy + numpy_recvbuf_sign_server = np.zeros([comm.Get_size(), cupy_server_sign_packed.size], + dtype=cupy_server_sign_packed.dtype) + numpy_recvbuf_scale_server = np.zeros([comm.Get_size(), 1], dtype=cupy_server_scale.dtype) + + numpy_server_sign_packed = cupy.asnumpy(cupy_server_sign_packed) + numpy_recvbuf_sign_server = cupy.asnumpy(cupy_recvbuf_sign_server) + numpy_server_scale = cupy.asnumpy(cupy_server_scale) + numpy_recvbuf_scale_server = cupy.asnumpy(cupy_recvbuf_scale_server) + cupy.cuda.get_current_stream().synchronize() + + # 2. Communicate numpy buffers + comm.Allgather(numpy_server_sign_packed, numpy_recvbuf_sign_server) + comm.Allgather(numpy_server_scale, numpy_recvbuf_scale_server) + comm.Barrier() + + # 3. 
Convert numpy back to cupy + cupy_server_sign_packed = cupy.asarray(numpy_server_sign_packed) + cupy_recvbuf_sign_server = cupy.asarray(numpy_recvbuf_sign_server) + cupy_server_scale = cupy.asarray(numpy_server_scale) + cupy_recvbuf_scale_server = cupy.asarray(numpy_recvbuf_scale_server) + cupy.cuda.get_current_stream().synchronize() + + return cupy_server_sign_packed, cupy_recvbuf_sign_server, cupy_server_scale, cupy_recvbuf_scale_server + + def compressed_allreduce(self, buffer_m: torch.tensor, worker_error, server_error, local_rank): + + all_start_time = time.time() + original_shape = buffer_m.size() + if len(original_shape) > 1: + buffer_m = torch.flatten(buffer_m) + original_size = buffer_m.numel() + worker_error_size = worker_error.numel() + cupy.cuda.Device(local_rank).use() + + if original_size != worker_error_size: + empty_tensor = torch.zeros(worker_error_size - original_size, device=buffer_m.device) + buffer_m = torch.cat([buffer_m, empty_tensor]) + + buffer_m.add_(worker_error) + worker_scale = torch.linalg.norm(buffer_m) / np.sqrt(torch.numel(buffer_m)) + worker_error.set_(buffer_m - worker_scale * buffer_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0)) + + cupy_sign_list_packed = self.compression_backend.compress_by_chunk( + self.compression_backend.torch2cupy(buffer_m.sign_().add_(1).bool()), self.size) + cupy_worker_scale = self.compression_backend.torch2cupy(worker_scale) + + cupy_recvbuf_sign = cupy.zeros([self.size, cupy_sign_list_packed[self.rank].size], + dtype=cupy_sign_list_packed[0].dtype) + cupy_recvbuf_scale = cupy.zeros([self.size, 1], dtype=cupy_worker_scale.dtype) + + # Communication Phase 1 + gather_start = time.time() + if self.cuda_aware: + self.gather_cuda(self.rank, self.size, self.comm, cupy_sign_list_packed, cupy_recvbuf_sign, + cupy_worker_scale, cupy_recvbuf_scale) + else: + _, cupy_recvbuf_sign, _, cupy_recvbuf_scale = self.gather_host(self.rank, self.size, self.comm, + cupy_sign_list_packed, cupy_recvbuf_sign, + cupy_worker_scale, cupy_recvbuf_scale) + gather_end = time.time() + + # cupy_sign_list_packed, cupy_worker_scale, worker_scale = None, None, None + cupy_sign_list_packed = None + + compensated_server_m = self.compression_backend.cupy2torch( + (cupy.unpackbits(cupy_recvbuf_sign.flatten())).reshape(self.size, -1)).float().add_(-0.5).mul_(2.0).mul_( + self.compression_backend.cupy2torch(cupy_recvbuf_scale).mul_(1 / self.size)).sum(0) + compensated_server_m.add_(server_error) + server_scale = torch.linalg.norm(compensated_server_m) / np.sqrt(compensated_server_m.numel()) + server_error.set_(compensated_server_m - + server_scale * compensated_server_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0)) + + cupy_server_scale = self.compression_backend.torch2cupy(server_scale) + + cupy_server_sign_packed = self.compression_backend.compress_by_chunk( + self.compression_backend.torch2cupy(compensated_server_m.sign_().add_(1).bool()), 1) + compensated_server_m = None + + cupy_recvbuf_sign_server = cupy.zeros([self.size, cupy_server_sign_packed[0].size], + dtype=cupy_recvbuf_sign.dtype) + cupy_recvbuf_scale_server = cupy.zeros([self.size, 1], dtype=cupy_recvbuf_scale.dtype) + # cupy_recvbuf_sign, cupy_recvbuf_scale = None, None + cupy_recvbuf_sign = None + + # Communication Phase 2 + if self.cuda_aware: + self.allgather_cuda(self.comm, cupy_server_sign_packed[0], cupy_recvbuf_sign_server, cupy_server_scale, + cupy_recvbuf_scale_server) + else: + _, cupy_recvbuf_sign_server, _, cupy_recvbuf_scale_server = self.allgather_host( + self.comm, 
cupy_server_sign_packed[0], cupy_recvbuf_sign_server, cupy_server_scale, + cupy_recvbuf_scale_server) + + # cupy_server_sign_packed, cupy_server_scale, server_scale = None, None, None + cupy_server_sign_packed = None + + buffer_m.data.copy_( + self.compression_backend.cupy2torch((cupy.unpackbits(cupy_recvbuf_sign_server.flatten())).reshape( + self.size, -1)).float().add_(-0.5).mul_(2.0).mul_( + self.compression_backend.cupy2torch(cupy_recvbuf_scale_server)).flatten().data) + if original_size != worker_error_size: + buffer_m = buffer_m[0:original_size] + if len(original_shape) > 1: + buffer_m = buffer_m.reshape(original_shape) + + # cupy_recvbuf_sign_server, cupy_recvbuf_scale_server = None, None + + return buffer_m diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/nccl.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/nccl.py new file mode 100644 index 0000000000000000000000000000000000000000..a57b7519a295e584e77561843c52fa025bfaa66d --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/comm/nccl.py @@ -0,0 +1,166 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from deepspeed import comm as dist +import cupy +import numpy as np + +from deepspeed.runtime.compression.cupy import CupyBackend +from deepspeed.utils.torch import required_torch_version +from deepspeed.accelerator import get_accelerator + + +class NcclBackend(object): + + def __init__(self, mpu=None): + if mpu is None: + self.world_group = dist.new_group(ranks=range(dist.get_world_size())) + else: + self.mpu = mpu + self.world_group = self.mpu.get_data_parallel_group() + self.rank = dist.get_rank(group=self.world_group) + self.size = dist.get_world_size(group=self.world_group) + self.compression_backend = CupyBackend() + self.bool_not_supported = required_torch_version(min_version=1.10) + + def my_igather(self, rank, size, group, sendbuf, recvbuf, root): + req = [] + if rank == root: + for idx in range(size): + if idx != rank: + req.append(dist.irecv(recvbuf[idx], src=idx, group=group)) + else: + recvbuf[rank] = sendbuf + else: + req.append(dist.isend(sendbuf, group=group, dst=root)) + return req + + def my_gather(self, rank, size, group, sendbuf, recvbuf, root): + if rank == root: + for idx in range(size): + if idx != rank: + dist.recv(recvbuf[idx], src=idx, group=group) + else: + recvbuf[rank] = sendbuf + else: + dist.send(sendbuf, group=group, dst=root) + + def compressed_allreduce(self, buffer_m: torch.tensor, worker_error, server_error, local_rank): + + # all_start_time = time.time() + original_shape = buffer_m.size() + if len(original_shape) > 1: + buffer_m = torch.flatten(buffer_m) + original_size = buffer_m.numel() + worker_error_size = worker_error.numel() + cupy.cuda.Device(local_rank).use() + + if original_size != worker_error_size: + empty_tensor = torch.zeros(worker_error_size - original_size, device=buffer_m.device) + buffer_m = torch.cat([buffer_m, empty_tensor]) + + buffer_m.add_(worker_error) + worker_scale = torch.linalg.norm(buffer_m) / np.sqrt(buffer_m.numel()) + worker_error.set_(buffer_m - worker_scale * buffer_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0)) + + if self.bool_not_supported: + cupy_sign_list_packed = self.compression_backend.compress_by_chunk( + self.compression_backend.torch2cupy(buffer_m.sign_().add_(1).bool().to(dtype=torch.uint8)), self.size) + else: + cupy_sign_list_packed = self.compression_backend.compress_by_chunk( + 
self.compression_backend.torch2cupy(buffer_m.sign_().add_(1).bool()), self.size) + cupy_worker_scale = self.compression_backend.torch2cupy(worker_scale) + + cupy_recvbuf_sign = cupy.zeros([self.size, cupy_sign_list_packed[self.rank].size], + dtype=cupy_sign_list_packed[0].dtype) + # cupy_recvbuf_scale = cupy.zeros([self.size, 1], dtype=cupy_worker_scale.dtype) + + sign_list_packed = [ + self.compression_backend.cupy2torch(cupy_sign_list_packed[idx]) for idx in range(self.size) + ] + + # worker_scale = self.compression_backend.cupy2torch(cupy_worker_scale) + recvbuf_sign = self.compression_backend.cupy2torch(cupy_recvbuf_sign) + #recvbuf_scale = self.compression_backend.cupy2torch(cupy_recvbuf_scale) + recvbuf_scale = [ + torch.zeros(1, dtype=worker_scale.dtype, device=torch.device(get_accelerator().device_name(local_rank))) + for i in range(self.size) + ] + + # communication phase 1 + # gather_start = time.time() + # Alltoall for sign + dist.all_to_all_single(recvbuf_sign, torch.stack(sign_list_packed), group=self.world_group) + # Allgather for scale + dist.all_gather(recvbuf_scale, worker_scale, group=self.world_group) + + # gather_end = time.time() + + # cupy_sign_list_packed, sign_list_packed, cupy_worker_scale, worker_scale = None, None, None, None + cupy_sign_list_packed = None + + cupy_recvbuf_sign = self.compression_backend.torch2cupy(recvbuf_sign) + #cupy_recvbuf_scale = self.compression_backend.torch2cupy(torch.stack(recvbuf_scale)) + + compensated_server_m = self.compression_backend.cupy2torch( + (cupy.unpackbits(cupy_recvbuf_sign.flatten())).reshape(self.size, -1)).float().add_(-0.5).mul_(2.0).mul_( + torch.stack(recvbuf_scale).mul_(1 / self.size)).sum(0) + compensated_server_m.add_(server_error) + server_scale = torch.linalg.norm(compensated_server_m) / np.sqrt(compensated_server_m.numel()) + server_error.set_(compensated_server_m - + server_scale * compensated_server_m.sign().add_(1).bool().float().add_(-0.5).mul_(2.0)) + + # cupy_server_scale = self.compression_backend.torch2cupy(server_scale) + + if self.bool_not_supported: + cupy_server_sign_packed = self.compression_backend.compress_by_chunk( + self.compression_backend.torch2cupy(compensated_server_m.sign_().add_(1).bool().to(dtype=torch.uint8)), + 1) + else: + cupy_server_sign_packed = self.compression_backend.compress_by_chunk( + self.compression_backend.torch2cupy(compensated_server_m.sign_().add_(1).bool()), 1) + compensated_server_m = None + + cupy_recvbuf_sign_server = cupy.zeros([self.size, cupy_server_sign_packed[0].size], + dtype=cupy_recvbuf_sign.dtype) + # cupy_recvbuf_sign, recvbuf_sign = None, None + cupy_recvbuf_sign = None + + server_sign_packed = [self.compression_backend.cupy2torch(cupy_server_sign_packed[0])] + recvbuf_sign_server = [ + self.compression_backend.cupy2torch(cupy_recvbuf_sign_server[idx]) for idx in range(self.size) + ] + + # server_scale = self.compression_backend.cupy2torch(cupy_server_scale) + cupy_recvbuf_scale_server = cupy.zeros([self.size, 1], dtype=cupy_worker_scale.dtype) + # cupy_recvbuf_scale, recvbuf_scale = None, None + + recvbuf_scale_server = [ + self.compression_backend.cupy2torch(cupy_recvbuf_scale_server[idx]) for idx in range(self.size) + ] + + # Communication Phase 2 + dist.all_gather(recvbuf_sign_server, server_sign_packed[0], group=self.world_group) + dist.all_gather(recvbuf_scale_server, server_scale, group=self.world_group) + + cupy_server_sign_packed = None + + # need to convert from a tensor list to a single tensor + # dist.all_gather only provides a tensor list as 
the recv/output buffer + recvbuf_sign_server = torch.stack(recvbuf_sign_server) + + cupy_recvbuf_sign_server = self.compression_backend.torch2cupy(recvbuf_sign_server) + + buffer_m.data.copy_( + self.compression_backend.cupy2torch((cupy.unpackbits(cupy_recvbuf_sign_server.flatten())).reshape( + self.size, -1)).float().add_(-0.5).mul_(2.0).mul_( + self.compression_backend.cupy2torch(cupy_recvbuf_scale_server)).flatten().data) + if original_size != worker_error_size: + buffer_m = buffer_m[0:original_size] + if len(original_shape) > 1: + buffer_m = buffer_m.reshape(original_shape) + + return buffer_m diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/config_utils.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/config_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5522a8e79d69e57d96f005bcf7b295fedac25bfc --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/config_utils.py @@ -0,0 +1,205 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Collection of DeepSpeed configuration utilities +""" +import json +import collections +import collections.abc +from functools import reduce +from deepspeed.pydantic_v1 import BaseModel +from deepspeed.utils import logger + + +class DeepSpeedConfigModel(BaseModel): + """ + This class should be used as a base for all DeepSpeed configs. It extends + pydantic.BaseModel to allow for deprecated fields. To enable this feature, + add deprecated=True to pydantic.Field: + + my_dep_field: int = Field(0, deprecated=True) + + Deprecated Field kwargs: + - deprecated: [True|False], default False + Enables / Disables deprecated fields + - deprecated_msg: str, default "" + Message to include with deprecation warning + - new_param: str, default "" + Name of the field replacing the deprecated field + - set_new_param: [True|False], default True + If new_param is provided, enables setting the value of that param with + deprecated field value + - new_param_fn: callable, default (lambda x: x) + If new_param is provided and set_new_param is True, this function will + modify the value of the deprecated field before placing that value in + the new_param field + + Example: + my_new_field is replacing a deprecated my_old_field. The expected type + for my_new_field is int while the expected type for my_old_field is + str. We want to maintain backward compatibility with our configs, so we + define the fields with: + + class MyExampleConfig(DeepSpeedConfigModel): + my_new_field: int = 0 + my_old_field: str = Field('0', + deprecated=True, + new_param='my_new_field', + new_param_fn=(lambda x: int(x))) + """ + + def __init__(self, strict=False, **data): + if (not strict): # This is temporary until we refactor all DS configs, allows HF to load models + data = {k: v for k, v in data.items() if (v != "auto" or k == "replace_method")} + super().__init__(**data) + self._deprecated_fields_check(self) + + def _process_deprecated_field(self, pydantic_config, field): + # Get information about the deprecated field + fields_set = pydantic_config.__fields_set__ + dep_param = field.name + kwargs = field.field_info.extra + new_param_fn = kwargs.get("new_param_fn", lambda x: x) + param_value = new_param_fn(getattr(pydantic_config, dep_param)) + new_param = kwargs.get("new_param", "") + dep_msg = kwargs.get("deprecated_msg", "") + if dep_param in fields_set: + logger.warning(f"Config parameter {dep_param} is deprecated" + + (f" use {new_param} instead" if new_param else "") + (f". 
{dep_msg}" if dep_msg else "")) + # Check if there is a new param and if it should be set with a value + if new_param and kwargs.get("set_new_param", True): + # Remove the deprecate field if there is a replacing field + try: + delattr(pydantic_config, dep_param) + except Exception as e: + logger.error(f"Tried removing deprecated '{dep_param}' from config") + raise e + + # Set new param value + new_param_nested = new_param.split(".") + if len(new_param_nested) > 1: + # If the new param exists in a subconfig, we need to get + # the fields set for that subconfig + pydantic_config = reduce(getattr, new_param_nested[:-1], pydantic_config) + fields_set = pydantic_config.__fields_set__ + new_param_name = new_param_nested[-1] + assert ( + new_param_name not in fields_set + ), f"Cannot provide deprecated parameter '{dep_param}' and replacing parameter '{new_param}' together" + # A custom function for converting the old param value to new param value can be provided + try: + setattr(pydantic_config, new_param_name, param_value) + except Exception as e: + logger.error(f"Tried setting value for '{new_param}' with value from deprecated '{dep_param}'") + raise e + + def _deprecated_fields_check(self, pydantic_config): + fields = pydantic_config.__fields__ + for field in fields.values(): + if field.field_info.extra.get("deprecated", False): + self._process_deprecated_field(pydantic_config, field) + + class Config: + validate_all = True + validate_assignment = True + use_enum_values = True + allow_population_by_field_name = True + extra = "forbid" + arbitrary_types_allowed = True + + +def get_config_default(config, field_name): + assert field_name in config.__fields__, f"'{field_name}' is not a field in {config}" + assert not config.__fields__.get( + field_name).required, f"'{field_name}' is a required field and does not have a default value" + return config.__fields__.get(field_name).default + + +class pp_int(int): + """ + A wrapper for integers that will return a custom string or comma-formatted + string of the integer. For example, print(pp_int(1e5)) will return + "10,000". This is useful mainly for auto-generated documentation purposes. + """ + + def __new__(cls, val, custom_print_str=None): + inst = super().__new__(cls, val) + inst.custom_print_str = custom_print_str + return inst + + def __repr__(self): + if self.custom_print_str: + return self.custom_print_str + return f"{self.real:,}" + + +# adapted from https://stackoverflow.com/a/50701137/9201239 +class ScientificNotationEncoder(json.JSONEncoder): + """ + This class overrides ``json.dumps`` default formatter. + + This version keeps everything as normal except formats numbers bigger than 1e3 using scientific notation. 
+ + Just pass ``cls=ScientificNotationEncoder`` to ``json.dumps`` to activate it + + """ + + def iterencode(self, o, _one_shot=False, level=0): + indent = self.indent if self.indent is not None else 4 + prefix_close = " " * level * indent + level += 1 + prefix = " " * level * indent + if isinstance(o, bool): + return "true" if o else "false" + elif isinstance(o, float) or isinstance(o, int): + if o > 1e3: + return f"{o:e}" + else: + return f"{o}" + elif isinstance(o, collections.abc.Mapping): + x = [f'\n{prefix}"{k}": {self.iterencode(v, level=level)}' for k, v in o.items()] + return "{" + ", ".join(x) + f"\n{prefix_close}" + "}" + elif isinstance(o, collections.abc.Sequence) and not isinstance(o, str): + return f"[{ f', '.join(map(self.iterencode, o)) }]" + return "\n, ".join(super().iterencode(o, _one_shot)) + + +class DeepSpeedConfigObject(object): + """ + For json serialization + """ + + def repr(self): + return self.__dict__ + + def __repr__(self): + return json.dumps( + self.__dict__, + sort_keys=True, + indent=4, + cls=ScientificNotationEncoder, + ) + + +def get_scalar_param(param_dict, param_name, param_default_value): + return param_dict.get(param_name, param_default_value) + + +def get_list_param(param_dict, param_name, param_default_value): + return param_dict.get(param_name, param_default_value) + + +def get_dict_param(param_dict, param_name, param_default_value): + return param_dict.get(param_name, param_default_value) + + +def dict_raise_error_on_duplicate_keys(ordered_pairs): + """Reject duplicate keys.""" + d = dict((k, v) for k, v in ordered_pairs) + if len(d) != len(ordered_pairs): + counter = collections.Counter([pair[0] for pair in ordered_pairs]) + keys = [key for key, value in counter.items() if value > 1] + raise ValueError("Duplicate keys in DeepSpeed config: {}".format(keys)) + return d diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..247ec6dd796c322839da61c1eefb12f39ff4c7df Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/config.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..32848d2be41ba6f8ffc3d3b14e329642b482d374 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..909d44f349da13bbe728f6eea678c5b84c6cc808 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/constants.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/curriculum_scheduler.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/curriculum_scheduler.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..b179a497003e43177eb8f1579827737346f57419 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/__pycache__/curriculum_scheduler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c5067f71c8faf166bc78e88f9b62e8627dda7c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bf6f047de7457c36fe8368fdbed85b864d92965 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/basic_layer.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/basic_layer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..577b93548c80ca062aaf6ae60998343a1196bb1a Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/basic_layer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/helper.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/helper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9bc7e9852ac8d577cce928bd62ceb21010434603 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/helper.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/scheduler.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/scheduler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f632c5e73c31187afefed545d5e08fcb817df0e Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/scheduler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f176f58731a6c01e73a4f8e056dec5f3e3d1708 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/basic_layer.py 
b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/basic_layer.py new file mode 100644 index 0000000000000000000000000000000000000000..780a88c3d5a2fb016f2c59bbe3a88fbaff6499b0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/basic_layer.py @@ -0,0 +1,113 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from deepspeed.utils import logger +from torch import Tensor +from torch.nn import Module +from ..constants import * +from deepspeed.ops.random_ltd.dropping_utils import gpt_sample_tokens, bert_sample_tokens, GatherTokens, ScatterTokens + + +#####based on the paper random-ltd: https://arxiv.org/abs/2211.11586 +class RandomLayerTokenDrop(Module): + """ + A layer wrapper for random LTD + """ + + def __init__(self, layer: Module): + super(RandomLayerTokenDrop, self).__init__() + self.random_ltd_layer = layer + self.reserved_length = None #config['max_value'] + self.random_ltd_scheduler = None + self.max_length = None + self.reserved_length = -1 + self.curr_seq = -1 + self.batch_first = False + + def init_config(self, config, scheduler, random_ltd_layer_id): + self.random_ltd_scheduler = scheduler + self.random_ltd_layer_id = random_ltd_layer_id + self.max_length = self.random_ltd_scheduler.state[RANDOM_LTD_MAX_VALUE] + + self.mask_name = config[RANDOM_LTD_MODEL_MASK_NAME] + self.micro_bs = config[RANDOM_LTD_MICRO_BATCH_SIZE] + self.random_ltd_num_layer = self.random_ltd_scheduler.random_ltd_layer_num + hs_order = config[RANDOM_LTD_HIDDEN_STATE_ORDER] + self.model_type = config[RANDOM_LTD_MODEL_TYPE] + + if hs_order == 'batch_seq_dim': + self.get_hidden_tensor_shape = self.get_bsh + self.batch_first = True + elif hs_order == 'seq_batch_dim': + self.get_hidden_tensor_shape = self.get_sbh + self.batch_first = False + else: + logger.warning( + "************For now, we only support batch_seq_dim or seq_batch_dim inputs. 
You can easily \ + your own input dimension orders************") + raise NotImplementedError + + if self.model_type == 'encoder': + self.index_generator = bert_sample_tokens + elif self.model_type == 'decoder': + self.index_generator = gpt_sample_tokens + else: + logger.warning("************For now, we only support encoder-only or decoder-only models************") + raise NotImplementedError + + def get_bsh(self, hidden_stats): + self.curr_seq, self.curr_micro_batch = hidden_stats.size()[1], hidden_stats.size()[0] + + def get_sbh(self, hidden_stats): + self.curr_seq, self.curr_micro_batch = hidden_stats.size()[0], hidden_stats.size()[1] + + def forward(self, hidden_states, **kwargs) -> Tensor: + if self.random_ltd_scheduler is not None: + self.reserved_length = self.random_ltd_scheduler.get_current_seq() + self.get_hidden_tensor_shape(hidden_states) + if self.training and self.random_ltd_scheduler is not None and self.reserved_length < self.curr_seq: + if self.mask_name is not None: + mask = kwargs[self.mask_name] + else: + mask = None + if self.random_ltd_layer_id == 0: + sampled_indices, part_attention_mask = self.index_generator(self.reserved_length,\ + self.curr_seq, \ + self.curr_micro_batch, \ + self.random_ltd_num_layer, \ + hidden_states.device, mask) + self.random_ltd_scheduler.state[RANDOM_LTD_SAMPLE_INDEX] = sampled_indices + self.random_ltd_scheduler.state[RANDOM_LTD_ATTENTION_MASK] = part_attention_mask + else: + sampled_indices = self.random_ltd_scheduler.state[RANDOM_LTD_SAMPLE_INDEX] + part_attention_mask = self.random_ltd_scheduler.state[RANDOM_LTD_ATTENTION_MASK] + + hidden_states, part_hidden_states = GatherTokens.apply(hidden_states, + sampled_indices[self.random_ltd_layer_id, :, :], + self.batch_first) + if self.mask_name is not None: + if self.model_type == 'encoder': + kwargs[self.mask_name] = part_attention_mask[self.random_ltd_layer_id] + else: + kwargs[self.mask_name] = part_attention_mask + + outputs = self.random_ltd_layer(part_hidden_states, **kwargs) + + if isinstance(outputs, tuple): + hidden_states = ScatterTokens.apply(hidden_states, outputs[0], + sampled_indices[self.random_ltd_layer_id, :, :], self.batch_first) + my_list = list(outputs) + my_list[0] = hidden_states + return tuple(my_list) + elif isinstance(outputs, Tensor): + hidden_states = ScatterTokens.apply(hidden_states, outputs, + sampled_indices[self.random_ltd_layer_id, :, :], self.batch_first) + return hidden_states + else: + logger.warning("************For now, we only support tuple and tensor output. \ + You need to adjust the output according to the layer in your model************") + raise NotImplementedError + else: + return self.random_ltd_layer(hidden_states, **kwargs) diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/helper.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/helper.py new file mode 100644 index 0000000000000000000000000000000000000000..150182d77bcfda20b1aa1aabd4b8785542ca9d1b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/helper.py @@ -0,0 +1,46 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .basic_layer import RandomLayerTokenDrop +from collections import OrderedDict +from deepspeed.compression.helper import recursive_getattr, recursive_setattr + + +def convert_to_random_ltd(model, convert_type): + if hasattr(model, 'module'): + c_model = model.module + else: + c_model = model + + for name, module in c_model.named_modules(): + + if isinstance(module, convert_type): + old_module = recursive_getattr(c_model, name) + new_module = RandomLayerTokenDrop(old_module) + recursive_setattr(c_model, name, new_module) + + model.random_ltd_initialize() + return model + + +def save_without_random_ltd(model): + if hasattr(model, 'module'): + c_model = model.module + else: + c_model = model + + model_dic = c_model.state_dict() + return remove_random_ltd_state_dict(model_dic) + + +def remove_random_ltd_state_dict(state_dict): + new_state_dict = OrderedDict() + for key, value in state_dict.items(): + if '.random_ltd_layer' in key: + new_key = ''.join(key.split('.random_ltd_layer')) + else: + new_key = key + new_state_dict[new_key] = value + return new_state_dict diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/scheduler.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..8a0b031d4f633976f438d5151973e58afa77712e --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/scheduler.py @@ -0,0 +1,107 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import math + +from deepspeed.utils import logger +# from deepspeed.runtime.lr_schedules import WarmupLR +from ..constants import * + +#####based on the paper random-ltd: https://arxiv.org/abs/2211.11586 + + +class BaseScheduler(object): + + def __init__(self): + self.state = {} + + def __fixed_root_get_value(self, global_steps, root_degree=None): + s_state = self.state[RANDOM_LTD_SCHEDULE_CONFIG] + if root_degree is None: + root_degree = s_state['root_degree'] + next_seq = (float(global_steps) / s_state[RANDOM_LTD_REQUIRE_STEP])**(1.0 / root_degree) + next_seq = math.floor(next_seq * (self.state[RANDOM_LTD_MAX_VALUE] - self.state[RANDOM_LTD_MIN_VALUE]) + + self.state[RANDOM_LTD_MIN_VALUE]) + next_seq -= (next_seq % s_state[RANDOM_LTD_INCREASE_STEP]) + next_seq = min(next_seq, self.state[RANDOM_LTD_MAX_VALUE]) + return next_seq + + def get_value(self, global_steps): + if self.state[RANDOM_LTD_SCHEDULER_TYPE] == 'fixed_linear': + return self.__fixed_root_get_value(global_steps, 1) + else: + raise RuntimeError('Unsupported random LTD schedule type') + + +class RandomLTDScheduler(BaseScheduler): + + def __init__(self, config): + super().__init__() + self.model_layer_num = config[RANDOM_LTD_TOTAL_LAYER_NUM] + self.random_ltd_layer_num = config[RANDOM_LTD_LAYER_NUM] + self.config_schedule = config[RANDOM_LTD_SCHEDULER] + self.global_batch_size = config[RANDOM_LTD_GLOBAL_BATCH_SIZE] + self.reset_to_init() + + if config[RANDOM_LTD_LAYER_TOKEN_LR_SCHEDULE][RANDOM_LTD_LAYER_TOKEN_LR_ENABLED]: + logger.warning("**********Work In Progress************") + raise NotImplementedError + + self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS] = 0 + + # self.first_step = True + def get_total_layer_tokens(self, train_iters): + for step in range(train_iters): + self.update_seq(step) + return self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS] + + def reset_to_init(self): + if 
self.config_schedule is not None: + self.state[RANDOM_LTD_MIN_VALUE] = self.config_schedule[RANDOM_LTD_MIN_VALUE] + self.state[RANDOM_LTD_MAX_VALUE] = self.config_schedule[RANDOM_LTD_MAX_VALUE] + self.state[RANDOM_LTD_CURRENT_VALUE] = self.config_schedule[RANDOM_LTD_MIN_VALUE] + self.state[RANDOM_LTD_SCHEDULE_CONFIG] = self.config_schedule[RANDOM_LTD_SCHEDULE_CONFIG] + self.state[RANDOM_LTD_SCHEDULER_TYPE] = self.config_schedule[RANDOM_LTD_SCHEDULER_TYPE] + self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS] = 0 + self.state[RANDOM_LTD_CURR_STEP] = -1 + + def get_current_seq(self): + return self.state[RANDOM_LTD_CURRENT_VALUE] + + def set_current_seq(self, seq_length): + self.state[RANDOM_LTD_CURRENT_VALUE] = seq_length + + def get_random_ltd_layer_num(self): + return self.random_ltd_layer_num + + def get_state(self): + return self.state + + def set_state(self, state): + self.state = state + + def update_seq(self, global_steps): + if self.state[RANDOM_LTD_CURRENT_VALUE] < self.state[RANDOM_LTD_MAX_VALUE]: + self.state[RANDOM_LTD_CURRENT_VALUE] = self.get_value(global_steps) + if global_steps != self.state[RANDOM_LTD_CURR_STEP]: + self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS] += self.global_batch_size*(self.state[RANDOM_LTD_CURRENT_VALUE] * self.random_ltd_layer_num \ + + self.state[RANDOM_LTD_MAX_VALUE] * (self.model_layer_num - self.random_ltd_layer_num)) + self.state[RANDOM_LTD_CURR_STEP] = global_steps + + def state_dict(self): + return { + RANDOM_LTD_CONSUMED_LAYER_TOKENS: self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS], + RANDOM_LTD_CURR_STEP: self.state[RANDOM_LTD_CURR_STEP], + RANDOM_LTD_CURRENT_VALUE: self.state[RANDOM_LTD_CURRENT_VALUE], + RANDOM_LTD_MIN_VALUE: self.state[RANDOM_LTD_MIN_VALUE], + RANDOM_LTD_MAX_VALUE: self.state[RANDOM_LTD_MAX_VALUE], + } + + def load_state_dict(self, state_dict): + self.state[RANDOM_LTD_CONSUMED_LAYER_TOKENS] = state_dict[RANDOM_LTD_CONSUMED_LAYER_TOKENS] + self.state[RANDOM_LTD_CURR_STEP] = state_dict[RANDOM_LTD_CURR_STEP] + self.state[RANDOM_LTD_CURRENT_VALUE] = state_dict[RANDOM_LTD_CURRENT_VALUE] + self.state[RANDOM_LTD_MIN_VALUE] = state_dict[RANDOM_LTD_MIN_VALUE] + self.state[RANDOM_LTD_MAX_VALUE] = state_dict[RANDOM_LTD_MAX_VALUE] diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/utils.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..afcfef2ef4dc04279b7fa3ee49916519f33ffbec --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_routing/utils.py @@ -0,0 +1,27 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + + +def bsh_decoder_gather(reserved_length, hidden_states, mask): + # random-layer-token-drop + rand_list = [] + part_hidden_states = [] # batch, seq, hidden ## different from megatron + for k in range(hidden_states.size(0)): + B_tmp = torch.randperm(hidden_states.size(1), device=hidden_states.device)[:reserved_length] + B = B_tmp.sort()[0] + rand_list.append(B) + part_hidden_states.append(hidden_states[k:k + 1, B, :]) + + part_hidden_states = torch.cat(part_hidden_states, dim=0) + part_mask = mask[:, :, :reserved_length, :reserved_length] + return part_hidden_states, rand_list, part_mask + + +def bsh_decoder_scatter(hidden_states, part_hidden_states, rand_list): + for k in range(hidden_states.size(0)): + hidden_states[k, rand_list[k], :] = part_hidden_states[k, :, :] + return hidden_states diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9bae94d7ae40948dfb217b4fd81f2b30909de79f Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/data_analyzer.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/data_analyzer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a840f79e0507c2046b4cb501dd4c80c0a97cb24 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/data_analyzer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/data_sampler.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/data_sampler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7db536150b617fd887f35d6948aa2318554c8f8f Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/data_sampler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/indexed_dataset.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/indexed_dataset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4360e34423673953cc23405f72f880c0f7734041 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/indexed_dataset.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f25003d2e9d1c68ec895992f533165cc0930484 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/data_pipeline/data_sampling/__pycache__/utils.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/deepspeed/runtime/dataloader.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/dataloader.py new file mode 100644 index 0000000000000000000000000000000000000000..499473b4ced81ba5ae5a447e32e03a2971b63c8f --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/dataloader.py @@ -0,0 +1,162 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from torch.utils.data import DataLoader, RandomSampler +from torch.utils.data.distributed import DistributedSampler +from deepspeed.accelerator import get_accelerator + +from deepspeed.runtime.data_pipeline.data_sampling.data_sampler import DeepSpeedDataSampler +from deepspeed.runtime.data_pipeline.constants import CURRICULUM_LEARNING, \ + DATA_EFFICIENCY, DATA_SAMPLING_NUM_WORKERS +from deepspeed.runtime.constants import GRADIENT_ACCUMULATION_STEPS, \ + DATA_PARALLEL_GROUP, GLOBAL_RANK + + +class RepeatingLoader: + + def __init__(self, loader): + """Wraps an iterator to allow for infinite iteration. This is especially useful + for DataLoader types that we wish to automatically restart upon completion. + + Args: + loader (iterator): The data loader to repeat. + """ + self.loader = loader + self.data_iter = iter(self.loader) + + def __iter__(self): + return self + + def __next__(self): + try: + batch = next(self.data_iter) + except StopIteration: + self.data_iter = iter(self.loader) + batch = next(self.data_iter) + return batch + + +class DeepSpeedDataLoader(object): + + def __init__(self, + dataset, + batch_size, + pin_memory, + local_rank, + tput_timer, + collate_fn=None, + num_local_io_workers=None, + data_sampler=None, + data_parallel_world_size=None, + data_parallel_rank=None, + dataloader_drop_last=False, + deepspeed_dataloader_config={}): + self.deepspeed_dataloader_config = deepspeed_dataloader_config + self.tput_timer = tput_timer + self.batch_size = batch_size + self.curriculum_learning_enabled = False + if CURRICULUM_LEARNING in deepspeed_dataloader_config: + self.curriculum_learning_enabled = deepspeed_dataloader_config[CURRICULUM_LEARNING] + + if self.curriculum_learning_enabled: + data_sampler = DeepSpeedDataSampler(self.deepspeed_dataloader_config[DATA_EFFICIENCY], + len(dataset), + self.batch_size, + data_parallel_rank, + data_parallel_world_size, + self.deepspeed_dataloader_config[DATA_PARALLEL_GROUP], + self.deepspeed_dataloader_config[GRADIENT_ACCUMULATION_STEPS], + self.deepspeed_dataloader_config[GLOBAL_RANK], + drop_last=dataloader_drop_last) + device_count = get_accelerator().device_count() + num_local_io_workers = self.deepspeed_dataloader_config[DATA_SAMPLING_NUM_WORKERS] + else: + if local_rank >= 0: + if data_sampler is None: + data_sampler = DistributedSampler(dataset=dataset, + num_replicas=data_parallel_world_size, + rank=data_parallel_rank) + device_count = 1 + else: + if data_sampler is None: + data_sampler = RandomSampler(dataset) + device_count = get_accelerator().device_count() + batch_size *= device_count + + if num_local_io_workers is None: + num_local_io_workers = 2 * device_count + + self.num_local_io_workers = num_local_io_workers + self.data_sampler = data_sampler + self.dataset = dataset + self.collate_fn = collate_fn + self.device_count = device_count + self.batch_size = batch_size + self.pin_memory = pin_memory + self.data = None + self.dataloader_drop_last = dataloader_drop_last + self.post_process_func = None + + if self.dataloader_drop_last: + self.len = len(self.data_sampler) // self.batch_size + 
else: + from math import ceil + self.len = ceil(len(self.data_sampler) / self.batch_size) + + def __iter__(self): + self._create_dataloader() + return self + + def __len__(self): + return self.len + + def __next__(self): + if self.tput_timer: + self.tput_timer.start() + if self.curriculum_learning_enabled: + data = next(self.data_iterator) + if self.post_process_func is not None: + data = self.post_process_func(data, self.data_sampler.state_dict()) + return data + else: + return next(self.data) + + def _create_dataloader(self): + if self.curriculum_learning_enabled: + if self.collate_fn is None: + self.dataloader = DataLoader(self.dataset, + pin_memory=self.pin_memory, + batch_sampler=self.data_sampler, + num_workers=self.num_local_io_workers) + else: + self.dataloader = DataLoader(self.dataset, + pin_memory=self.pin_memory, + batch_sampler=self.data_sampler, + collate_fn=self.collate_fn, + num_workers=self.num_local_io_workers) + self.data_iterator = iter(self.dataloader) + return self.dataloader + else: + if self.collate_fn is None: + self.dataloader = DataLoader(self.dataset, + batch_size=self.batch_size, + pin_memory=self.pin_memory, + sampler=self.data_sampler, + num_workers=self.num_local_io_workers, + drop_last=self.dataloader_drop_last) + else: + self.dataloader = DataLoader(self.dataset, + batch_size=self.batch_size, + pin_memory=self.pin_memory, + sampler=self.data_sampler, + collate_fn=self.collate_fn, + num_workers=self.num_local_io_workers, + drop_last=self.dataloader_drop_last) + self.data = (x for x in self.dataloader) + + return self.dataloader + + +# DataLoader([(torch.randn(3, 3), torch.tensor(i % 2)) for i in range(10)], batch_size=2)) diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/eigenvalue.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/eigenvalue.py new file mode 100644 index 0000000000000000000000000000000000000000..df63854dd1ca05610fca7fc454eb30c46c19d4b3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/eigenvalue.py @@ -0,0 +1,149 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from deepspeed.utils import log_dist +import numpy as np +import logging + + +class Eigenvalue(object): + + def __init__(self, + verbose=False, + max_iter=100, + tol=1e-2, + stability=0, + gas_boundary_resolution=1, + layer_name='', + layer_num=0): + super().__init__() + + self.verbose = verbose + self.max_iter = max_iter + self.tol = tol + self.stability = stability + self.gas_boundary_resolution = gas_boundary_resolution + self.layer_name = layer_name + self.layer_num = layer_num + + assert len(self.layer_name) > 0 and layer_num > 0 + + log_dist( + f'enabled eigenvalue with verbose={verbose}, max_iter={max_iter}, tol={tol}, stability={stability}, gas_boundary_resolution={gas_boundary_resolution}, layer_name={layer_name}, layer_num={layer_num}', + ranks=[0]) + + # Replace all nan/pos-inf/neg-inf to zero + # TODO: Pytorch new version may add this function, replace this one by then. 
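+    # Note: recent PyTorch releases provide torch.nan_to_num(x, nan=0.0, posinf=0.0, neginf=0.0),
+    # which performs the same replacement without the CPU/NumPy round-trip used below.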
+ def nan_to_num(self, x): + device = x.device + x = x.cpu().numpy() + x = np.nan_to_num(x=x, copy=False, nan=0.0, posinf=0.0, neginf=0.0) + return torch.from_numpy(x).to(device) + + def normalize(self, v): + norm_squared = self.inner_product(v, v) + norm = norm_squared**0.5 + self.stability + normalized_vectors = [vector / norm for vector in v] + normalized_vectors = [self.nan_to_num(vector) for vector in normalized_vectors] + return normalized_vectors + + def inner_product(self, xs, ys): + return sum([torch.sum(x * y) for (x, y) in zip(xs, ys)]) + + def get_layers(self, module): + scope_names = self.layer_name.split('.') + assert len(scope_names) > 0 + + m = module + for name in scope_names: + assert hasattr(m, name), "layer_name configuration is invalid." + m = getattr(m, name) + + return m + + def compute_eigenvalue(self, module, device=None, scale=1.0): + block_eigenvalue = [] + param_keys = [] + layers = self.get_layers(module) + + for block in range(self.layer_num): + model_block = layers[block] + + # We found this randn() has obvious accuracy impact in some cases, save/recover random state here. + rng_state = torch.random.get_rng_state() + if device is None: + v = [ + torch.randn(p.size()) for p in model_block.parameters() + if p.grad is not None and p.grad.grad_fn is not None + ] + else: + v = [ + torch.randn(p.size(), device=device) for p in model_block.parameters() + if p.grad is not None and p.grad.grad_fn is not None + ] + torch.random.set_rng_state(rng_state) + + grads = [ + param.grad for param in model_block.parameters() + if param.grad is not None and param.grad.grad_fn is not None + ] + params = [ + param for param in model_block.parameters() + if param.grad is not None and param.grad.grad_fn is not None + ] + + layer_keys = [id(p) for p in model_block.parameters()] + param_keys.append(layer_keys) + + v = self.normalize(v) + + # Disable eigenvalue if the model doesn't support second order gradients computation, + # e.g. when enabling DS transformer kernel. + if len(grads) == 0 or len(params) == 0: + log_dist(f'The model does NOT support eigenvalue computation.', ranks=[0], level=logging.WARNING) + return [] + + i = 0 + eigenvalue_current, eigenvalue_previous = 1., 0. + + while (i < self.max_iter) and abs(eigenvalue_current) > 0 and (abs( + (eigenvalue_current - eigenvalue_previous) / eigenvalue_current) >= + self.tol): # test convergence criteria + eigenvalue_previous = eigenvalue_current + + Hv = torch.autograd.grad(grads, params, grad_outputs=v, only_inputs=True, retain_graph=True) + #Hv = [hv.float() for hv in Hv] + Hv = [self.nan_to_num(hv).float() for hv in Hv] + + eigenvalue_current = self.inner_product(Hv, v).item() + + v = self.normalize(Hv) + v = [x / scale for x in v] + i += 1 + + eigenvalue_current *= scale + block_eigenvalue.append(eigenvalue_current) + + if self.verbose: + log_dist(f'block: {block}, power iteration: {i}, eigenvalue: {eigenvalue_current}', ranks=[0]) + + block_eigenvalue = self.post_process(block_eigenvalue) + + if self.verbose: + log_dist(f'post processed block_eigenvalue: {block_eigenvalue}', ranks=[0]) + + # {param_id: (eigenvalue, layer_id)} + ev_dict = {} + for i, (layer_keys, value) in enumerate(zip(param_keys, block_eigenvalue)): + ev_dict.update(dict.fromkeys(layer_keys, (value, i))) + + return ev_dict + + # 1. Map all eigenvalues to [0, 1.0]. + # 2. Some layers can't generate valid eigenvalues on fp16 precision, use 1.0 instead. 
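+    # For example, post_process([4.0, -2.0, 0.0]) -> [1.0, 0.5, 1.0]: 4.0 has the largest
+    # magnitude so it maps to 1.0, -2.0 maps to 2.0/4.0 = 0.5, and the 0.0 entry
+    # (no valid eigenvalue) falls back to 1.0.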
+ def post_process(self, value_list): + max_value = abs(max(value_list, key=abs)) + return [abs(v) / max_value if v != 0.0 else 1.0 for v in value_list] diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c5067f71c8faf166bc78e88f9b62e8627dda7c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..187bf32efca7dc713180520243cb7767fcb63cda Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/fused_optimizer.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/fused_optimizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96080c018f9b764f712ff3aa674b961c565c1bc9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/fused_optimizer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/loss_scaler.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/loss_scaler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63064050e90b156407a7d60dbeadeaeedb76675b Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/loss_scaler.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/unfused_optimizer.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/unfused_optimizer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1637d0f1b112e5b8e03b30ffabdf57218dd52b08 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/__pycache__/unfused_optimizer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/fused_optimizer.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/fused_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..bf1693307ea780a3790f68629b5dd65cd3d05a66 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/fused_optimizer.py @@ -0,0 +1,514 @@ +# Copyright (c) Microsoft Corporation. 
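The convergence loop in compute_eigenvalue above is standard power iteration, with torch.autograd.grad supplying Hessian-vector products in place of an explicit matrix. A self-contained sketch of the same kind of estimate on an explicit symmetric matrix, for illustration only (not part of the patch):

    import torch

    def dominant_eigenvalue(A, max_iter=100, tol=1e-2):
        # Power iteration with the same stopping rule as above: iterate until the
        # relative change in the Rayleigh-quotient estimate <Av, v> drops below tol.
        v = torch.randn(A.shape[0])
        v = v / v.norm()
        eig_prev, eig_cur = 0.0, 1.0
        i = 0
        while i < max_iter and abs(eig_cur) > 0 and abs((eig_cur - eig_prev) / eig_cur) >= tol:
            eig_prev = eig_cur
            Av = A @ v
            eig_cur = torch.dot(Av, v).item()
            v = Av / Av.norm()
            i += 1
        return eig_cur

    A = torch.tensor([[2.0, 0.0], [0.0, 0.5]])
    print(dominant_eigenvalue(A))  # converges to ~2.0, the dominant eigenvalue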
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Copyright NVIDIA/apex +This file is adapted from FP16_Optimizer in NVIDIA/apex +""" + +import torch +from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors +from deepspeed.runtime.base_optimizer import DeepSpeedOptimizer +from deepspeed.runtime.utils import get_global_norm, get_flattened_grad_norm, CheckOverflow, get_weight_norm, get_norm_with_moe_layers, is_model_parallel_parameter +from deepspeed.runtime.fp16.loss_scaler import INITIAL_LOSS_SCALE, SCALE_WINDOW, MIN_LOSS_SCALE +from deepspeed.utils import logger, log_dist +from deepspeed.utils.torch import required_torch_version +from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT, CLIP_GRAD +from deepspeed.accelerator import get_accelerator +from deepspeed.moe.utils import is_moe_param_group +from deepspeed.runtime.constants import PIPE_REPLICATED +from deepspeed.utils.bwc import bwc_tensor_model_parallel_rank + +OVERFLOW_CHECK_TIMER = 'overflow_check' +COMPUTE_NORM_TIMER = 'compute_norm' +UNSCALE_AND_CLIP_TIMER = 'unscale_and_clip' +BASIC_STEP_TIMER = 'basic_step' +UPDATE_FP16_TIMER = 'update_fp16' + +OVERFLOW_TIMERS = [COMPUTE_NORM_TIMER, OVERFLOW_CHECK_TIMER] +STEP_TIMERS = OVERFLOW_TIMERS + [UNSCALE_AND_CLIP_TIMER, BASIC_STEP_TIMER, UPDATE_FP16_TIMER] + + +class FP16_Optimizer(DeepSpeedOptimizer): + """ + FP16 Optimizer for training fp16 models. Handles loss scaling. + + For usage example please see, TODO: DeepSpeed V2 Tutorial + """ + + def __init__(self, + init_optimizer, + deepspeed=None, + static_loss_scale=1.0, + dynamic_loss_scale=False, + initial_dynamic_scale=2**32, + dynamic_loss_args=None, + verbose=True, + mpu=None, + clip_grad=0.0, + fused_adam_legacy=False, + has_moe_layers=False, + timers=None): + + self.fused_adam_legacy = fused_adam_legacy + self.timers = timers + self.deepspeed = deepspeed + self.has_moe_layers = has_moe_layers + self.using_pipeline = self.deepspeed.pipeline_parallelism + if not get_accelerator().is_available(): + raise SystemError("Cannot use fp16 without accelerator.") + self.optimizer = init_optimizer + + # param flattened by groups + self.fp16_groups = [] + self.fp16_groups_flat = [] + self.fp32_groups_flat = [] + + self.flatten_grad_norm_mask_list = [] + self.has_executed_step = False + self._global_grad_norm = 0. + + # loop to deal with groups + for i, param_group in enumerate(self.optimizer.param_groups): + # push this group to list before modify + self.fp16_groups.append(param_group['params']) + # init fp16 weight buffer, flattened + self.fp16_groups_flat.append(_flatten_dense_tensors([p.clone().detach() for p in self.fp16_groups[i]])) + # set model fp16 weight to slices of flattened buffer + updated_params = _unflatten_dense_tensors(self.fp16_groups_flat[i], self.fp16_groups[i]) + for p, q in zip(self.fp16_groups[i], updated_params): + p.data = q.data + # init master weight, flattened + self.fp32_groups_flat.append(self.fp16_groups_flat[i].clone().float().detach()) + # modify optimizer of have flat master weight + self.fp32_groups_flat[i].requires_grad = True # keep this in case internal optimizer uses it + param_group['params'] = [self.fp32_groups_flat[i]] + + # we may have a way of fusing dynamic scale. 
Do not support for now + if dynamic_loss_scale: + self.dynamic_loss_scale = True + self.cur_iter = 0 + self.last_overflow_iter = -1 + self.scale_factor = 2 + + if dynamic_loss_args is None: + self.cur_scale = initial_dynamic_scale + self.scale_window = 1000 + self.min_loss_scale = 1 + else: + self.cur_scale = dynamic_loss_args[INITIAL_LOSS_SCALE] + self.scale_window = dynamic_loss_args[SCALE_WINDOW] + self.min_loss_scale = dynamic_loss_args[MIN_LOSS_SCALE] + else: + self.dynamic_loss_scale = False + self.cur_iter = 0 + self.cur_scale = static_loss_scale + self.verbose = verbose + + self.custom_loss_scaler = False + self.external_loss_scale = None + + self.clip_grad = clip_grad + self.norm_type = 2 + + if required_torch_version(max_version=0.4): + self.clip_grad_norm = torch.nn.utils.clip_grad_norm + else: + self.clip_grad_norm = torch.nn.utils.clip_grad_norm_ + + #model parallel object + self.mpu = mpu + + self.overflow = False + self.overflow_checker = CheckOverflow(self.fp16_groups, mpu=self.mpu, deepspeed=deepspeed) + self.initialize_optimizer_states() + + def initialize_optimizer_states(self): + for i, group in enumerate(self.fp16_groups): + self.fp32_groups_flat[i].grad = torch.zeros(self.fp32_groups_flat[i].size(), + device=self.fp32_groups_flat[i].device) + + self.optimizer.step() + + for i, group in enumerate(self.fp16_groups): + self.fp32_groups_flat[i].grad = None + + return + + def zero_grad(self, set_to_none=True): + """ + Zero FP16 parameter grads. + """ + # For speed, set model fp16 grad to None by default + for group in self.fp16_groups: + for p in group: + if set_to_none: + p.grad = None + else: + if p.grad is not None: + p.grad.detach_() + p.grad.zero_() + + def step_fused_adam(self, closure=None): + """ + Not supporting closure. + """ + + # First compute norm for all group so we know if there is overflow + grads_groups_flat = [] + norm_groups = [] + for i, group in enumerate(self.fp16_groups): + grads_groups_flat.append( + _flatten_dense_tensors([ + torch.zeros(p.size(), dtype=p.dtype, device=p.device) if p.grad is None else p.grad for p in group + ])) + norm_groups.append(get_weight_norm(grads_groups_flat[i], mpu=self.mpu)) + + self.overflow = self.overflow_checker.check_using_norm(norm_groups) + prev_scale = self.cur_scale + self._update_scale(self.overflow) + + if self.overflow: + if self.verbose: + logger.info("[deepspeed] fp16 dynamic loss scale overflow! Skipping step. Attempted loss " + "scale: {}, reducing to {}".format(prev_scale, self.cur_scale)) + return self.overflow + + scaled_grad_norm = get_global_norm(norm_list=norm_groups) + + combined_scale = self.unscale_and_clip_grads(grads_groups_flat, scaled_grad_norm, apply_scale=False) + + # Stash unscaled gradient norm + self._global_grad_norm = scaled_grad_norm / self.cur_scale + + # norm is in fact norm*cur_scale + self.optimizer.step(grads=[[g] for g in grads_groups_flat], + output_params=[[p] for p in self.fp16_groups_flat], + scale=combined_scale, + grad_norms=norm_groups) + # TODO: we probably don't need this? 
just to be safe + for i in range(len(norm_groups)): + updated_params = _unflatten_dense_tensors(self.fp16_groups_flat[i], self.fp16_groups[i]) + for p, q in zip(self.fp16_groups[i], updated_params): + p.data = q.data + return self.overflow + + def set_lr(self, lr): + """Set the learning rate.""" + for param_group in self.optimizer.param_groups: + param_group["lr"] = lr + + def get_lr(self): + """Return the current learning rate.""" + return self.optimizer.param_groups[0]["lr"] + + def override_loss_scale(self, loss_scale): + if loss_scale != self.external_loss_scale: + logger.info(f'[deepspeed] setting loss scale from {self.external_loss_scale} -> {loss_scale}') + self.custom_loss_scaler = True + self.external_loss_scale = loss_scale + + def _require_avoid_recompute_norm(self, p, tensor_model_parallel_rank): + # for filtering replicated tensors from tensor + if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated: + return True + if (tensor_model_parallel_rank > 0) and not is_model_parallel_parameter(p): + return True + + def _get_norm_mask_idx(self, group): + """The function preserves the parallel information for norm + from unflattened gradients. + + Args: + group (Iterable[Tensor] ): params group + + Returns: + torch.Tensor: A 2D tensor containing index ranges for each group, + where each row represents a [start index, end index]. + """ + group_mask_idx_list = [] + grad_flat_st_idx = 0 + grad_flat_en_idx = 0 + + for p in group: + grad_flat_en_idx = grad_flat_st_idx + p.numel() + if p.grad is not None and self._require_avoid_recompute_norm(p, bwc_tensor_model_parallel_rank(self.mpu)): + # merge range + if len(group_mask_idx_list) > 0 and grad_flat_st_idx == group_mask_idx_list[-1][-1]: + group_mask_idx_list[-1][-1] = grad_flat_en_idx + else: + group_mask_idx_list.append([grad_flat_st_idx, grad_flat_en_idx]) + grad_flat_st_idx = grad_flat_en_idx + + return torch.tensor(group_mask_idx_list, device=get_accelerator().current_device()) + + def step(self, closure=None): + """ + Not supporting closure. + """ + + if self.fused_adam_legacy: + return self.step_fused_adam() + + # First determine if there is overflow. + self.timers(OVERFLOW_CHECK_TIMER).start() + fp16_params = [] + for i, group in enumerate(self.fp16_groups): + fp16_params.extend([p for p in group if p.grad is not None]) + self.overflow = self.overflow_checker.has_overflow(fp16_params) + self.timers(OVERFLOW_CHECK_TIMER).stop() + prev_scale = self.cur_scale + self._update_scale(self.overflow) + if self.overflow: + if self.verbose: + log_dist( + "Overflow detected. Skipping step. 
Attempted loss " + f"scale: {prev_scale}, reducing to {self.cur_scale}", + ranks=[0]) + # Clear gradients + for i, group in enumerate(self.fp16_groups): + for p in group: + p.grad = None + + self.timers.log(OVERFLOW_TIMERS) + return self.overflow + + grads_groups_flat = [] + non_experts_grads_for_norm = [] + expert_grads_for_norm = {} + assert len(self.fp16_groups) == len(self.optimizer.param_groups) + + for i, group in enumerate(self.fp16_groups): + data_type = self.fp32_groups_flat[i].dtype + + grads_groups_flat.append( + _flatten_dense_tensors([ + torch.zeros(p.size(), dtype=data_type, device=p.device) if p.grad is None else p.grad.to(data_type) + for p in group + ])) + + self.fp32_groups_flat[i].grad = grads_groups_flat[i] + param_group = self.optimizer.param_groups[i] + + # split expert and non_expert grads for norm + if self.has_moe_layers and is_moe_param_group(param_group): + if param_group['name'] not in expert_grads_for_norm: + expert_grads_for_norm[param_group['name']] = [] + + expert_grads_for_norm[param_group['name']].append(self.fp32_groups_flat[i]) + else: + # retrieves the required mask for calculating the norm of flat_grad + # perform this collect operation only once + if not self.has_executed_step: + cur_flat_grad_norm_mask = self._get_norm_mask_idx(group) + self.flatten_grad_norm_mask_list.append(cur_flat_grad_norm_mask) + + non_experts_grads_for_norm.append(self.fp32_groups_flat[i]) + + for p in group: + p.grad = None + + self.timers(COMPUTE_NORM_TIMER).start() + + all_groups_norm = get_flattened_grad_norm(non_experts_grads_for_norm, + mpu=self.mpu, + grad_norm_mask=self.flatten_grad_norm_mask_list) + + if self.has_moe_layers: + all_groups_norm = get_norm_with_moe_layers(all_groups_norm, + mpu=self.mpu, + expert_tensors=expert_grads_for_norm, + norm_type=self.norm_type) + + scaled_global_grad_norm = get_global_norm(norm_list=[all_groups_norm]) + self.timers(COMPUTE_NORM_TIMER).stop() + + # Stash unscaled gradient norm + self._global_grad_norm = scaled_global_grad_norm / self.cur_scale + + self.timers(UNSCALE_AND_CLIP_TIMER).start() + self.unscale_and_clip_grads(grads_groups_flat, scaled_global_grad_norm) + self.timers(UNSCALE_AND_CLIP_TIMER).stop() + + self.timers(BASIC_STEP_TIMER).start() + self.optimizer.step() + self.timers(BASIC_STEP_TIMER).stop() + + #get rid of the fp32 gradients. Not needed anymore + for group in self.fp32_groups_flat: + group.grad = None + + self.timers(UPDATE_FP16_TIMER).start() + + for i in range(len(self.fp16_groups)): + updated_params = _unflatten_dense_tensors(self.fp32_groups_flat[i], self.fp16_groups[i]) + for p, q in zip(self.fp16_groups[i], updated_params): + p.data.copy_(q.data) + self.has_executed_step = True + self.timers(UPDATE_FP16_TIMER).stop() + + self.timers.log(STEP_TIMERS) + + return self.overflow + + def unscale_and_clip_grads(self, grad_groups_flat, total_norm, apply_scale=True): + # compute combined scale factor for this group + combined_scale = self.cur_scale + if self.clip_grad > 0.: + # norm is in fact norm*scale + clip = ((total_norm / self.cur_scale) + 1e-6) / self.clip_grad + if clip > 1: + combined_scale = clip * self.cur_scale + + if apply_scale: + for grad in grad_groups_flat: + grad.data.mul_(1. / combined_scale) + + return combined_scale + + def backward(self, loss, create_graph=False, retain_graph=False): + """ + :attr:`backward` performs the following steps: + + 1. fp32_loss = loss.float() + 2. scaled_loss = fp32_loss*loss_scale + 3. 
scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves + """ + if self.custom_loss_scaler: + scaled_loss = self.external_loss_scale * loss + scaled_loss.backward() + else: + scaled_loss = (loss.float()) * self.cur_scale + scaled_loss.backward(create_graph=create_graph, retain_graph=retain_graph) + + def _update_scale(self, skip): + if self.dynamic_loss_scale: + prev_scale = self.cur_scale + if skip: + self.cur_scale = max(self.cur_scale / self.scale_factor, self.min_loss_scale) + self.last_overflow_iter = self.cur_iter + if self.verbose: + logger.info(f"\nGrad overflow on iteration {self.cur_iter}") + logger.info(f"Reducing dynamic loss scale from {prev_scale} to {self.cur_scale}") + else: + # Ensure self.scale_window updates since last overflow + stable_interval = (self.cur_iter - self.last_overflow_iter) - 1 + if (stable_interval > 0) and (stable_interval % self.scale_window == 0): + self.cur_scale *= self.scale_factor + if self.verbose: + logger.info(f"No Grad overflow for {self.scale_window} iterations") + logger.info(f"Increasing dynamic loss scale from {prev_scale} to {self.cur_scale}") + else: + if skip: + logger.info("Grad overflow on iteration: %s", self.cur_iter) + logger.info("Using static loss scale of: %s", self.cur_scale) + self.cur_iter += 1 + return + + # Promote state so it can be retrieved or set via "fp16_optimizer_instance.state" + def _get_state(self): + return self.optimizer.state + + def _set_state(self, value): + self.optimizer.state = value + + state = property(_get_state, _set_state) + + # Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups" + # (for example, to adjust the learning rate) + def _get_param_groups(self): + return self.optimizer.param_groups + + def _set_param_groups(self, value): + self.optimizer.param_groups = value + + param_groups = property(_get_param_groups, _set_param_groups) + + def state_dict(self): + """ + Returns a dict containing the current state of this :class:`FP16_Optimizer` instance. + This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict + of the contained Pytorch optimizer. + Example:: + checkpoint = {} + checkpoint['model'] = model.state_dict() + checkpoint['optimizer'] = optimizer.state_dict() + torch.save(checkpoint, "saved.pth") + """ + state_dict = {} + state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale + state_dict['cur_scale'] = self.cur_scale + state_dict['cur_iter'] = self.cur_iter + if state_dict['dynamic_loss_scale']: + state_dict['last_overflow_iter'] = self.last_overflow_iter + state_dict['scale_factor'] = self.scale_factor + state_dict['scale_window'] = self.scale_window + state_dict[OPTIMIZER_STATE_DICT] = self.optimizer.state_dict() + state_dict['fp32_groups_flat'] = self.fp32_groups_flat + state_dict[CLIP_GRAD] = self.clip_grad + return state_dict + + # Refresh fp32 master params from fp16 copies + def refresh_fp32_params(self): + for current, saved in zip(self.fp32_groups_flat, self.fp16_groups_flat): + current.data.copy_(saved.data) + + def load_state_dict(self, state_dict, load_optimizer_states=True): + """ + Loads a state_dict created by an earlier call to state_dict(). + If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``, + whose parameters in turn came from ``model``, it is expected that the user + will call ``model.load_state_dict()`` before + ``fp16_optimizer_instance.load_state_dict()`` is called. 
+ Example:: + model = torch.nn.Linear(D_in, D_out).to(get_accelerator().device_name()).half() + optimizer = torch.optim.SGD(model.parameters(), lr=1e-3) + optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0) + ... + checkpoint = torch.load("saved.pth") + model.load_state_dict(checkpoint['model']) + optimizer.load_state_dict(checkpoint['optimizer']) + """ + # I think it should actually be ok to reload the optimizer before the model. + self.dynamic_loss_scale = state_dict['dynamic_loss_scale'] + self.cur_scale = state_dict['cur_scale'] + self.cur_iter = state_dict['cur_iter'] + if state_dict['dynamic_loss_scale']: + self.last_overflow_iter = state_dict['last_overflow_iter'] + self.scale_factor = state_dict['scale_factor'] + self.scale_window = state_dict['scale_window'] + if load_optimizer_states: + self.optimizer.load_state_dict(state_dict[OPTIMIZER_STATE_DICT]) + self.clip_grad = state_dict[CLIP_GRAD] + # At this point, the optimizer's references to the model's fp32 parameters are up to date. + # The optimizer's hyperparameters and internal buffers are also up to date. + # However, the fp32 master copies of the model's fp16 params stored by the optimizer are still + # out of date. There are two options. + # 1: Refresh the master params from the model's fp16 params. + # This requires less storage but incurs precision loss. + # 2: Save and restore the fp32 master copies separately. + # We choose option 2. + # + # Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device + # of their associated parameters, because it's possible those buffers might not exist yet in + # the current optimizer instance. In our case, as long as the current FP16_Optimizer has been + # constructed in the same way as the one whose state_dict we are loading, the same master params + # are guaranteed to exist, so we can just copy_() from the saved master params. + for current, saved in zip(self.fp32_groups_flat, state_dict['fp32_groups_flat']): + current.data.copy_(saved.data) + + def __repr__(self): + return repr(self.optimizer) + + # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale" + def _get_loss_scale(self): + if self.custom_loss_scaler: + return self.external_loss_scale + else: + return self.cur_scale + + def _set_loss_scale(self, value): + self.loss_scaler.cur_scale = value + + loss_scale = property(_get_loss_scale, _set_loss_scale) diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/loss_scaler.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/loss_scaler.py new file mode 100644 index 0000000000000000000000000000000000000000..451451c51a3218b26667555ba6079536d09b6bda --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/loss_scaler.py @@ -0,0 +1,270 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+Taken and modified for DeepSpeed from: + https://github.com/NVIDIA/Megatron-LM/blob/master/fp16/loss_scaler.py +Commit: 93ab4bea59dc5cbf97c079d313741866af4deac9 +""" + +import torch +from deepspeed import comm as dist +from deepspeed.utils import logger + +INITIAL_LOSS_SCALE = 'init_scale' +SCALE_WINDOW = 'scale_window' +DELAYED_SHIFT = 'delayed_shift' +CONSECUTIVE_HYSTERESIS = 'consecutive_hysteresis' +MIN_LOSS_SCALE = 'min_scale' + + +# item() is a recent addition, so this helps with backward compatibility. +def to_python_float(t): + if hasattr(t, 'item'): + return t.item() + return t[0] + + +class LossScalerBase: + """LossScalarBase + Base class for a loss scaler + """ + + def __init__(self, cur_scale): + self.cur_scale = cur_scale + self.dynamic = False + + @property + def loss_scale(self): + return self.cur_scale + + def scale_gradient(self, module, grad_in, grad_out): + return tuple(self.loss_scale * g for g in grad_in) + + def update_scale(self, overflow): + pass + + def backward(self, loss, retain_graph=False): + scaled_loss = loss * self.loss_scale + scaled_loss.backward(retain_graph=retain_graph) + # print(f'LossScalerBackward: {scaled_loss=}') + + +class LossScaler(LossScalerBase): + """ + Class that manages a static loss scale. This class is intended to interact with + :class:`FP16_Optimizer`, and should not be directly manipulated by the user. + + Use of :class:`LossScaler` is enabled via the ``static_loss_scale`` argument to + :class:`FP16_Optimizer`'s constructor. + + Args: + scale (float, optional, default=1.0): The loss scale. + """ + + def __init__(self, scale=1): + super(LossScaler, self).__init__(scale) + + # `params` is a list / generator of torch.Variable + def has_overflow(self, params): + return False + + # `x` is a torch.Tensor + def _has_inf_or_nan(x): + return False + + +class DynamicLossScaler(LossScalerBase): + """ + Class that manages dynamic loss scaling. It is recommended to use :class:`DynamicLossScaler` + indirectly, by supplying ``dynamic_loss_scale=True`` to the constructor of + :class:`FP16_Optimizer`. However, it's important to understand how :class:`DynamicLossScaler` + operates, because the default options can be changed using the + the ``dynamic_loss_args`` argument to :class:`FP16_Optimizer`'s constructor. + + Loss scaling is designed to combat the problem of underflowing gradients encountered at long + times when training fp16 networks. Dynamic loss scaling begins by attempting a very high loss + scale. Ironically, this may result in OVERflowing gradients. If overflowing gradients are + encountered, :class:`DynamicLossScaler` informs :class:`FP16_Optimizer` that an overflow has + occurred. + :class:`FP16_Optimizer` then skips the update step for this particular iteration/minibatch, + and :class:`DynamicLossScaler` adjusts the loss scale to a lower value. + If a certain number of iterations occur without overflowing gradients detected, + :class:`DynamicLossScaler` increases the loss scale once more. + In this way :class:`DynamicLossScaler` attempts to "ride the edge" of + always using the highest loss scale possible without incurring overflow. + + Args: + init_scale (float, optional, default=2**32): Initial loss scale attempted by :class:`DynamicLossScaler.` + scale_factor (float, optional, default=2.0): Factor used when adjusting the loss scale. If an overflow is encountered, the loss scale is readjusted to loss scale/``scale_factor``. 
If ``scale_window`` consecutive iterations take place without an overflow, the loss scale is readjusted to loss_scale*``scale_factor``. + scale_window (int, optional, default=1000): Number of consecutive iterations without an overflow to wait before increasing the loss scale. + consecutive_hysteresis (bool, optional, default=False): Whether to refill hysteresis if we reach an iteration that doesn't overflow + """ + + def __init__(self, + init_scale=2**32, + scale_factor=2., + scale_window=1000, + min_scale=1, + delayed_shift=1, + consecutive_hysteresis=False, + raise_error_at_min_scale=True, + dtype=torch.half): + super(DynamicLossScaler, self).__init__(init_scale) + self.cur_iter = 0 + self.last_overflow_iter = -1 + self.scale_factor = scale_factor + self.scale_window = scale_window + self.min_scale = min_scale + self.delayed_shift = delayed_shift + self.cur_hysteresis = delayed_shift + self.consecutive_hysteresis = consecutive_hysteresis + self.raise_error_at_min_scale = raise_error_at_min_scale + self.dynamic = True + self.dtype = dtype + + # `params` is a list / generator of torch.Variable + def has_overflow_serial(self, params): + for p in params: + if p.grad is not None and self._has_inf_or_nan(p.grad.data): + return True + + return False + + # `x` is a torch.Tensor + def _has_inf_or_nan(x): + try: + # if x is half, the .float() incurs an additional deep copy, but it's necessary if + # Pytorch's .sum() creates a one-element tensor of the same type as x + # (which is true for some recent version of pytorch). + cpu_sum = float(x.float().sum()) + # More efficient version that can be used if .sum() returns a Python scalar + # cpu_sum = float(x.sum()) + except RuntimeError as instance: + # We want to check if inst is actually an overflow exception. + # RuntimeError could come from a different error. + # If so, we still want the exception to propagate. + if "value cannot be converted" not in instance.args[0]: + raise + return True + else: + if cpu_sum in [float('inf'), -float('inf')] or cpu_sum != cpu_sum: + return True + return False + + # `overflow` is boolean indicating whether the gradient overflowed + def update_scale(self, overflow): + if overflow: + # self.cur_scale /= self.scale_factor + if self.delayed_shift == 1 or self.cur_hysteresis == 1: + if (self.cur_scale == self.min_scale) and self.raise_error_at_min_scale: + raise Exception( + "Current loss scale already at minimum - cannot decrease scale anymore. Exiting run.") + else: + next_scale = max(self.cur_scale / self.scale_factor, self.min_scale) + if dist.get_rank() == 0: + overflow_msg = f"[deepspeed] OVERFLOW! Rank {dist.get_rank()} Skipping step." + if self.dtype == torch.half: + overflow_msg += f" Attempted loss scale: {int(self.cur_scale)}, reducing to {int(next_scale)}" + logger.info(overflow_msg) + self.cur_scale = next_scale + else: + if dist.get_rank() == 0: + overflow_msg = f"[deepspeed] OVERFLOW! Rank {dist.get_rank()} Skipping step." + if self.dtype == torch.half: + overflow_msg += f" Attempted loss scale: {int(self.cur_scale)}, but hysteresis is {self.cur_hysteresis}. Reducing hysteresis to {self.cur_hysteresis-1}" + logger.info(overflow_msg) + self.cur_hysteresis -= 1 + self.last_overflow_iter = self.cur_iter + else: + if self.consecutive_hysteresis: + if dist.get_rank() == 0: + hysteresis_msg = f"Consecutive hysteresis is enabled. 
Restoring hysteresis to {self.delayed_shift}" + logger.info(hysteresis_msg) + self.cur_hysteresis = self.delayed_shift + if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0: + if not self.consecutive_hysteresis: + self.cur_hysteresis = self.delayed_shift + self.cur_scale *= self.scale_factor + self.cur_iter += 1 + + +# Although loss scaling is only defined for fp16, yet for backwards compatibility +# we still create a scaler for other dtypes (fp32, bf16) which does not perform any scaling. +def CreateLossScaler(dtype, static_loss_scale, dynamic_scaling, dynamic_loss_args): + if dtype == torch.half and dynamic_scaling: + if dynamic_loss_args is None: + return DynamicLossScaler(dtype=dtype) + return DynamicLossScaler(dtype=dtype, **dynamic_loss_args) + + loss_scale_value = static_loss_scale if dtype == torch.half else 1.0 + return LossScaler(scale=loss_scale_value) + + +############################################################## +# Example usage below here -- assuming it's in a separate file +############################################################## +""" +TO-DO separate out into an example. +if __name__ == "__main__": + import torch + from torch.autograd import Variable + from dynamic_loss_scaler import DynamicLossScaler + + # N is batch size; D_in is input dimension; + # H is hidden dimension; D_out is output dimension. + N, D_in, H, D_out = 64, 1000, 100, 10 + + # Create random Tensors to hold inputs and outputs, and wrap them in Variables. + x = Variable(torch.randn(N, D_in), requires_grad=False) + y = Variable(torch.randn(N, D_out), requires_grad=False) + + w1 = Variable(torch.randn(D_in, H), requires_grad=True) + w2 = Variable(torch.randn(H, D_out), requires_grad=True) + parameters = [w1, w2] + + learning_rate = 1e-6 + optimizer = torch.optim.SGD(parameters, lr=learning_rate) + loss_scaler = DynamicLossScaler() + + for t in range(500): + y_pred = x.mm(w1).clamp(min=0).mm(w2) + loss = (y_pred - y).pow(2).sum() * loss_scaler.loss_scale + print('Iter {} loss scale: {}'.format(t, loss_scaler.loss_scale)) + print('Iter {} scaled loss: {}'.format(t, loss.data[0])) + print('Iter {} unscaled loss: {}'.format(t, loss.data[0] / loss_scaler.loss_scale)) + + # Run backprop + optimizer.zero_grad() + loss.backward() + + # Check for overflow + has_overflow = DynamicLossScaler.has_overflow(parameters) + + # If no overflow, unscale grad and update as usual + if not has_overflow: + for param in parameters: + param.grad.data.mul_(1. / loss_scaler.loss_scale) + optimizer.step() + # Otherwise, don't do anything -- ie, skip iteration + else: + print('fp16 dynamic loss scale overflow!') + + # Update loss scale for next iteration + loss_scaler.update_scale(has_overflow) + +""" diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ac6f7e9784ce60f6c1b4a9134b73b4e415337641 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) Microsoft Corporation. 
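A short usage sketch for the CreateLossScaler factory defined above, assuming deepspeed is importable in the environment; purely illustrative:

    import torch
    from deepspeed.runtime.fp16.loss_scaler import CreateLossScaler, DynamicLossScaler, LossScaler

    # fp16 + dynamic scaling -> a DynamicLossScaler starting at the default init_scale of 2**32
    scaler = CreateLossScaler(dtype=torch.half, static_loss_scale=128.0,
                              dynamic_scaling=True, dynamic_loss_args=None)
    assert isinstance(scaler, DynamicLossScaler) and scaler.dynamic

    # Any other dtype falls back to a no-op static LossScaler with scale 1.0
    scaler = CreateLossScaler(dtype=torch.bfloat16, static_loss_scale=128.0,
                              dynamic_scaling=True, dynamic_loss_args=None)
    assert isinstance(scaler, LossScaler) and scaler.loss_scale == 1.0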
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .adam import OnebitAdam +from .lamb import OnebitLamb +from .zoadam import ZeroOneAdam diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28cda6a4155684b7a2d04621b0128f5ebea3bf7e Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/adam.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/adam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4582d089a6db6ebfbefbcf8d1f66daa540af8de8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/adam.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/lamb.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/lamb.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..805b3a3e5be825b21ac5d144a54076ac507851a3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/lamb.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/zoadam.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/zoadam.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5681005db06166afad066215a95d8ad69f3a27f0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/__pycache__/zoadam.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/adam.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/adam.py new file mode 100644 index 0000000000000000000000000000000000000000..f8a50393ac5d0fcbdb643e84883204d60681aa12 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/adam.py @@ -0,0 +1,306 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import types +import torch +import numpy as np +from deepspeed.accelerator import get_accelerator +from deepspeed.utils.torch import required_torch_version +from deepspeed import comm as dist + + +class OnebitAdam(torch.optim.Optimizer): + """Implements the 1-bit Adam algorithm. Currently GPU-only. + For usage example please see https://www.deepspeed.ai/tutorials/onebit-adam/ + For technical details please read https://arxiv.org/abs/2102.02888 + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups. + lr (float, optional): learning rate. (default: 1e-3) + freeze_step (int, optional): Number of steps for warmup (uncompressed) + stage before we start using compressed communication. (default 100000) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square. (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability. 
(default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) NOT SUPPORTED in 1-bit Adam! + eps_inside_sqrt (boolean, optional): in the 'update parameters' step, + adds eps to the bias-corrected second moment estimate before + evaluating square root instead of adding it to the square root of + second moment estimate as in the original paper. (default: False) + cuda_aware (boolean, required): Set True if the underlying MPI implementation + supports CUDA-Aware communication. (default: False) + comm_backend_name (string, optional): Set to 'mpi' if needed. (default: 'nccl') + .. _Adam\\: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + .. _On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + """ + + def __init__(self, + params, + deepspeed=None, + lr=1e-3, + freeze_step=100000, + bias_correction=True, + betas=(0.9, 0.999), + eps=1e-8, + eps_inside_sqrt=False, + weight_decay=0., + max_grad_norm=0., + amsgrad=False, + cuda_aware=False, + comm_backend_name='nccl'): + + if amsgrad: + raise RuntimeError('1-bit Adam does not support the AMSGrad variant.') + + defaults = dict(lr=lr, + bias_correction=bias_correction, + betas=betas, + eps=eps, + weight_decay=weight_decay, + max_grad_norm=max_grad_norm) + + super(OnebitAdam, self).__init__(params, defaults) + self.eps_mode = 0 if eps_inside_sqrt else 1 + self.comm_time = 0.0 + self.step_time = 0.0 + self.ave_step = 1 + self.bk_time = 0.0 + + self.deepspeed = deepspeed + self.adam_freeze_key = False + self.initialize = False + self.freeze_step = freeze_step + self.cuda_aware = cuda_aware + self.using_pipeline = False + + self.comm_backend_name = comm_backend_name + + assert dist.is_initialized(), "Please initialize the torch distributed backend." + # Empty initializer. Set handle based on the comm backend as follows. + self.comm_backend_handle = None + if self.comm_backend_name == 'nccl': + assert ( + required_torch_version(min_version=1.8) + ), "Please use torch 1.8 or greater to enable NCCL backend in 1-bit Adam. Alternatively, please specify 'mpi' as the 'comm_backend_name' in config file to proceed with the MPI backend" + from deepspeed.runtime.comm.nccl import NcclBackend + self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce') + self.comm_backend_handle = NcclBackend(self.deepspeed.mpu) + elif self.comm_backend_name == 'mpi': + from deepspeed.runtime.comm.mpi import MpiBackend + self.comm_backend_handle = MpiBackend(cuda_aware) + elif self.comm_backend_name == 'hccl': + from deepspeed.runtime.comm.hccl import HcclBackend + self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce') + self.comm_backend_handle = HcclBackend(self.deepspeed.mpu) + self.size = self.comm_backend_handle.size + + self.divider = int(self.size * 8 / np.gcd(self.size, 8)) + + def step(self, closure=None, grads=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + grads (list of tensors, optional): weight gradient to use for the + optimizer update. If gradients have type torch.half, parameters + are expected to be in type torch.float. 
(default: None) + output params (list of tensors, optional): A reduced precision copy + of the updated weights written out in addition to the regular + updated weights. Have to be of same type as gradients. (default: None) + scale (float, optional): factor to divide gradient tensor values + by before applying to weights. (default: 1) + """ + loss = None + if closure is not None: + loss = closure() + + gather_time = 0 + allgather_time = 0 + all_time = 0 + + if self.adam_freeze_key is False: + v_diff_buffer = 0.0 + + if grads is None: + grads_group = [None] * len(self.param_groups) + # backward compatibility + # assuming a list/generator of parameter means single group + elif isinstance(grads, types.GeneratorType): + grads_group = [grads] + elif type(grads[0]) != list: + grads_group = [grads] + else: + grads_group = grads + + for group, grads_this_group in zip(self.param_groups, grads_group): + if grads_this_group is None: + grads_this_group = [None] * len(group['params']) + + bias_correction = 1 if group['bias_correction'] else 0 + + for p, grad in zip(group['params'], grads_this_group): + if p.grad is None and grad is None: + continue + if grad is None: + grad = p.grad.data + if grad.is_sparse: + raise RuntimeError('1-bit Adam does not support sparse gradients') + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p.data) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p.data) + + if not self.initialize or (self.adam_freeze_key and 'worker_error' not in state.keys()): + state['tensor_size'] = torch.numel(p.data) + state['corrected_tensor_size'] = state['tensor_size'] + + if state['tensor_size'] % (self.size * self.divider) != 0: + state['corrected_tensor_size'] += ((self.size * self.divider) - (state['tensor_size'] % + (self.size * self.divider))) + state['server_chunk_size'] = state['corrected_tensor_size'] // self.size + get_accelerator().empty_cache() + state['worker_error'] = torch.zeros(state['corrected_tensor_size'], device=p.device) + state['server_error'] = torch.zeros(state['server_chunk_size'], device=p.device) + get_accelerator().empty_cache() + self.adam_freeze_key = True + if not self.initialize and dist.get_rank() == 0: + print("Cupy Buffers Initialized Successfully.") + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + beta1, beta2 = group['betas'] + + state['step'] += 1 + + if self.adam_freeze_key is False: + exp_avg.mul_(beta1).add_(1 - beta1, grad) + exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) + grad = None + if self.initialize: + update = exp_avg / (exp_avg_sq.sqrt() + group['eps']) + + else: + if 'non_freeze' in group.keys() and group['non_freeze'] is True: + dist.all_reduce(grad) + grad.mul_(1 / dist.get_world_size()) + exp_avg.mul_(beta1).add_(1 - beta1, grad) + exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) + grad = None + else: + if self.initialize is True: + exp_avg.mul_(beta1).add_(1 - beta1, grad) + grad = None + + if self.size > 1: + exp_avg.set_( + self.comm_backend_handle.compressed_allreduce(exp_avg, state['worker_error'], + state['server_error'], + self.deepspeed.local_rank)) + # Because 1-bit compression cannot represent exact zero, it is required to + # provide a momentum mask for those params that have constant exact zeros in their + # momentums, otherwise the compression error would keep accumulating. 
+ # For example, for BERT pre-training seq 128, bert.embeddings.position_embeddings.weight + # always have exact zeros in its momentum for row 129 to 512, because it only + # learns up to seq length 128 while the model supports up to 512 seq length. + # (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.) + if 'exp_avg_mask' in group: + if exp_avg.device != group['exp_avg_mask'].device: + group['exp_avg_mask'] = group['exp_avg_mask'].to(device=exp_avg.device) + exp_avg.mul_(group['exp_avg_mask']) + + if self.initialize: + update = exp_avg / (exp_avg_sq.sqrt() + group['eps']) + + if self.initialize: + if group['weight_decay'] > 0.0: + update += group['weight_decay'] * p.data + with torch.no_grad(): + p.add_(-group['lr'] * update) + + if not self.initialize: + print('Pop out errors', flush=True) + state.pop('worker_error') + state.pop('server_error') + + if not self.initialize: + self.adam_freeze_key = False + self.initialize = True + print(f"Finished the initialization step at rank {dist.get_rank()}") + return loss + + if self.adam_freeze_key is False: + if state['step'] >= self.freeze_step: + print('OnebitAdam - starting compressed communication') + self.adam_freeze_key = True + if self.using_pipeline: + self.deepspeed.pipeline_enable_backward_allreduce = False + else: + self.deepspeed.enable_backward_allreduce = False + + return loss + + def load_state_dict(self, state_dict): + """ + Overrides load_state_dict() to add special handling when loading checkpoints + """ + # Because at different stage exp_avg_mask may change (e.g., + # BERT pre-training seqlen 128 and 512 ), we don't use the exp_avg_mask + # in checkpoints but always use the one user provided in training script. + # (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.) + # Thus here we keep the exp_avg_mask unchanged when loading checkpoint + for i, group in enumerate(self.param_groups): + if 'exp_avg_mask' in group: + state_dict['param_groups'][i]['exp_avg_mask'] = group['exp_avg_mask'] + elif 'exp_avg_mask' not in group and 'exp_avg_mask' in state_dict['param_groups'][i]: + state_dict['param_groups'][i].pop('exp_avg_mask') + super().load_state_dict(state_dict) + if self.state[self.param_groups[0]['params'][0]]['step'] < self.freeze_step: + if dist.get_rank() == 0: + print("Checkpoint loaded and OnebitAdam warmup stage starts/continues.") + if self.adam_freeze_key is True: + self.adam_freeze_key = False + if self.using_pipeline: + self.deepspeed.pipeline_enable_backward_allreduce = True + else: + self.deepspeed.enable_backward_allreduce = True + else: + if dist.get_rank() == 0: + print("Checkpoint loaded and OnebitAdam compression stage starts/continues.") + if self.adam_freeze_key is False: + self.adam_freeze_key = True + if self.using_pipeline: + self.deepspeed.pipeline_enable_backward_allreduce = False + else: + self.deepspeed.enable_backward_allreduce = False + # We reset the compression errors when loading checkpoints for 3 reasons: + # 1) The worker and server error at each GPU are distinct, so in current implementation + # only rank 0's errors are saved in the checkpoint. Thus we have to reset the errors. + # If we want to save them correctly we need O(num_gpu*model_size) memory in order to + # gather all the error, which is a very large memory requirement. It's possible to save + # them in a distributed way, but it will make the checkpoint saving/loading much more complicated. 
+ # 2) Even if we are able to save the compression errors correctly, you need to have the + # exact same number of GPUs in order to load them correctly. + # 3) We verified on BERT pre-training that occasionally resetting the compression error + # at checkpoint loading does not affect the convergence. + # However, please avoid frequent checkpoint loading which could break the error + # compensation mechanism thus affect the convergence. + for group in self.param_groups: + for p in group['params']: + if 'worker_error' in self.state[p]: + self.state[p].pop('worker_error') + if 'server_error' in self.state[p]: + self.state[p].pop('server_error') diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/lamb.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/lamb.py new file mode 100644 index 0000000000000000000000000000000000000000..0f70782fd3fffb097d74e2745318e2b7178423b7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/lamb.py @@ -0,0 +1,443 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import types +import torch +import numpy as np +from deepspeed import comm as dist +from deepspeed.utils.torch import required_torch_version +from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors +from deepspeed.accelerator import get_accelerator + + +class OnebitLamb(torch.optim.Optimizer): + """Implements the 1-bit Lamb algorithm. Currently GPU-only. + For usage example please see https://www.deepspeed.ai/tutorials/onebit-lamb/ + For technical details please see our paper https://arxiv.org/abs/2104.06069. + + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups. + lr (float, optional): learning rate. (default: 1e-3) + freeze_step (int, optional): Number of steps for warmup (uncompressed) + stage before we start using compressed communication. (default 100000) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square. (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability. (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + max_coeff(float, optional): maximum value of the lamb coefficient (default: 10.0) + min_coeff(float, optional): minimum value of the lamb coefficient (default: 0.01) + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) NOT SUPPORTED in 1-bit Lamb! + eps_inside_sqrt (boolean, optional): in the 'update parameters' step, + adds eps to the bias-corrected second moment estimate before + evaluating square root instead of adding it to the square root of + second moment estimate as in the original paper. (default: False) + cuda_aware (boolean, required): Set True if the underlying MPI implementation + supports CUDA-Aware communication. (default: False) + comm_backend_name (string, optional): Set to 'mpi' if needed. 
(default: 'nccl') + coeff_beta (float, optional): coefficient used for computing + running averages of lamb coefficient (default: 0.9) note that you may want to + increase or decrease this beta depending on the freeze_step you choose, as + 1/(1 - coeff_beta) should be smaller than or equal to freeze_step + factor_max (float, optional): maximum value of scaling factor to the frozen lamb + coefficient during compression stage (default: 4.0) + factor_min (float, optional): minimum value of scaling factor to the frozen lamb + coefficient during compression stage (default: 0.5) + factor_threshold (float, optional): threshold of how much the scaling factor can + fluctuate between steps (default: 0.1) + .. _Large Batch Optimization for Deep Learning\\: Training BERT in 76 minutes: + https://arxiv.org/abs/1904.00962 + .. _Adam\\: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + .. _On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + """ + + def __init__(self, + params, + deepspeed=None, + lr=1e-3, + freeze_step=100000, + bias_correction=True, + betas=(0.9, 0.999), + eps=1e-8, + eps_inside_sqrt=False, + weight_decay=0., + max_grad_norm=0., + max_coeff=10.0, + min_coeff=0.01, + amsgrad=False, + cuda_aware=False, + comm_backend_name='nccl', + coeff_beta=0.9, + factor_max=4.0, + factor_min=0.5, + factor_threshold=0.1): + + if amsgrad: + raise RuntimeError('1-bit Lamb does not support the AMSGrad variant.') + + defaults = dict(lr=lr, + bias_correction=bias_correction, + betas=betas, + eps=eps, + weight_decay=weight_decay, + max_grad_norm=max_grad_norm, + max_coeff=max_coeff, + min_coeff=min_coeff) + + super(OnebitLamb, self).__init__(params, defaults) + self.eps_mode = 0 if eps_inside_sqrt else 1 + self.deepspeed = deepspeed + self.lamb_freeze_key = False + self.initialize = False + self.freeze_step = freeze_step + self.cuda_aware = cuda_aware + self.coeff_beta = coeff_beta + self.factor_max = factor_max + self.factor_min = factor_min + self.factor_threshold = factor_threshold + self.using_pipeline = False + + self.comm_backend_name = comm_backend_name + + assert dist.is_initialized(), "Please initialize the torch distributed backend." + # Empty initializer. Set handle based on the comm backend as follows. + self.comm_backend_handle = None + if self.comm_backend_name == 'nccl': + assert ( + required_torch_version(min_version=1.8) + ), "Please use torch 1.8 or greater to enable NCCL backend in 1-bit Adam. 
Alternatively, please specify 'mpi' as the 'comm_backend_name' in config file to proceed with the MPI backend" + from deepspeed.runtime.comm.nccl import NcclBackend + self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce') + self.comm_backend_handle = NcclBackend(self.deepspeed.mpu) + elif self.comm_backend_name == 'mpi': + from deepspeed.runtime.comm.mpi import MpiBackend + self.comm_backend_handle = MpiBackend(cuda_aware) + elif self.comm_backend_name == 'hccl': + from deepspeed.runtime.comm.hccl import HcclBackend + self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce') + self.comm_backend_handle = HcclBackend(self.deepspeed.mpu) + + self.size = self.comm_backend_handle.size + + self.divider = int(self.size * 8 / np.gcd(self.size, 8)) + + self.exp_avg_flat = [] + self.dummy_exp_avg = {} + self.corrected_tensor_sizes = [] + self.server_chunk_sizes = [] + self.worker_errors = [] + self.server_errors = [] + + self.lamb_coeffs = [] + + def step(self, closure=None, grads=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + grads (list of tensors, optional): weight gradient to use for the + optimizer update. If gradients have type torch.half, parameters + are expected to be in type torch.float. (default: None) + """ + loss = None + if closure is not None: + loss = closure() + + if grads is None: + grads_group = [None] * len(self.param_groups) + # backward compatibility + # assuming a list/generator of parameter means single group + elif isinstance(grads, types.GeneratorType): + grads_group = [grads] + elif type(grads[0]) != list: + grads_group = [grads] + else: + grads_group = grads + + # remove the previous stats + del self.lamb_coeffs[:] + + if self.lamb_freeze_key: + exp_avg_last_step = [] + for group in self.param_groups: + exp_avg_last_step.append([self.state[p]['exp_avg'].detach().clone() for p in group['params']]) + if 'scaling_coeff' not in self.state[self.param_groups[0]['params'][0]]: + # Compute the scaling_coeff for each momentum at the end of warmup stage. + # This is used to reduce compression error during compression stage. 
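Editor's note — a minimal standalone sketch of the per-tensor momentum rescaling described in the comment above (illustrative only, not part of the diffed DeepSpeed file; the `momenta` list is a hypothetical stand-in for the optimizer's exp_avg buffers): each momentum's RMS is measured, a shared "united" scale is taken as the mean RMS, and each tensor gets the coefficient united_scale / its own RMS so all momenta sit at a comparable magnitude before 1-bit compression.

import torch

# Hypothetical momentum buffers standing in for the state['exp_avg'] tensors.
momenta = [torch.randn(1024), torch.randn(4096) * 0.01, torch.randn(512) * 10.0]

# RMS (L2 norm / sqrt(numel)) of each momentum tensor.
rms = [(m.norm() / m.numel() ** 0.5).item() for m in momenta]

# Shared scale: mean RMS across all tensors.
united_scale = sum(rms) / len(rms)

# Per-tensor coefficient that brings every momentum to a comparable magnitude.
scaling_coeffs = [united_scale / r for r in rms]

# Momenta are multiplied by their coefficient before compression and divided
# back out afterwards, mirroring the exp_avg.mul_ / exp_avg.div_ calls in the
# surrounding optimizer code.
scaled = [m * c for m, c in zip(momenta, scaling_coeffs)]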
+ momentum_scales = [] + for group in self.param_groups: + momentum_scales.append([(torch.linalg.norm(self.state[p]['exp_avg']) / + np.sqrt(torch.numel(self.state[p]['exp_avg']))).item() + for p in group['params']]) + united_scale = sum([sum(x) for x in momentum_scales]) / sum([len(x) for x in momentum_scales]) + for i, group in enumerate(self.param_groups): + for j, p in enumerate(group['params']): + self.state[p]['scaling_coeff'] = united_scale / momentum_scales[i][j] + + for group, grads_this_group in zip(self.param_groups, grads_group): + if grads_this_group is None: + grads_this_group = [None] * len(group['params']) + + bias_correction = 1 if group['bias_correction'] else 0 + + for p, grad in zip(group['params'], grads_this_group): + if p.grad is None and grad is None: + continue + if grad is None: + grad = p.grad.data + if grad.is_sparse: + raise RuntimeError('1-bit Lamb does not support sparse gradients') + + state = self.state[p] + + # State initialization + if len(state) == 0 or (len(state) == 1 and 'scaling_coeff' in state.keys()): + state['step'] = 0 + state['lamb_coeff_freeze'] = 0.0 + state['last_factor'] = 1.0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p.data) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p.data) + state['exp_avg_sq_fresh'] = torch.zeros_like(p.data) + + if not self.initialize: + self.lamb_freeze_key = True + + exp_avg, exp_avg_sq, exp_avg_sq_fresh = state['exp_avg'], state['exp_avg_sq'], state[ + 'exp_avg_sq_fresh'] + beta1, beta2 = group['betas'] + max_coeff = group['max_coeff'] + min_coeff = group['min_coeff'] + + state['step'] += 1 + + if self.lamb_freeze_key is False: + # warmup stage, baseline Lamb optimization + exp_avg.mul_(beta1).add_(1 - beta1, grad) + exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) + if state['step'] == self.freeze_step: + exp_avg_sq_fresh.data = exp_avg_sq.detach().clone() + grad = None + if self.initialize: + weight_norm = p.data.pow(2).sum().sqrt() + update = exp_avg / (exp_avg_sq.sqrt() + group['eps']) + if group['weight_decay'] > 0.0: + update += group['weight_decay'] * p.data + update_norm = update.pow(2).sum().sqrt() + lamb_coeff = 1.0 + if weight_norm != 0 and update_norm != 0: + lamb_coeff = (weight_norm / update_norm).item() + if lamb_coeff > max_coeff: + lamb_coeff = max_coeff + if lamb_coeff < min_coeff: + lamb_coeff = min_coeff + if lamb_coeff != 1.0: + state['lamb_coeff_freeze'] = self.coeff_beta * state['lamb_coeff_freeze'] + ( + 1 - self.coeff_beta) * lamb_coeff + self.lamb_coeffs.append(lamb_coeff) + with torch.no_grad(): + p.add_(-group['lr'] * lamb_coeff * update) + else: + # compression stage, update each momentum locally, then + # communicate based on the compressed_allreduce below + if self.initialize: + exp_avg.mul_(beta1).add_(1 - beta1, grad) + exp_avg.mul_(self.state[p]['scaling_coeff']) + grad = None + + # init fused momentum + if len(self.exp_avg_flat) == 0: + momentum_groups = [] + tensor_size = 0 + for group in self.param_groups: + for p in group['params']: + momentum_groups.append(self.state[p]['exp_avg']) + tensor_size += torch.numel(p.data) + corrected_tensor_size = tensor_size + if tensor_size % (self.size * self.divider) != 0: + difference = ((self.size * self.divider) - (tensor_size % (self.size * self.divider))) + corrected_tensor_size += difference + self.dummy_exp_avg[0] = torch.zeros(difference, device=momentum_groups[0].data.device) + momentum_groups.append(self.dummy_exp_avg[0]) + 
self.corrected_tensor_sizes.append(corrected_tensor_size) + self.server_chunk_sizes.append(corrected_tensor_size // self.size) + + self.exp_avg_flat.append(_flatten_dense_tensors([p.detach().clone() for p in momentum_groups])) + updated_params = _unflatten_dense_tensors(self.exp_avg_flat[0], momentum_groups) + for p, q in zip(momentum_groups, updated_params): + p.data = q.data + + if self.initialize and len(self.worker_errors) == 0: + get_accelerator().empty_cache() + for i in range(len(self.exp_avg_flat)): + self.worker_errors.append( + torch.zeros(self.corrected_tensor_sizes[i], device=self.exp_avg_flat[i].device)) + self.server_errors.append(torch.zeros(self.server_chunk_sizes[i], device=self.exp_avg_flat[i].device)) + get_accelerator().empty_cache() + + if self.lamb_freeze_key: + if self.size > 1: + for i in range(len(self.exp_avg_flat)): + if not self.initialize: + get_accelerator().empty_cache() + self.worker_errors.append( + torch.zeros(self.corrected_tensor_sizes[i], device=self.exp_avg_flat[i].device)) + self.server_errors.append( + torch.zeros(self.server_chunk_sizes[i], device=self.exp_avg_flat[i].device)) + get_accelerator().empty_cache() + if dist.get_rank() == 0: + print("Cupy Buffers Initialized Successfully.") + + self.comm_backend_handle.compressed_allreduce(self.exp_avg_flat[i], self.worker_errors[0], + self.server_errors[0], self.deepspeed.local_rank) + + if dist.get_rank() == 0: + print('Pop out errors', flush=True) + del self.worker_errors[:] + del self.server_errors[:] + else: + self.comm_backend_handle.compressed_allreduce(self.exp_avg_flat[i], self.worker_errors[i], + self.server_errors[i], self.deepspeed.local_rank) + + if self.lamb_freeze_key and self.initialize: + for i, group in enumerate(self.param_groups): + bias_correction = 1 if group['bias_correction'] else 0 + + for j, p in enumerate(group['params']): + state = self.state[p] + exp_avg, exp_avg_sq, exp_avg_sq_fresh = state['exp_avg'], state['exp_avg_sq'], state[ + 'exp_avg_sq_fresh'] + beta1, beta2 = group['betas'] + exp_avg.div_(self.state[p]['scaling_coeff']) + # Because 1-bit compression cannot represent exact zero, it is required to + # provide a momentum mask for those params that have constant exact zeros in their + # momentums, otherwise the compression error would keep accumulating. + # For example, for BERT pre-training seq 128, bert.embeddings.position_embeddings.weight + # always have exact zeros in its momentum for row 129 to 512, because it only + # learns up to seq length 128 while the model supports up to 512 seq length. + # (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py about how + # to add this exp_avg_mask for BERT pre-training.) 
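Editor's note — a hedged illustration of the momentum-mask idea described in the comment above (a sketch only, not part of the diffed file; the 512/128/768 shapes and the param-group layout are assumptions loosely modeled on the BERT example the comment cites): the mask is a 0/1 tensor with the same shape as the parameter, zero wherever the momentum is known to stay exactly zero, and it is attached to the optimizer param group under the key 'exp_avg_mask' so the optimizer can zero those entries after each compressed allreduce.

import torch

max_position_embeddings = 512   # model's maximum sequence length (assumed)
trained_seq_len = 128           # sequence length actually trained in this phase (assumed)
hidden_size = 768               # embedding width (assumed)

# 1.0 for rows whose momentum can be non-zero, 0.0 for rows that never receive
# gradients (positions 128..511), matching the position-embedding case above.
mask = torch.ones(max_position_embeddings, hidden_size)
mask[trained_seq_len:, :] = 0.0

# Hypothetical parameter the mask applies to; the mask travels with the param
# group handed to the optimizer constructor, other params go in unmasked groups.
pos_emb = torch.nn.Embedding(max_position_embeddings, hidden_size)
param_groups = [{'params': [pos_emb.weight], 'exp_avg_mask': mask}]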
+ if 'exp_avg_mask' in group: + if exp_avg.device != group['exp_avg_mask'].device: + group['exp_avg_mask'] = group['exp_avg_mask'].to(device=exp_avg.device) + exp_avg.mul_(group['exp_avg_mask']) + + grad_reconstruct = ((exp_avg - exp_avg_last_step[i][j] * beta1) / (1 - beta1)) + exp_avg_sq_fresh.mul_(beta2).addcmul_(1 - beta2, grad_reconstruct, grad_reconstruct) + denom = exp_avg_sq.sqrt() + group['eps'] + update_prelim = exp_avg / denom + + if group['weight_decay'] > 0.0: + update = update_prelim + group['weight_decay'] * p.data + else: + update = update_prelim + + lamb_coeff = 1.0 + update_norm = update.pow(2).sum().sqrt() + denom_real = exp_avg_sq_fresh.sqrt() + group['eps'] + factor = (denom / denom_real).max().item() + if group['weight_decay'] > 0.0: + update_ratio = min(1.0, (update_prelim.pow(2).sum().sqrt() / update_norm).item()) + factor = factor * update_ratio + (1.0 - update_ratio) + if factor > self.factor_max: + factor = self.factor_max + if factor < self.factor_min: + factor = self.factor_min + if factor > state['last_factor'] * (1.0 + self.factor_threshold): + factor = state['last_factor'] * (1.0 + self.factor_threshold) + if factor < state['last_factor'] * (1.0 - self.factor_threshold): + factor = state['last_factor'] * (1.0 - self.factor_threshold) + state['last_factor'] = factor + lamb_coeff = state['lamb_coeff_freeze'] * factor + self.lamb_coeffs.append(lamb_coeff) + with torch.no_grad(): + p.add_(-group['lr'] * lamb_coeff * update) + del exp_avg_last_step[:] + exp_avg_last_step = None + + if not self.initialize: + self.lamb_freeze_key = False + self.initialize = True + print(f"Finished the initialization step at rank {dist.get_rank()}") + return loss + + if self.lamb_freeze_key is False: + if state['step'] >= self.freeze_step: + print('OnebitLamb - starting compressed communication') + self.lamb_freeze_key = True + if self.using_pipeline: + self.deepspeed.pipeline_enable_backward_allreduce = False + else: + self.deepspeed.enable_backward_allreduce = False + + return loss + + def load_state_dict(self, state_dict): + """ + Overrides load_state_dict() to add special handling when loading checkpoints + """ + # Because at different stage exp_avg_mask may change (e.g., + # BERT pre-training seqlen 128 and 512 ), we don't use the exp_avg_mask + # in checkpoints but always use the one user provided in training script. + # (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.) 
+ # Thus here we keep the exp_avg_mask unchanged when loading checkpoint + for i, group in enumerate(self.param_groups): + if 'exp_avg_mask' in group: + state_dict['param_groups'][i]['exp_avg_mask'] = group['exp_avg_mask'] + elif 'exp_avg_mask' not in group and 'exp_avg_mask' in state_dict['param_groups'][i]: + state_dict['param_groups'][i].pop('exp_avg_mask') + super().load_state_dict(state_dict) + # need to reset the fused momentum since loading states will break the linking + del self.exp_avg_flat[:] + self.dummy_exp_avg.clear() + del self.corrected_tensor_sizes[:] + del self.server_chunk_sizes[:] + if self.state[self.param_groups[0]['params'][0]]['step'] < self.freeze_step: + if dist.get_rank() == 0: + print("Checkpoint loaded and OnebitLamb warmup stage starts/continues.") + if self.lamb_freeze_key is True: + self.lamb_freeze_key = False + if self.using_pipeline: + self.deepspeed.pipeline_enable_backward_allreduce = True + else: + self.deepspeed.enable_backward_allreduce = True + for group in self.param_groups: + for p in group['params']: + self.state[p]['lamb_coeff_freeze'] = 0.0 + self.state[p]['last_factor'] = 1.0 + if 'scaling_coeff' in self.state[p]: + self.state[p].pop('scaling_coeff') + else: + if dist.get_rank() == 0: + print("Checkpoint loaded and OnebitLamb compression stage starts/continues.") + if self.lamb_freeze_key is False: + self.lamb_freeze_key = True + if self.using_pipeline: + self.deepspeed.pipeline_enable_backward_allreduce = False + else: + self.deepspeed.enable_backward_allreduce = False + # We reset the compression errors when loading checkpoints for 3 reasons: + # 1) The worker and server error at each GPU are distinct, so in current implementation + # only rank 0's errors are saved in the checkpoint. Thus we have to reset the errors. + # If we want to save them correctly we need O(num_gpu*model_size) memory in order to + # gather all the error, which is a very large memory requirement. It's possible to save + # them in a distributed way, but it will make the checkpoint saving/loading much more complicated. + # 2) Even if we are able to save the compression errors correctly, you need to have the + # exact same number of GPUs in order to load them correctly. + # 3) We verified on BERT pre-training that occasionally resetting the compression error + # at checkpoint loading does not affect the convergence. + # However, please avoid frequent checkpoint loading which could break the error + # compensation mechanism thus affect the convergence. + del self.worker_errors[:] + del self.server_errors[:] + + def get_lamb_coeffs(self): + return self.lamb_coeffs diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/zoadam.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/zoadam.py new file mode 100644 index 0000000000000000000000000000000000000000..bd75ccd4f7a02bb42ba84673a4fdcef2aecdce12 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/onebit/zoadam.py @@ -0,0 +1,359 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import types +import torch +import numpy as np +from deepspeed.accelerator import get_accelerator +from deepspeed.utils.torch import required_torch_version +from deepspeed import comm as dist + + +class ZeroOneAdam(torch.optim.Optimizer): + """Implements the 0/1 Adam algorithm. Currently GPU-only. 
+ For usage example please see https://www.deepspeed.ai/tutorials/zero-one-adam/ + For technical details please read https://arxiv.org/abs/2202.06009 + Arguments: + params (iterable): iterable of parameters to optimize or dicts defining + parameter groups. + lr (float, optional): learning rate. (default: 1e-3) + betas (Tuple[float, float], optional): coefficients used for computing + running averages of gradient and its square. (default: (0.9, 0.999)) + eps (float, optional): term added to the denominator to improve + numerical stability. (default: 1e-8) + weight_decay (float, optional): weight decay (L2 penalty) (default: 0) + var_freeze_step (int, optional): The latest step to update the variance, + using the notation from https://arxiv.org/abs/2202.06009, it denotes the + max{i|i in T_v}. Note that this is different from the freeze step from the + 1-bit Adam. The var_freeze_step is usually the end of the learning rate warmup + and thus does not require tuning. (default: 100000) + var_update_scaler (int, optional): The interval to update the variance. Note that + the update policy for variance follows an exponential rule, where var_update_scaler + denotes the kappa in the 0/1 Adam paper. (default: 16) + local_step_scaler (int, optional): The interval to scale the local steps interval + according to the learning rate policy. (default: 32678) + local_step_clipper (int, optional): The largest interval for local steps with + learning rate policy. This corresponds to the variable H in the 0/1 Adam paper. + (default: 16) + amsgrad (boolean, optional): whether to use the AMSGrad variant of this + algorithm from the paper `On the Convergence of Adam and Beyond`_ + (default: False) NOT SUPPORTED in 0/1 Adam! + eps_inside_sqrt (boolean, optional): in the 'update parameters' step, + adds eps to the bias-corrected second moment estimate before + evaluating square root instead of adding it to the square root of + second moment estimate as in the original paper. (default: False) + cuda_aware (boolean, required): Set True if the underlying MPI implementation + supports CUDA-Aware communication. (default: False) + comm_backend_name (string, optional): Set to 'mpi' if needed. (default: 'nccl') + .. _Adam\\: A Method for Stochastic Optimization: + https://arxiv.org/abs/1412.6980 + .. 
_On the Convergence of Adam and Beyond: + https://openreview.net/forum?id=ryQu7f-RZ + """ + + def __init__(self, + params, + deepspeed=None, + lr=1e-3, + bias_correction=True, + betas=(0.9, 0.999), + eps=1e-8, + eps_inside_sqrt=False, + weight_decay=0., + max_grad_norm=0., + var_freeze_step=100000, + var_update_scaler=16, + local_step_scaler=32678, + local_step_clipper=16, + amsgrad=False, + cuda_aware=False, + comm_backend_name='nccl'): + + if amsgrad: + raise RuntimeError('0/1 Adam does not support the AMSGrad variant.') + + defaults = dict(lr=lr, + bias_correction=bias_correction, + betas=betas, + eps=eps, + weight_decay=weight_decay, + max_grad_norm=max_grad_norm) + + super(ZeroOneAdam, self).__init__(params, defaults) + self.eps_mode = 0 if eps_inside_sqrt else 1 + self.deepspeed = deepspeed + self.initialize = False + self.cuda_aware = cuda_aware + self.using_pipeline = False + + self.var_freeze_step = var_freeze_step + self.var_update_scaler = var_update_scaler + self.local_step_scaler = local_step_scaler + self.local_step_clipper = local_step_clipper + self.freeze_key = False + self.reinitial_error_buffer = False + + self.comm_backend_name = comm_backend_name + + assert dist.is_initialized(), "Please initialize the torch distributed backend." + # Empty initializer. Set handle based on the comm backend as follows. + self.comm_backend_handle = None + if self.comm_backend_name == 'nccl': + assert ( + required_torch_version(min_version=1.8) + ), "Please use torch 1.8 or greater to enable NCCL backend in 0/1 Adam. Alternatively, please specify 'mpi' as the 'comm_backend_name' in config file to proceed with the MPI backend" + from deepspeed.runtime.comm.nccl import NcclBackend + self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce') + self.comm_backend_handle = NcclBackend(self.deepspeed.mpu) + elif self.comm_backend_name == 'mpi': + from deepspeed.runtime.comm.mpi import MpiBackend + self.comm_backend_handle = MpiBackend(cuda_aware) + elif self.comm_backend_name == 'hccl': + from deepspeed.runtime.comm.hccl import HcclBackend + self.using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce') + self.comm_backend_handle = HcclBackend(self.deepspeed.mpu) + self.size = self.comm_backend_handle.size + + self.divider = int(self.size * 8 / np.gcd(self.size, 8)) + + def step(self, closure=None, grads=None): + """Performs a single optimization step. + Arguments: + closure (callable, optional): A closure that reevaluates the model + and returns the loss. + grads (list of tensors, optional): weight gradient to use for the + optimizer update. If gradients have type torch.half, parameters + are expected to be in type torch.float. (default: None) + output params (list of tensors, optional): A reduced precision copy + of the updated weights written out in addition to the regular + updated weights. Have to be of same type as gradients. (default: None) + scale (float, optional): factor to divide gradient tensor values + by before applying to weights. 
(default: 1) + """ + loss = None + if closure is not None: + loss = closure() + + if grads is None: + grads_group = [None] * len(self.param_groups) + # backward compatibility + # assuming a list/generator of parameter means single group + elif isinstance(grads, types.GeneratorType): + grads_group = [grads] + elif type(grads[0]) != list: + grads_group = [grads] + else: + grads_group = grads + + for group, grads_this_group in zip(self.param_groups, grads_group): + if grads_this_group is None: + grads_this_group = [None] * len(group['params']) + + bias_correction = 1 if group['bias_correction'] else 0 + + for p, grad in zip(group['params'], grads_this_group): + if p.grad is None and grad is None: + continue + if grad is None: + grad = p.grad.data + if grad.is_sparse: + raise RuntimeError('0/1 Adam does not support sparse gradients') + + state = self.state[p] + + # State initialization + if len(state) == 0: + state['step'] = 0 + # Exponential moving average of gradient values + state['exp_avg'] = torch.zeros_like(p.data) + # Exponential moving average of squared gradient values + state['exp_avg_sq'] = torch.zeros_like(p.data) + + if not self.initialize or 'worker_error' not in state.keys(): + # Some scalars to help scale the variance update/local step policies + state['var_interval'] = 1 + state['var_counter'] = 0 + state['local_step_interval'] = 1 + state['local_step_counter'] = 0 + state['lrs'] = 0 + state['tensor_size'] = torch.numel(p.data) + state['corrected_tensor_size'] = state['tensor_size'] + + if state['tensor_size'] % (self.size * self.divider) != 0: + state['corrected_tensor_size'] += ((self.size * self.divider) - (state['tensor_size'] % + (self.size * self.divider))) + state['server_chunk_size'] = state['corrected_tensor_size'] // self.size + get_accelerator().empty_cache() + state['worker_error'] = torch.zeros(state['corrected_tensor_size'], device=p.device) + state['server_error'] = torch.zeros(state['server_chunk_size'], device=p.device) + # Accumulation of momentum, i.e., the u variable in the 0/1 Adam paper + state['momentum_accumulator'] = torch.zeros_like(p.data) + get_accelerator().empty_cache() + # self.freeze_key = True + if not self.initialize and dist.get_rank() == 0: + print("Cupy Buffers Initialized Successfully.") + + exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] + comm_buffer = state['momentum_accumulator'] + beta1, beta2 = group['betas'] + + state['step'] += 1 + + if self.initialize: + if self.freeze_key is False: + if state['step'] % state['var_interval'] == 0: + exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + else: + if self.size > 1: + with torch.no_grad(): + grad_onebit = self.comm_backend_handle.compressed_allreduce( + grad, state['worker_error'], state['server_error'], self.deepspeed.local_rank) + if 'exp_avg_mask' in group: + if grad_onebit.device != group['exp_avg_mask'].device: + group['exp_avg_mask'] = group['exp_avg_mask'].to(device=grad_onebit.device) + grad_onebit.mul_(group['exp_avg_mask']) + exp_avg.mul_(beta1).add_(1 - beta1, grad_onebit) + else: + exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1) + state['lrs'] += group['lr'] + grad = None + + if not self.initialize: + if self.size > 1: + comm_buffer.set_( + self.comm_backend_handle.compressed_allreduce(comm_buffer, state['worker_error'], + state['server_error'], + self.deepspeed.local_rank)) + if 'exp_avg_mask' in group: + if comm_buffer.device != group['exp_avg_mask'].device: + group['exp_avg_mask'] = 
group['exp_avg_mask'].to(device=comm_buffer.device) + comm_buffer.mul_(group['exp_avg_mask']) + + if self.initialize: + update = exp_avg / (exp_avg_sq.sqrt() + group['eps']) + if group['weight_decay'] > 0.0: + update += group['weight_decay'] * p.data + with torch.no_grad(): + p.data.add_(-group['lr'] * update) + if self.freeze_key is True: + comm_buffer.add_(-group['lr'] * update) + if state['step'] % state['local_step_interval'] == 0 and self.freeze_key: + with torch.no_grad(): + p.data.add_(-1 * comm_buffer) + comm_buffer.mul_(exp_avg_sq.sqrt() + group['eps']) + if self.size > 1: + comm_buffer.copy_( + self.comm_backend_handle.compressed_allreduce(comm_buffer, state['worker_error'], + state['server_error'], + self.deepspeed.local_rank)) + if 'exp_avg_mask' in group: + if comm_buffer.device != group['exp_avg_mask'].device: + group['exp_avg_mask'] = group['exp_avg_mask'].to(device=comm_buffer.device) + comm_buffer.mul_(group['exp_avg_mask']) + exp_avg.zero_().add_(comm_buffer / state['lrs'], alpha=-1) + p.data.add_(comm_buffer / (exp_avg_sq.sqrt() + group['eps'])) + comm_buffer.zero_() + + state['lrs'] = 0 + + # According to 0/1 Adam theory, a fixed variance would allow more accurate estimation of momentum + # However, in practice, we can also disable the manual freezing of variance, since the interval of + # updating variance will increase exponentially, so that it has negligible effect on the estimation. + if self.freeze_key is False: + if state['step'] % state['var_interval'] == 0: + state['var_counter'] += 1 + if state['var_counter'] == self.var_update_scaler: + state['var_counter'] = 0 + state['var_interval'] *= 2 + if (state['step'] + 1) % state['var_interval'] == 0: + if self.using_pipeline: + self.deepspeed.pipeline_enable_backward_allreduce = True + else: + self.deepspeed.enable_backward_allreduce = True + else: + if self.using_pipeline: + self.deepspeed.pipeline_enable_backward_allreduce = False + else: + self.deepspeed.enable_backward_allreduce = False + else: + state['local_step_counter'] += 1 + if state['local_step_counter'] == self.local_step_scaler: + state['local_step_counter'] = 0 + state['local_step_interval'] = min(self.local_step_clipper, + state['local_step_interval'] * 2) + + if not self.initialize: + print('Pop out errors', flush=True) + self.freeze_key = False + state.pop('worker_error') + state.pop('server_error') + + if not self.initialize: + self.initialize = True + print(f"Finished the initialization step at rank {dist.get_rank()}") + return loss + + if self.state[self.param_groups[0]['params'][0]]['step'] > self.var_freeze_step: + self.freeze_key = True + if self.using_pipeline: + self.deepspeed.pipeline_enable_backward_allreduce = False + else: + self.deepspeed.enable_backward_allreduce = False + + if self.freeze_key is True and self.reinitial_error_buffer is False: + # We need to reinitialize the error buffers when local step > 1 since + # the errors will be logged for different metrics (gradient vs. accumulated momentum). + for group in self.param_groups: + for p in group['params']: + self.state[p]['worker_error'].zero_() + self.state[p]['server_error'].zero_() + self.reinitial_error_buffer = True + + return loss + + def load_state_dict(self, state_dict): + """ + Overrides load_state_dict() to add special handling when loading checkpoints + """ + # Because at different stage exp_avg_mask may change (e.g., + # BERT pre-training seqlen 128 and 512 ), we don't use the exp_avg_mask + # in checkpoints but always use the one user provided in training script. 
+ # (See example in DeepSpeedExamples/bing_bert/deepspeed_train.py.) + # Thus here we keep the exp_avg_mask unchanged when loading checkpoint + for i, group in enumerate(self.param_groups): + if 'exp_avg_mask' in group: + state_dict['param_groups'][i]['exp_avg_mask'] = group['exp_avg_mask'] + elif 'exp_avg_mask' not in group and 'exp_avg_mask' in state_dict['param_groups'][i]: + state_dict['param_groups'][i].pop('exp_avg_mask') + super().load_state_dict(state_dict) + if self.state[self.param_groups[0]['params'][0]]['step'] < self.var_freeze_step: + self.var_freeze_key = False + if (self.state[self.param_groups[0]['params'][0]]['step'] + + 1) % self.state[self.param_groups[0]['params'][0]]['var_interval'] == 0: + if self.using_pipeline: + self.deepspeed.pipeline_enable_backward_allreduce = True + else: + self.deepspeed.enable_backward_allreduce = True + else: + if self.using_pipeline: + self.deepspeed.pipeline_enable_backward_allreduce = False + else: + self.deepspeed.enable_backward_allreduce = False + else: + self.var_freeze_key = True + if self.using_pipeline: + self.deepspeed.pipeline_enable_backward_allreduce = False + else: + self.deepspeed.enable_backward_allreduce = False + self.reinitial_error_buffer = False + for group in self.param_groups: + for p in group['params']: + if 'worker_error' in self.state[p]: + self.state[p].pop('worker_error') + if 'server_error' in self.state[p]: + self.state[p].pop('server_error') + if 'momentum_accumulator' in self.state[p]: + self.state[p].pop('momentum_accumulator') diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/unfused_optimizer.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/unfused_optimizer.py new file mode 100644 index 0000000000000000000000000000000000000000..530355f846e2c04dfeaaf1886d79f67aa764908f --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/fp16/unfused_optimizer.py @@ -0,0 +1,428 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Copyright NVIDIA/apex +This file is adapted from FP16_Optimizer in NVIDIA/apex +""" + +from deepspeed.moe.utils import split_params_grads_into_shared_and_expert_params +import torch +from torch._utils import _flatten_dense_tensors + +from deepspeed.runtime.base_optimizer import DeepSpeedOptimizer +from deepspeed.runtime.utils import get_global_norm, CheckOverflow, get_weight_norm +from deepspeed.runtime.fp16.loss_scaler import INITIAL_LOSS_SCALE, SCALE_WINDOW, MIN_LOSS_SCALE +from deepspeed.utils import logger +from deepspeed.utils.torch import required_torch_version +from deepspeed.checkpoint.constants import OPTIMIZER_STATE_DICT +from deepspeed.accelerator import get_accelerator +from deepspeed import comm as dist + + +class FP16_UnfusedOptimizer(DeepSpeedOptimizer): + """ + FP16 Optimizer without weight fusion to support LAMB optimizer + + For usage example please see, TODO: DeepSpeed V2 Tutorial + """ + + def __init__(self, + init_optimizer, + deepspeed=None, + static_loss_scale=1.0, + dynamic_loss_scale=False, + dynamic_loss_args=None, + verbose=True, + mpu=None, + clip_grad=0.0, + fused_lamb_legacy=False): + + self.fused_lamb_legacy = fused_lamb_legacy + self._global_grad_norm = 0. 
+ + if dist.get_rank() == 0: + logger.info(f'Fused Lamb Legacy : {self.fused_lamb_legacy} ') + + if not get_accelerator().is_available(): + raise SystemError("Cannot use fp16 without accelerator.") + self.optimizer = init_optimizer + + # param groups + self.fp16_groups = [] + self.fp32_groups = [] + + # loop to deal with groups + for i, param_group in enumerate(self.optimizer.param_groups): + #fp16 weights that represents the actual model weights + self.fp16_groups.append(param_group['params']) + + #creating a fp32 copy of the weights that will be updated first then + #copied to fp16 weights + fp32_group = [p.clone().float().detach() for p in param_group['params']] + + #in case the internal optimizer needs it + for p in fp32_group: + p.requires_grad = True + + #setting the param groups in the optimizer to point to fp32 + #note these are not the weights used by the model + #the model uses the fp16 version that we added to fp16_group + self.fp32_groups.append(fp32_group) + param_group['params'] = self.fp32_groups[i] + + # we may have a way of fusing dynamic scale. Do not support for now + if dynamic_loss_scale: + self.dynamic_loss_scale = True + self.cur_iter = 0 + self.last_overflow_iter = -1 + self.scale_factor = 2.0 + if dynamic_loss_args is None: + self.cur_scale = 1.0 * 2**16 + self.scale_window = 1000 + self.min_loss_scale = 0.25 + else: + self.cur_scale = dynamic_loss_args[INITIAL_LOSS_SCALE] + self.scale_window = dynamic_loss_args[SCALE_WINDOW] + self.min_loss_scale = dynamic_loss_args[MIN_LOSS_SCALE] + else: + self.dynamic_loss_scale = False + self.cur_iter = 0 + self.cur_scale = static_loss_scale + + self.custom_loss_scaler = False + self.external_loss_scale = None + + self.verbose = verbose + + self.clip_grad = clip_grad + self.norm_type = 2 + + if required_torch_version(max_version=0.4): + self.clip_grad_norm = torch.nn.utils.clip_grad_norm + else: + self.clip_grad_norm = torch.nn.utils.clip_grad_norm_ + + self.mpu = mpu + + self.overflow = False + self.overflow_checker = CheckOverflow(self.fp16_groups, mpu=self.mpu, deepspeed=deepspeed) + + self.initialize_optimizer_states() + + def zero_grad(self, set_to_none=True): + """ + Zero FP16 parameter grads. + """ + # FP32 grad should never exist outside of the step function + # For speed, set model fp16 grad to None by default + for group in self.fp16_groups: + for p in group: + if set_to_none: + p.grad = None + else: + if p.grad is not None: + p.grad.detach_() + p.grad.zero_() + + def step_fused_lamb(self, closure=None): + """ + Not supporting closure. 
+ """ + # First compute norm for all group so we know if there is overflow + grads_groups_flat = [] + grads_groups = [] + norm_groups = [] + expert_norm_groups = [] + for i, group in enumerate(self.fp16_groups): + grads = [ + torch.zeros(p.size(), dtype=p.dtype, device=p.device) if p.grad is None else p.grad for p in group + ] + grads_groups.append(grads) + grads_groups_flat.append(_flatten_dense_tensors(grads)) + grads_for_norm, expert_grads_for_norm = split_params_grads_into_shared_and_expert_params(group) + norm_group_value = 0.0 + if len(grads_for_norm) > 0: + norm_group_value = get_weight_norm(_flatten_dense_tensors(grads_for_norm), mpu=self.mpu) + norm_groups.append(norm_group_value) + expert_norm_group_value = 0.0 + if len(expert_grads_for_norm) > 0: + expert_norm_group_value = get_weight_norm(_flatten_dense_tensors(expert_grads_for_norm), mpu=self.mpu) + expert_norm_groups.append(expert_norm_group_value) + + self.overflow = self.overflow_checker.check_using_norm(norm_groups + expert_norm_groups) + prev_scale = self.cur_scale + + self._update_scale(self.overflow) + if self.overflow: + if self.verbose: + logger.info("[deepspeed] fp16 dynamic loss scale overflow! Skipping step. Attempted loss " + "scale: {}, reducing to {}".format(prev_scale, self.cur_scale)) + return self.overflow + + self._global_grad_norm = get_global_norm(norm_list=norm_groups) + combined_scale = self.unscale_and_clip_grads(self._global_grad_norm, apply_scale=False) + self.optimizer.step(grads=grads_groups, output_params=self.fp16_groups, scale=combined_scale) + + for fp32_group, fp16_group in zip(self.fp32_groups, self.fp16_groups): + for idx, (fp32_param, fp16_param) in enumerate(zip(fp32_group, fp16_group)): + + #remove the fp32 grad + fp32_param.grad = None + + #copy data from fp32 to fp16 + fp16_param.data.copy_(fp32_param.data) + + return self.overflow + + def set_lr(self, lr): + """Set the learning rate.""" + for param_group in self.optimizer.param_groups: + param_group["lr"] = lr + + def get_lr(self): + """Return the current learning rate.""" + return self.optimizer.param_groups[0]["lr"] + + def override_loss_scale(self, loss_scale): + if loss_scale != self.external_loss_scale: + logger.info(f'[deepspeed] setting loss scale from {self.external_loss_scale} -> {loss_scale}') + self.custom_loss_scaler = True + self.external_loss_scale = loss_scale + + def step(self, closure=None): + """ + Not supporting closure. + """ + + if self.fused_lamb_legacy: + return self.step_fused_lamb() + + self.overflow = self.overflow_checker.check() + prev_scale = self.cur_scale + + self._update_scale(self.overflow) + if self.overflow: + if self.verbose: + logger.info("[deepspeed] fp16 dynamic loss scale overflow! Skipping step. 
Attempted loss " + "scale: {}, reducing to {}".format(prev_scale, self.cur_scale)) + return self.overflow + + norm_groups = [] + for i, group in enumerate(self.fp16_groups): + grads_for_norm, _ = split_params_grads_into_shared_and_expert_params(group) + norm_group_value = 0.0 + if len(grads_for_norm) > 0: + norm_group_value = get_weight_norm(grads_for_norm, mpu=self.mpu) + norm_groups.append(norm_group_value) + + # copying gradients to fp32 to work with fp32 parameters + for fp32_param, fp16_param in zip(self.fp32_groups[i], self.fp16_groups[i]): + if fp16_param.grad is None: + fp32_param.grad = torch.zeros(fp16_param.size(), dtype=fp32_param.dtype, device=fp32_param.device) + else: + fp32_param.grad = fp16_param.grad.to(fp32_param.dtype) + + self._global_grad_norm = get_global_norm(norm_list=norm_groups) + self.unscale_and_clip_grads(self._global_grad_norm) + + self.optimizer.step() + + for fp32_group, fp16_group in zip(self.fp32_groups, self.fp16_groups): + for idx, (fp32_param, fp16_param) in enumerate(zip(fp32_group, fp16_group)): + + #remove the fp32 grad + fp32_param.grad = None + + #copy data from fp32 to fp16 + fp16_param.data.copy_(fp32_param.data) + + return self.overflow + + def unscale_and_clip_grads(self, total_norm, apply_scale=True): + # compute combined scale factor for this group + combined_scale = self.cur_scale + if self.clip_grad > 0.: + # norm is in fact norm*scale + clip = ((total_norm / self.cur_scale) + 1e-6) / self.clip_grad + if clip > 1: + combined_scale = clip * self.cur_scale + + if apply_scale: + for group in self.fp32_groups: + for param in group: + if param.grad is not None: + param.grad.data.mul_(1. / combined_scale) + + return combined_scale + + def backward(self, loss, create_graph=False, retain_graph=False): + """ + :attr:`backward` performs the following steps: + + 1. fp32_loss = loss.float() + 2. scaled_loss = fp32_loss*loss_scale + 3. 
scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves + """ + if self.custom_loss_scaler: + scaled_loss = self.external_loss_scale * loss + scaled_loss.backward() + else: + scaled_loss = (loss.float()) * self.cur_scale + scaled_loss.backward(create_graph=create_graph, retain_graph=retain_graph) + + def _update_scale(self, skip): + if self.dynamic_loss_scale: + prev_scale = self.cur_scale + if skip: + self.cur_scale = max(self.cur_scale / self.scale_factor, self.min_loss_scale) + self.last_overflow_iter = self.cur_iter + if self.verbose: + logger.info("Grad overflow on iteration: %s", self.cur_iter) + logger.info(f"Reducing dynamic loss scale from {prev_scale} to {self.cur_scale}") + else: + # Ensure self.scale_window updates since last overflow + stable_interval = (self.cur_iter - self.last_overflow_iter) - 1 + if (stable_interval > 0) and (stable_interval % self.scale_window == 0): + self.cur_scale *= self.scale_factor + if self.verbose: + logger.info(f"No Grad overflow for {self.scale_window} iterations") + logger.info(f"Increasing dynamic loss scale from {prev_scale} to {self.cur_scale}") + else: + if skip: + logger.info("Grad overflow on iteration %s", self.cur_iter) + logger.info("Using static loss scale of %s", self.cur_scale) + self.cur_iter += 1 + return + + # Promote state so it can be retrieved or set via "fp16_optimizer_instance.state" + def _get_state(self): + return self.optimizer.state + + def _set_state(self, value): + self.optimizer.state = value + + state = property(_get_state, _set_state) + + # Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups" + # (for example, to adjust the learning rate) + def _get_param_groups(self): + return self.optimizer.param_groups + + def _set_param_groups(self, value): + self.optimizer.param_groups = value + + param_groups = property(_get_param_groups, _set_param_groups) + + # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale" + def _get_loss_scale(self): + if self.custom_loss_scaler: + return self.external_loss_scale + else: + return self.cur_scale + + def _set_loss_scale(self, value): + self.loss_scaler.cur_scale = value + + loss_scale = property(_get_loss_scale, _set_loss_scale) + + def state_dict(self): + """ + Returns a dict containing the current state of this :class:`FP16_Optimizer` instance. + This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict + of the contained Pytorch optimizer. 
+ Example:: + checkpoint = {} + checkpoint['model'] = model.state_dict() + checkpoint['optimizer'] = optimizer.state_dict() + torch.save(checkpoint, "saved.pth") + """ + state_dict = {} + state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale + state_dict['cur_scale'] = self.cur_scale + state_dict['cur_iter'] = self.cur_iter + if state_dict['dynamic_loss_scale']: + state_dict['last_overflow_iter'] = self.last_overflow_iter + state_dict['scale_factor'] = self.scale_factor + state_dict['scale_window'] = self.scale_window + state_dict[OPTIMIZER_STATE_DICT] = self.optimizer.state_dict() + state_dict['fp32_groups'] = self.fp32_groups + return state_dict + + # Refresh fp32 master params from fp16 copies + def refresh_fp32_params(self): + for current_group, saved_group in zip(self.fp32_groups, self.fp16_groups): + for current, saved in zip(current_group, saved_group): + current.data.copy_(saved.data) + + def load_state_dict(self, state_dict, load_optimizer_states=True): + """ + Loads a state_dict created by an earlier call to state_dict(). + If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``, + whose parameters in turn came from ``model``, it is expected that the user + will call ``model.load_state_dict()`` before + ``fp16_optimizer_instance.load_state_dict()`` is called. + Example:: + model = torch.nn.Linear(D_in, D_out).to(get_accelerator().device_name()).half() + optimizer = torch.optim.SGD(model.parameters(), lr=1e-3) + optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0) + ... + checkpoint = torch.load("saved.pth") + model.load_state_dict(checkpoint['model']) + optimizer.load_state_dict(checkpoint['optimizer']) + """ + # I think it should actually be ok to reload the optimizer before the model. + self.dynamic_loss_scale = state_dict['dynamic_loss_scale'] + self.cur_scale = state_dict['cur_scale'] + self.cur_iter = state_dict['cur_iter'] + if state_dict['dynamic_loss_scale']: + self.last_overflow_iter = state_dict['last_overflow_iter'] + self.scale_factor = state_dict['scale_factor'] + self.scale_window = state_dict['scale_window'] + + if load_optimizer_states: + self.optimizer.load_state_dict(state_dict[OPTIMIZER_STATE_DICT]) + # At this point, the optimizer's references to the model's fp32 parameters are up to date. + # The optimizer's hyperparameters and internal buffers are also up to date. + # However, the fp32 master copies of the model's fp16 params stored by the optimizer are still + # out of date. There are two options. + # 1: Refresh the master params from the model's fp16 params. + # This requires less storage but incurs precision loss. + # 2: Save and restore the fp32 master copies separately. + # We choose option 2. + # + # Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device + # of their associated parameters, because it's possible those buffers might not exist yet in + # the current optimizer instance. In our case, as long as the current FP16_Optimizer has been + # constructed in the same way as the one whose state_dict we are loading, the same master params + # are guaranteed to exist, so we can just copy_() from the saved master params. 
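Editor's note — a small sketch of the two restore options weighed in the comment above (illustrative, assuming a single flat list of parameters rather than the nested group structure used here): option 1 would rebuild the fp32 master copies from the fp16 weights and lose their low-order bits, while option 2, the one implemented below, copies the saved fp32 masters directly and keeps full precision.

import torch

fp16_params = [torch.randn(4).half()]                          # model weights (fp16)
saved_fp32_masters = [p.float() + 1e-4 for p in fp16_params]   # checkpointed masters

# Option 1: refresh masters from the fp16 weights -- cheap, but the extra
# precision carried by the fp32 masters (the +1e-4 here) is lost.
masters_opt1 = [p.clone().float() for p in fp16_params]

# Option 2 (used below): copy the saved fp32 masters into the live masters,
# preserving full precision.
masters_opt2 = [torch.empty_like(s) for s in saved_fp32_masters]
for current, saved in zip(masters_opt2, saved_fp32_masters):
    current.data.copy_(saved.data)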
+ for current_group, saved_group in zip(self.fp32_groups, state_dict['fp32_groups']): + for current, saved in zip(current_group, saved_group): + current.data.copy_(saved.data) + + def __repr__(self): + return repr(self.optimizer) + + def initialize_optimizer_states(self): + for i, group in enumerate(self.fp16_groups): + for param in group: + param.grad = torch.zeros(param.size(), + dtype=param.dtype, + device=get_accelerator().current_device_name()) + + for i, group in enumerate(self.fp32_groups): + for param in group: + param.grad = torch.zeros(param.size(), + dtype=param.dtype, + device=get_accelerator().current_device_name()) + + self.optimizer.step() + + for i, group in enumerate(self.fp16_groups): + for param in group: + param.grad = None + + for i, group in enumerate(self.fp32_groups): + for param in group: + param.grad = None diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/hybrid_engine.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/hybrid_engine.py new file mode 100644 index 0000000000000000000000000000000000000000..a991c43045635dbc1c11f0bfb9bcd1e3a59ca25f --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/hybrid_engine.py @@ -0,0 +1,446 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from deepspeed.inference.config import DeepSpeedInferenceConfig +from deepspeed.module_inject.replace_policy import replace_policies +from deepspeed.module_inject.utils import policy_to_ds_container +from .engine import DeepSpeedEngine +from .utils import TLinear, get_inactive_params +from deepspeed.runtime.zero import GatheredParameters +import time +import gc +import math +from deepspeed import comm as dist +from deepspeed.accelerator import get_accelerator +from torch import nn +from deepspeed.utils import logger + +from deepspeed.ops.op_builder import InferenceBuilder + +from deepspeed.module_inject.layers import LinearLayer, Normalize, EmbeddingLayer, OPTEmbedding +try: + import transformers + OPTLearnedPositionalEmbedding = transformers.models.opt.modeling_opt.OPTLearnedPositionalEmbedding +except: + OPTLearnedPositionalEmbedding = None +inference_cuda_module = None + + +class DeepSpeedHybridEngine(DeepSpeedEngine): + r"""DeepSpeed engine for training and inference.""" + inference_mp_group = None + + def __init__(self, args, model, **kwargs): + + super().__init__(args, model, **kwargs) + + # synch seed between all GPUs + _rng_state = get_accelerator().get_rng_state().to(get_accelerator().current_device_name()) + dist.broadcast(_rng_state, 0) + get_accelerator().set_rng_state(_rng_state.cpu()) + + self.Z3_enabled = (self._config.zero_config.stage == 3) + self.gather_all_layers = self._config.hybrid_engine.pin_parameters + + # inference containers / fwds + self._inference_containers = [] + self._orig_modules = [] + self._orig_fwds = [] + self.create_inference_module() + + # Performance stats + self._t_start = None + self._total_latency = 0 + self._iters = 0 + self._training_start_time = None + self._generate_latency = 0 + self._training_latency = 0 + self._total_batch_size = None + self._gather_latency = 0 + + global inference_cuda_module + if inference_cuda_module is None: + builder = InferenceBuilder() + inference_cuda_module = builder.load() + + self.is_lora_fused = False + + def convert_to_linear_transposed(self, model): + + def _replace_linear_layer(r_module, parent_type=None, prev_type=None): + for name, child in r_module.named_children(): + if child.__class__ in 
[torch.nn.Linear] and \ + (parent_type is torch.nn.ModuleList or prev_type is torch.nn.ModuleList): + setattr(r_module, name, TLinear(child, name)) + else: + _replace_linear_layer(child, type(r_module), prev_type=parent_type) + return r_module + + _replace_linear_layer(model) + + def new_inference_container(self, orig_layer, policy_cls, layer_id): + policy = policy_cls(orig_layer, inference=True) + + if self._config.fp16_enabled: + inference_dtype = torch.float16 + elif self._config.bfloat16_enabled: + inference_dtype = torch.bfloat16 + else: + inference_dtype = torch.float32 + + _container = policy_to_ds_container( + policy=policy, + config=DeepSpeedInferenceConfig( + set_empty_params=True, + dtype=inference_dtype, + max_out_tokens=self._config.hybrid_engine.max_out_tokens, + min_out_tokens=self._config.hybrid_engine.max_out_tokens, + transposed_mode=True, + ), + model_config=self.module.config if hasattr(self.module, 'config') else None, + layer_id=layer_id, + child=orig_layer) + + if self.mpu is not None: + if hasattr(self.mpu, 'get_model_parallel_world_size'): + _container.set_tensor_parallel_config(self.mpu.get_model_parallel_world_size(), + self.mpu.get_model_parallel_group()) + else: + _container.set_tensor_parallel_config(self.mpu.get_tensor_model_parallel_world_size(), + self.mpu.get_tensor_model_parallel_group()) + else: + _container.set_tensor_parallel_config(self._config.hybrid_engine.inference_tp_size, self.mp_group) + _container.initialize_tensors(enable_training=True) + _container.create_ds_model_config() + _container.create_module() + _container.set_params_wo_copy(Z3_enabled=self.Z3_enabled) + return _container + + def populate_all_inference_policies(self): + self.inference_policies = {} + for plcy in replace_policies: + _ = plcy(None) + if isinstance(plcy._orig_layer_class, list): + for orig_layer_class in plcy._orig_layer_class: + self.inference_policies.update({orig_layer_class: (self.new_inference_container, plcy)}) + elif plcy._orig_layer_class is not None: + self.inference_policies.update({plcy._orig_layer_class: (self.new_inference_container, plcy)}) + self.inference_policies.update({ + nn.Linear: (LinearLayer, ), + nn.Embedding: (EmbeddingLayer, ), + nn.LayerNorm: (Normalize, ), + OPTLearnedPositionalEmbedding: (OPTEmbedding, ) + }) + + def _fuse_lora_layer(self, layer_id): + self._inference_containers[layer_id].fuse_lora() + + def fuse_lora_weight(self): + for layer_id in range(len(self.layer_params)): + self._fuse_lora_layer(layer_id) + + def _unfuse_lora_layer(self, layer_id): + self._inference_containers[layer_id].unfuse_lora() + + def unfuse_lora_weight(self): + for layer_id in range(len(self.layer_params)): + self._unfuse_lora_layer(layer_id) + + def unfuse_lora_weight_non_pinned(self): + for layer_id in range(len(self.layer_params)): + non_active_params = get_inactive_params(self.layer_params[layer_id]) + non_active_lora_params = get_inactive_params(self.layer_lora_params[layer_id]) + non_active_params.extend(non_active_lora_params) + + with GatheredParameters(non_active_params): + self._unfuse_lora_layer(layer_id) + + def retake_inference_cache(self): + if self._config.hybrid_engine.release_inference_cache: + retake_success = inference_cuda_module.retake_workspace() + + if not retake_success: + logger.warning("Unable to acquire workspace on first attempt, emptying cache and retrying.") + gc.collect() + get_accelerator().empty_cache() + retake_success = inference_cuda_module.retake_workspace() + + if not retake_success: + raise RuntimeError("Unable to retake 
inference workspace.") + + def generate(self, *inputs, **kwargs): + if self._total_batch_size is None: + bsz = inputs[0].shape[0] if len(inputs) > 0 else \ + kwargs['input_ids'].shape[0] + self._total_batch_size = bsz * dist.get_world_size() + + self._t0 = time.time() + + if self.Z3_enabled and self.gather_all_layers: + if self._config.hybrid_engine.inference_tp_size > 1: + non_tp_params = [] + for other_layer in self._other_layers: + non_tp_params.extend(list(other_layer.parameters())) + + partition_size = self._config.hybrid_engine.tp_gather_partition_size + + layer_groups = math.ceil(len(self.layer_params) / partition_size) + for lg in range(layer_groups): + non_active_params = [] + non_active_lora_params = [] + for layer_id in range(lg * partition_size, min(len(self.layer_params), (lg + 1) * partition_size), + 1): + non_tp_params.extend(self.layer_params[layer_id][:4]) + non_active_params.extend(get_inactive_params(self.layer_params[layer_id])) + non_active_params.extend(get_inactive_params(self.layer_lora_params[layer_id])) + with GatheredParameters(non_active_params): + for layer_id in range(lg * partition_size, + min(len(self.layer_params), (lg + 1) * partition_size), 1): + if len(self.all_lora_params) > 0: + self._fuse_lora_layer(layer_id) + + if self.mpu is not None: + self._inference_containers[layer_id].apply_tensor_parallelism(self.mp_replace, + reversed_dim=True) + + # TODO(cmikeh2) Evaluate if this can be deferred when release_inference_cache + # is enabled. + gc.collect() + get_accelerator().empty_cache() + + self._gather_latency = time.time() - self._t0 + + input_shape = inputs[0].shape if len(inputs) > 0 else \ + kwargs['input_ids'].shape + output = torch.zeros( + (input_shape[0] * self._config.hybrid_engine.inference_tp_size, ) + input_shape[1:], + dtype=inputs[0].dtype if len(inputs) > 0 else kwargs['input_ids'].dtype, + device=inputs[0].device if len(inputs) > 0 else kwargs['input_ids'].device) + input_cont = inputs[0].contiguous() if len(inputs) > 0 else kwargs['input_ids'].contiguous() + dist.all_gather_into_tensor(output, input_cont, group=self.mp_group) + + if len(inputs) > 0: + inputs = (output, *inputs[1:]) + else: + kwargs['input_ids'] = output + + self.retake_inference_cache() + + non_active_params = get_inactive_params(non_tp_params) + with GatheredParameters(non_active_params): + generate_ret_vals = self._generate(*inputs, **kwargs) + + for layer_id in range(len(self.layer_params)): + self._inference_containers[layer_id].release_memory() + + rank = dist.get_rank(group=self.mp_group) + generate_ret_vals = generate_ret_vals[input_shape[0] * rank:input_shape[0] * (rank + 1)] + + else: + non_active_layers = get_inactive_params(self.all_layers_params) + non_active_lora_params = get_inactive_params(self.all_lora_params) + non_active_layers.extend(non_active_lora_params) + with GatheredParameters(non_active_layers): + self._gather_latency = time.time() - self._t0 + + if len(self.all_lora_params) > 0: + self.fuse_lora_weight() + + self.retake_inference_cache() + generate_ret_vals = self._generate(*inputs, **kwargs) + + if len(self.all_lora_params) > 0: + self.unfuse_lora_weight() + else: + if len(self.all_lora_params) > 0 and (not self.Z3_enabled): + self.fuse_lora_weight() + + self.retake_inference_cache() + generate_ret_vals = self._generate(*inputs, **kwargs) + + if len(self.all_lora_params) > 0: + if (not self.Z3_enabled): + self.unfuse_lora_weight() + else: + self.unfuse_lora_weight_non_pinned() + self.is_lora_fused = False + + if 
self._config.hybrid_engine.release_inference_cache: + inference_cuda_module.release_workspace() + gc.collect() + get_accelerator().empty_cache() + + self._generate_latency = time.time() - self._t0 - self._gather_latency + + return generate_ret_vals + + def create_inference_containers(self, module, layer_id=0): + for name, child in module.named_children(): + if child.__class__ in self.inference_policies: + if self.inference_policies[child.__class__][0] == self.new_inference_container: + self._inference_containers.append(self.inference_policies[child.__class__][0]( + child, self.inference_policies[child.__class__][-1], layer_id)) + self._orig_modules.append(child) + self._orig_fwds.append(child.forward) + + self.layer_params.append(self._inference_containers[layer_id].get_all_params()) + + self.lora_params.append(self._inference_containers[layer_id].get_lora_params()) + self.layer_lora_params.append([]) + for lora_param in self.lora_params[layer_id]: + self.layer_lora_params[layer_id].extend(lora_param[:-1]) + self.all_lora_params.extend(lora_param[:-1]) + + layer_id += 1 + else: + self._other_layers.append(self.inference_policies[child.__class__][0]( + weight=child.weight, bias=child.bias if hasattr(child, 'bias') else None)) + self._orig_modules_others.append(child) + self._orig_fwds_others.append(child.forward) + else: + self.create_inference_containers(child, layer_id=layer_id) + + def create_inference_module(self): + self.layer_params = [] + self.layer_lora_params = [] + self.lora_params = [] + self.all_lora_params = [] + + self._other_layers = [] + self._orig_modules_others = [] + self._orig_fwds_others = [] + + if self._config.hybrid_engine.inference_tp_size > 1: + if self.mpu is None: + global_rank = dist.get_rank() + world_size = dist.get_world_size() + mp_group_id = global_rank // self._config.hybrid_engine.inference_tp_size + num_mp_groups = world_size // self._config.hybrid_engine.inference_tp_size + for mp_group_id in range(num_mp_groups): + ranks = list( + range(mp_group_id * self._config.hybrid_engine.inference_tp_size, \ + (mp_group_id + 1) * self._config.hybrid_engine.inference_tp_size, \ + 1) + ) + mp_group = dist.new_group(ranks) + if global_rank in ranks: + # mp_group is used for broader collective + self.mp_group = mp_group + + # mp_replace is used for container tensor slicing + from deepspeed.module_inject import ReplaceWithTensorSlicing + self.mp_replace = ReplaceWithTensorSlicing( + mp_group=self.mp_group, + mp_size=self._config.hybrid_engine.inference_tp_size, + out_dim=0, + in_dim=1) + + else: + self.mp_group = self.mpu.get_model_parallel_group() if hasattr(self.mpu, 'get_model_parallel_group') else \ + self.mpu.get_tensor_model_parallel_group() + + from deepspeed.module_inject import ReplaceWithTensorSlicing + self.mp_replace = ReplaceWithTensorSlicing(mp_group=self.mp_group, + mp_size=self._config.hybrid_engine.inference_tp_size, + out_dim=0, + in_dim=1) + else: + self.mp_group = None + self.mp_replace = None + self.populate_all_inference_policies() + self.all_layers_params = list(self.module.parameters()) + self.create_inference_containers(self.module) + + if len(self._inference_containers) > 0: + self._generate = self.module.generate + self.module.generate = self.generate + + self._t0 = time.time() + + def _zero3_forward(self, layer_id): + + def run_forward(*inputs, **kwargs): + non_active_params = get_inactive_params(self.layer_params[layer_id]) + non_active_lora_params = get_inactive_params(self.layer_lora_params[layer_id]) + 
non_active_params.extend(non_active_lora_params) + + with GatheredParameters(non_active_params): + if len(self.all_lora_params) > 0: + # Use the is_lora_fused flag to prevent multiple fusion in Z3 with non-pinned memory + if not self.is_lora_fused: + self._fuse_lora_layer(layer_id) + # Set the is_lora_fused to true when reaching the last layer + if layer_id == len(self.layer_params) - 1: + self.is_lora_fused = True + return self._inference_containers[layer_id].module.forward(*inputs, **kwargs) + + return run_forward + + def eval(self): + if self._t_start is not None: + latency = time.time() - self._t_start + self._total_latency = self._total_latency + latency + self._iters = self._iters + 1 + if not dist.is_initialized() or dist.get_rank() == 0: + if self._total_batch_size is not None: + cur_samples_p_sec = f'|CurSamplesPerSec={(1 / latency * self._total_batch_size):.2f} ' + avg_samples_p_sec = f'|AvgSamplesPerSec={(1 / (self._total_latency / self._iters) * self._total_batch_size):.2f}' + else: + cur_samples_p_sec = '' + avg_samples_p_sec = '' + others = latency - (self._generate_latency + self._training_latency) + print(f'|E2E latency={(latency):.2f}s ' + \ + f'|Gather latency={self._gather_latency:.2f}s ({(self._gather_latency / latency * 100):.2f}%) ' + f'|Generate time={(self._generate_latency):.2f}s ({(self._generate_latency / latency * 100):.2f}%) ' + \ + f'|Training time={(self._training_latency):.2f}s ({(self._training_latency / latency * 100):.2f}%) ' + \ + f'|Others={others:.2f} ({(others / latency * 100):.2f}%)' + \ + cur_samples_p_sec + \ + avg_samples_p_sec) + self._t_start = time.time() + self._training_latency = 0 + super().eval() + if len(self._inference_containers) > 0: + for i, (orig_module, inference_container) in enumerate(zip(self._orig_modules, + self._inference_containers)): + if self.Z3_enabled and not self.gather_all_layers: + orig_module.forward = self._zero3_forward(i) + else: + orig_module.forward = inference_container.module.forward + + inference_container.transform_for_inference() + + if not self.Z3_enabled or self.gather_all_layers: + for orig_module, inference_layer in zip(self._orig_modules_others, self._other_layers): + orig_module.forward = inference_layer.forward + if self.Z3_enabled: + gc.collect() + get_accelerator().empty_cache() + if self._t_start is None: + self._t_start = time.time() + + def train(self, mode=True): + if mode and len(self._orig_modules) > 0: + for inference_container, orig_module, orig_fwd in zip(self._inference_containers, self._orig_modules, + self._orig_fwds): + inference_container.transform_for_training() + orig_module.forward = orig_fwd + for orig_module, orig_fwd in zip(self._orig_modules_others, self._orig_fwds_others): + orig_module.forward = orig_fwd + super().train(mode) + if mode: + self._training_start_time = time.time() + + def step(self, lr_kwargs=None): + super().step(lr_kwargs=lr_kwargs) + + if len(self._inference_containers) > 0: + if not self.Z3_enabled: + for inference_container in self._inference_containers: + inference_container.reset_params() + + if self._training_start_time is not None: + self._training_latency += (time.time() - self._training_start_time) + self._training_start_time = time.time() diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/progressive_layer_drop.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/progressive_layer_drop.py new file mode 100644 index 0000000000000000000000000000000000000000..bd3be8f13011a4c9322839ff8a631778e7fb6847 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/deepspeed/runtime/progressive_layer_drop.py @@ -0,0 +1,40 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import numpy as np +from deepspeed.utils import log_dist + + +class ProgressiveLayerDrop(object): + r""" Progressive Layer Dropping (PLD) for model training. + This implements the PLD technique for compressed model training + from this paper: https://arxiv.org/pdf/2010.13369.pdf + Args: + theta (float): a hyper-parameter that controls the trade-off between training time and robustness. + The lower the theta value, the faster the training speed. Default value: 0.5. + gamma (float): a hyper-parameter that controls how fast the drop ratio increases. Default value: 0.001. + """ + + def __init__(self, theta=0.5, gamma=0.001): + super().__init__() + + self.theta = theta + self.gamma = gamma + self.current_theta = 1.0 + log_dist(f'Enabled progressive layer dropping (theta = {self.theta})', ranks=[0]) + + def get_state(self): + kwargs = {'progressive_layer_drop': True, 'pld_theta': self.get_theta()} + return kwargs + + def get_theta(self): + return self.current_theta + + def update_state(self, global_step): + + def _prob(x, gamma, p): + return (1. - p) * np.exp(-gamma * x) + p + + self.current_theta = _prob(global_step, self.gamma, self.theta) diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/sparse_tensor.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/sparse_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..291ba5f0c78668b793bea12749bd0a188b680ece --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/sparse_tensor.py @@ -0,0 +1,69 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Implementation of a compressed sparse tensor. Similar in +functionality to TensorFlow's IndexedSlices implementation. 
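Before the sparse tensor module continues below, a quick illustration of the ProgressiveLayerDrop schedule added above may help: `update_state(global_step)` moves the keep probability theta(t) = (1 - theta) * exp(-gamma * t) + theta from 1.0 down toward the configured floor. This is a minimal sketch and is not part of the diff; it only assumes the module path shown in the hunk header (deepspeed/runtime/progressive_layer_drop.py).

```python
from deepspeed.runtime.progressive_layer_drop import ProgressiveLayerDrop

# theta=0.5 is the floor of the keep probability; gamma controls how fast it decays.
pld = ProgressiveLayerDrop(theta=0.5, gamma=0.001)
for step in (0, 500, 2000, 10000):
    pld.update_state(step)
    # get_state() is the dict a training loop would forward to the model's forward pass.
    print(step, round(pld.get_theta(), 4), pld.get_state())
# step 0 prints theta=1.0 (keep every layer); large steps approach the 0.5 floor.
```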
+""" + +import torch + + +class SparseTensor(object): + """ Compressed Sparse Tensor """ + + def __init__(self, dense_tensor=None): + self.orig_dense_tensor = dense_tensor + self.dtype = self.orig_dense_tensor.dtype + self.is_sparse = dense_tensor.is_sparse + if dense_tensor is not None: + if dense_tensor.is_sparse: + dense_tensor = dense_tensor.coalesce() + self.indices = dense_tensor.indices().flatten() + self.values = dense_tensor.values() + else: + result = torch.sum(dense_tensor, dim=1) + self.indices = result.nonzero().flatten() + self.values = dense_tensor[self.indices] + self.dense_size = list(dense_tensor.size()) + else: + self.indices = None + self.values = None + self.dense_size = None + + def to_coo_tensor(self): + return torch.sparse_coo_tensor(self.indices.unsqueeze(0), self.values, self.dense_size) + + @staticmethod + def type(): + return "deepspeed.SparseTensor" + + def to_dense(self): + it = self.indices.unsqueeze(1) + full_indices = torch.cat([it for _ in range(self.dense_size[1])], dim=1) + return self.values.new_zeros(self.dense_size).scatter_add_(0, full_indices, self.values) + + def sparse_size(self): + index_size = list(self.indices.size()) + index_size = index_size[0] + value_size = list(self.values.size()) + value_size = value_size[0] * value_size[1] + dense_size = self.dense_size[0] * self.dense_size[1] + return index_size + value_size, dense_size + + def add(self, b): + assert self.dense_size == b.dense_size + self.indices = torch.cat([self.indices, b.indices]) + self.values = torch.cat([self.values, b.values]) + + def __str__(self): + sparse_size, dense_size = self.sparse_size() + return "DeepSpeed.SparseTensor(indices_size={}, values_size={}, " \ + "dense_size={}, device={}, reduction_factor={})".format( + self.indices.size(), self.values.size(), self.dense_size, + self.indices.get_device(), dense_size / sparse_size + ) + + def __repr__(self): + return self.__str__() diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/state_dict_factory.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/state_dict_factory.py new file mode 100644 index 0000000000000000000000000000000000000000..c2db85d1ba58f16dbe59b1dcd3d3066ab11ff6d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/state_dict_factory.py @@ -0,0 +1,427 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import os +import copy +import collections +import json +from abc import ABC, abstractmethod + +from deepspeed.utils import logger +from deepspeed.runtime.checkpoint_engine.torch_checkpoint_engine import TorchCheckpointEngine + +from .weight_quantizer import WeightQuantization + +AUTO_MODULE_KEY = 'auto' + + +class SDLoaderFactory: + + @staticmethod + def get_sd_loader_json(json_file, checkpoint_engine): + if isinstance(json_file, str): + with open(json_file) as f: + data = json.load(f) + else: + assert isinstance(json_file, dict) + data = json_file + sd_type = data['type'] + ckpt_list = data['checkpoints'] + version = data['version'] + ckpt_type = data.get('parallelization', 'pp') + mp_size = data.get('mp_size', 0) + if sd_type.lower() in ['bloom', 'ds_model']: + return data + return SDLoaderFactory.get_sd_loader(ckpt_list, checkpoint_engine, sd_type, version) + + @staticmethod + def get_sd_loader(ckpt_list, checkpoint_engine, sd_type='Megatron', version=None): + if sd_type == 'Megatron': + return MegatronSDLoader(ckpt_list, version, checkpoint_engine) + else: + assert False, '{} checkpoint type is not supported'.format(sd_type) + + +class SDLoaderBase(ABC): + + def __init__(self, ckpt_list, version, checkpoint_engine): + self.module_key = None + self.ckpt_list = ckpt_list + self.version = version + self.checkpoint_engine = TorchCheckpointEngine() if checkpoint_engine is None else checkpoint_engine + self.check_ckpt_list() + + def load(self, + mp_world_size, + mp_rank, + module_key=AUTO_MODULE_KEY, + is_pipe_parallel=False, + quantize=False, + quantize_bits=8, + quantize_groups=64, + mlp_extra_grouping=True): + self.module_key = module_key + num_ckpt = len(self.ckpt_list) + idx = mp_rank * num_ckpt // mp_world_size + """ We have multiple cases to handle here for both training and inference: + 1. PipeModule loading mp_rank_*.pt files, is_pipe_parallel=True, module_key is not None + a. if no mp_size/pp_size resizing occurs, for both training & inference, loading + the mp_rank related checkpoint directly. + b. if has mp_size/pp_size resizing, only Megatron model inference is supported, + in this case each mp_rank_*.pt have same content, we will load the first checkpoint + file (idx=0), to avoid idx exceeding file list boundary. + + 2. PipeModule loading layer_*.pt files, is_pipe_parallel=True, module_key is None + a. if no mp_size resizing occurs, for both training & inference, loading + the mp_rank related checkpoint directly. + b. if has mp_size resizing, only Megatron model inference is supported, + checkpoint file(s) will be merged/split according to mp_rank, mp_world_size and + checkpoint file list. + + 3. Non-PipeModule loading mp_rank_*.pt files, is_pipe_parallel=False + Same with case (2). 
+ """ + if is_pipe_parallel and module_key is not None and mp_world_size != num_ckpt: + mp_world_size = num_ckpt + idx = 0 + + load_path = self.ckpt_list[idx] + + merge_count = 1 + if num_ckpt == mp_world_size: + assert os.path.exists(load_path) + #logger.info(f'rank: {mp_rank} loading checkpoint: {load_path}') + sd = self.checkpoint_engine.load(load_path, map_location=lambda storage, \ + loc: storage) + + if quantize: + quantizer = WeightQuantization(mlp_extra_grouping=mlp_extra_grouping, mp_size=mp_world_size) + sd_module, all_scales = quantizer.sd_quantize_megatron(self.get_module(sd), quantize_bits, + quantize_groups) + self.set_module(sd, sd_module) + else: + all_scales = None + elif num_ckpt > mp_world_size: + sd, all_scales, merge_count = self.merge_state_dict(mp_world_size, mp_rank, quantize, \ + quantize_bits, quantize_groups, mlp_extra_grouping) + else: + sd, all_scales = self.split_state_dict(mp_world_size, mp_rank, quantize, quantize_bits, \ + quantize_groups, mlp_extra_grouping) + return load_path, sd, (all_scales, merge_count) + + def get_merge_state_dicts(self, mp_world_size, mp_rank): + num_ckpt = len(self.ckpt_list) + assert num_ckpt % mp_world_size == 0, 'Invalid checkpoints and world size for sd merge' + + num_to_merge = num_ckpt // mp_world_size + ckpt_list = [self.ckpt_list[i] for i in range(num_to_merge * mp_rank, num_to_merge * (mp_rank + 1))] + + logger.info(f"mp_rank: {mp_rank}, ckpt_list: {ckpt_list}") + sd_list = [self.checkpoint_engine.load(ckpt, map_location=lambda storage, loc: storage) for ckpt in ckpt_list] + return sd_list + + def get_split_state_dict(self, mp_world_size, mp_rank): + num_ckpt = len(self.ckpt_list) + assert mp_world_size % num_ckpt == 0, 'Invalid checkpoints and world size for sd split' + + num_to_split = mp_world_size // num_ckpt + ckpt_index = mp_rank // num_to_split + ckpt_offset = mp_rank % num_to_split + + logger.info(f"mp_rank: {mp_rank}, ckpt_list: {self.ckpt_list[ckpt_index]}, offset: {ckpt_offset}") + + sd = self.checkpoint_engine.load(self.ckpt_list[ckpt_index], map_location=lambda storage, loc: storage) + + return sd, num_to_split, ckpt_offset + + def _choose_module_key(self, sd): + assert not ('module' in sd + and 'model' in sd), "checkpoint has both 'model' and 'module' keys, not sure how to proceed" + assert 'module' in sd or 'model' in sd, "checkpoint contains neither 'model' or 'module' keys, not sure how to proceed" + if 'module' in sd: + return 'module' + elif 'model' in sd: + return 'model' + + def get_module(self, sd): + if self.module_key is None: + return sd + elif self.module_key == AUTO_MODULE_KEY: + return sd[self._choose_module_key(sd)] + else: + return sd[self.module_key] + + def set_module(self, sd, module): + if self.module_key is None: + sd = module + elif self.module_key == AUTO_MODULE_KEY: + sd[self._choose_module_key(sd)] = module + else: + sd[self.module_key] = module + return sd + + def check_ckpt_list(self): + #logger.info(f'checkpoint file list: {self.ckpt_list}') + assert len(self.ckpt_list) > 0 + + sd = self.checkpoint_engine.load(self.ckpt_list[0], map_location=lambda storage, loc: storage) + + # check checkpoint count is same with saved mp_world_size + if 'mp_world_size' in sd.keys(): + assert len(self.ckpt_list) == sd[ + 'mp_world_size'], f"checkpoint count {len(self.ckpt_list)} is different from saved mp_world_size {sd['mp_world_size']}" + + @abstractmethod + def merge_state_dict(self, mp_world_size, mp_rank, quantize, quantize_bits, groups, mlp_extra_grouping): + pass + + @abstractmethod + def 
split_state_dict(self, mp_world_size, mp_rank, quantize, quantize_bits, groups, mlp_extra_grouping): + pass + + @abstractmethod + def sanity_check(self, ckpt_file_name): + pass + + +class MegatronSDLoader(SDLoaderBase): + + def __init__(self, ckpt_list, version, checkpoint_engine): + super().__init__(ckpt_list, version, checkpoint_engine) + """ + ## Q/K/V data need special processing + key: transformer.layers.0.attention.query_key_value.weight, shape: torch.Size([3192, 4256]) + key: transformer.layers.0.attention.query_key_value.bias, shape: torch.Size([3192]) + + ## merge or split on axis=0 + key: word_embeddings.weight, shape: torch.Size([12672, 4256]) + key: transformer.layers.0.mlp.dense_h_to_4h.bias, shape: torch.Size([4256]) + key: transformer.layers.0.mlp.dense_h_to_4h.weight, shape: torch.Size([4256, 4256]) + + ## merge or split on axis=1 + key: transformer.layers.0.attention.dense.weight, shape: torch.Size([4256, 1064]) + key: transformer.layers.0.mlp.dense_4h_to_h.weight, shape: torch.Size([4256, 4256]) + + ## no change required + key: transformer.layers.0.mlp.dense_4h_to_h.bias, shape: torch.Size([4256]) + key: transformer.final_layernorm.weight, shape: torch.Size([4256]) + key: transformer.final_layernorm.bias, shape: torch.Size([4256]) + key: transformer.layers.0.attention.dense.bias, shape: torch.Size([4256]) + key: transformer.layers.0.post_attention_layernorm.weight, shape: torch.Size([4256]) + key: transformer.layers.0.post_attention_layernorm.bias, shape: torch.Size([4256]) + key: transformer.layers.0.input_layernorm.weight, shape: torch.Size([4256]) + key: transformer.layers.0.input_layernorm.bias, shape: torch.Size([4256]) + key: position_embeddings.weight, shape: torch.Size([1024, 4256]) + """ + + def merge_query_key_value(self, param_list, ckpt_ver): + """ + Up to now we found 3 Q/K/V parameter formats in different Megatron checkpoint versions: + + 1. version 0, there is no version information saved in checkpoint. + format: [(3 * np * hn), h] + 2. version 1.0 + format: [(np * hn * 3), h] + 3. version 2.0 + format: [(np * 3 * hn), h] + + h: hidden size + n: number of attention heads + p: number of model parallel partitions + np: n/p + hn: h/n + """ + + new_qkv = None + if ckpt_ver == 0: + # [(3 * np * hn), h] + assert param_list[0].shape[0] % 3 == 0 + size_qkv = param_list[0].shape[0] // 3 + split_tensors = [torch.split(param, size_qkv, dim=0) for param in param_list] + + tensors = [] + for i in range(3): + tensor_tuple = [t[i] for t in split_tensors] + tensors.append(torch.cat(tensor_tuple, axis=0)) + new_qkv = torch.cat(tensors, axis=0) + elif ckpt_ver == 1.0 or ckpt_ver == 2.0: + # [(np * hn * 3), h] or [(np * 3 * hn), h] + new_qkv = torch.cat(param_list, axis=0) + else: + assert False, f'checkpoint version: {ckpt_ver} is not supported' + + return new_qkv + + def split_query_key_value(self, param, num_to_split, offset, ckpt_ver): + """ + Up to now we found 3 Q/K/V parameter formats in different Megatron checkpoint versions: + + 1. version 0, there is no version information saved in checkpoint. + format: [(3 * np * hn), h] + 2. version 1.0 + format: [(np * hn * 3), h] + 3. 
version 2.0 + format: [(np * 3 * hn), h] + + h: hidden size + n: number of attention heads + p: number of model parallel partitions + np: n/p + hn: h/n + """ + + new_qkv = None + if ckpt_ver == 0: + # [(3 * np * hn), h] + assert param.shape[0] % 3 == 0 + size_qkv = param.shape[0] // 3 + split_tensors = torch.split(param, size_qkv, dim=0) + + assert split_tensors[0].shape[0] % num_to_split == 0 + split_size = split_tensors[0].shape[0] // num_to_split + + tensors = [] + for i in range(3): + tensors.append(torch.split(split_tensors[i], split_size, dim=0)[offset]) + new_qkv = torch.cat(tensors, axis=0) + elif ckpt_ver == 1.0 or ckpt_ver == 2.0: + # [(np * hn * 3), h] or [(np * 3 * hn), h] + assert param.shape[0] % num_to_split == 0 + size_qkv = param.shape[0] // num_to_split + split_tensors = torch.split(param, size_qkv, dim=0) + new_qkv = split_tensors[offset] + else: + assert False, f'checkpoint version: {ckpt_ver} is not supported' + + return new_qkv + + def merge_state_dict(self, + mp_world_size, + mp_rank, + quantize=False, + quantize_bits=8, + groups=64, + mlp_extra_grouping=True): + self.sanity_check(self.ckpt_list[0]) + + sd_list = self.get_merge_state_dicts(mp_world_size, mp_rank) + ds_sd = copy.deepcopy(sd_list[0]) + new_client_sd = collections.OrderedDict() + + client_sd_list = [self.get_module(sd) for sd in sd_list] + keys = client_sd_list[0].keys() + + ckpt_ver = self.get_checkpoint_version(ds_sd) + logger.info(f"checkpoint version: {ckpt_ver}") + if quantize: + quantizer = WeightQuantization(mlp_extra_grouping=mlp_extra_grouping, mp_size=mp_world_size) + + for key in keys: + value_list = [sd[key] for sd in client_sd_list] + + if "attention.dense.weight" in key or "mlp.dense_4h_to_h.weight" in key: + if quantize: + value_list = quantizer.Quantize(value_list, quantize_bits, groups, key=key, merge_dim=1) + new_client_sd[key] = torch.cat(value_list, axis=1) + elif "attention.query_key_value" in key: + if quantize and "attention.query_key_value.weight" in key: + value_list = quantizer.Quantize(value_list, quantize_bits, groups, key=key) + new_client_sd[key] = torch.cat(value_list, axis=0) + else: + if quantize: + new_client_sd[key] = torch.cat(value_list, axis=0) + else: + new_client_sd[key] = self.merge_query_key_value(value_list, ckpt_ver) + elif "mlp.dense_h_to_4h.weight" in key or "word_embeddings.weight" in key or "mlp.dense_h_to_4h.bias" in key: + if quantize and "mlp.dense_h_to_4h.weight" in key: + value_list = quantizer.Quantize(value_list, quantize_bits, groups, key=key) + new_client_sd[key] = torch.cat(value_list, axis=0) + else: + new_client_sd[key] = value_list[0] + if quantize: + all_scales = quantizer.merge_scales() + ds_sd = self.set_module(ds_sd, new_client_sd) + + return ds_sd, (all_scales if quantize else None), len(client_sd_list) + + def split_state_dict(self, + mp_world_size, + mp_rank, + quantize=False, + quantize_bits=8, + groups=64, + mlp_extra_grouping=True): + #self.sanity_check(self.ckpt_list[0]) + + sd, num_to_split, ckpt_offset = self.get_split_state_dict(mp_world_size, mp_rank) + ds_sd = copy.deepcopy(sd) + new_client_sd = collections.OrderedDict() + + client_sd = self.get_module(sd) + + ckpt_ver = self.get_checkpoint_version(ds_sd) + logger.info(f"checkpoint version: {ckpt_ver}") + + if quantize: + quantizer = WeightQuantization(mlp_extra_grouping=mlp_extra_grouping, mp_size=mp_world_size) + + for key in client_sd.keys(): + value = client_sd[key] + + if "attention.dense.weight" in key or "mlp.dense_4h_to_h.weight" in key: + assert value.shape[1] % 
num_to_split == 0 + split_size = value.shape[1] // num_to_split + if quantize: + q_vals = quantizer.Quantize([value], quantize_bits, groups, key) + value = q_vals[0] + new_client_sd[key] = torch.split(value, split_size, dim=1)[ckpt_offset] + elif "attention.query_key_value" in key: + if quantize and "attention.query_key_value.weight" in key: + q_vals = quantizer.Quantize([value], quantize_bits, groups, key) + value = q_vals[0] + new_client_sd[key] = self.split_query_key_value(value, num_to_split, ckpt_offset, ckpt_ver) + elif "mlp.dense_h_to_4h.weight" in key or "word_embeddings.weight" in key or "mlp.dense_h_to_4h.bias" in key or "final_linear.weight" in key: + assert value.shape[0] % num_to_split == 0 + split_size = value.shape[0] // num_to_split + if quantize and "mlp.dense_h_to_4h.weight" in key: + q_vals = quantizer.Quantize([value], quantize_bits, groups, key) + value = q_vals[0] + new_client_sd[key] = torch.split(value, split_size, dim=0)[ckpt_offset] + else: + new_client_sd[key] = value + + if quantize: + all_scales = quantizer.merge_scales_split(num_to_split) + + ds_sd = self.set_module(ds_sd, new_client_sd) + + return ds_sd, (all_scales if quantize else None) + + def sanity_check(self, ckpt_file_name): + keys_to_check = [ + "attention.dense.weight", "mlp.dense_4h_to_h.weight", "attention.query_key_value", + "mlp.dense_h_to_4h.weight", "mlp.dense_h_to_4h.bias" + ] + + sd = self.checkpoint_engine.load(ckpt_file_name, map_location=lambda storage, loc: storage) + + # partial_key is a sub-string of one key in the sd + def check_key_exist(partial_key, sd): + keys = sd.keys() + found = False + for k in keys: + if partial_key in k: + found = True + break + return found + + for key in keys_to_check: + assert check_key_exist(key, + self.get_module(sd)), f'key: {key} is not found in the checkpoint {ckpt_file_name}' + + def get_checkpoint_version(self, state_dict): + # Use 0 if version info doesn't exist + return self.version if self.version is not None else state_dict.get('checkpoint_version', 0) diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..208299fb8c50f73468d293b6fa5dca71649d62e7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__init__.py @@ -0,0 +1,4 @@ +# Copyright (c) Microsoft Corporation. 
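To tie the loader above together: a state-dict descriptor is a small JSON file (or dict) naming the checkpoint shards, and SDLoaderFactory turns it into a MegatronSDLoader that merges or splits shards to match the current model-parallel world size. The following is a hedged sketch, not part of the diff; the shard paths are placeholders and real files are required because check_ckpt_list() opens the first shard.

```python
from deepspeed.runtime.state_dict_factory import SDLoaderFactory

# Placeholder shard paths - real checkpoint files are needed (check_ckpt_list loads shard 0).
ckpt_json = {
    "type": "Megatron",
    "version": 1.0,
    "checkpoints": ["mp_rank_00_model_states.pt", "mp_rank_01_model_states.pt"],
    "parallelization": "pp",
}

# checkpoint_engine=None falls back to TorchCheckpointEngine inside SDLoaderBase.
loader = SDLoaderFactory.get_sd_loader_json(ckpt_json, checkpoint_engine=None)

# With mp_world_size == 2 == len(checkpoints), rank 0 loads its shard directly;
# mp_world_size == 1 would trigger merge_state_dict(), 4 would trigger split_state_dict().
load_path, sd, (scales, merge_count) = loader.load(mp_world_size=2, mp_rank=0)
```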
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00b711c9588a4dbdbf4204050e85bf805bde44c7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/aio_config.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/aio_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bb5e04b25402a7d847e9b5d1aeed53c325714df0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/aio_config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/async_swapper.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/async_swapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f1480680b0af974a90922ab5c51dea39ecd7d66b Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/async_swapper.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f49f42b6532415362ba4b333c3f261f0d7b9d89f Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/constants.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/optimizer_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/optimizer_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06c3ffafd6069681b35c12773589ccd7a2c78aec Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/optimizer_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/partitioned_optimizer_swapper.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/partitioned_optimizer_swapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99fc3c9b7598666f5d255a3bcd4786dda177aae1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/partitioned_optimizer_swapper.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/partitioned_param_swapper.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/partitioned_param_swapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8a4e3b1c628e06ad2da153155f1597e558b8939d Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/partitioned_param_swapper.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/pipelined_optimizer_swapper.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/pipelined_optimizer_swapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d43394cb3fd39269dbb646369fbd81c8fcf65487 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/pipelined_optimizer_swapper.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..154b91de3ce666b5c1519bd05a6391720e57d428 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/aio_config.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/aio_config.py new file mode 100644 index 0000000000000000000000000000000000000000..df4a38380089ad87f6121b836603075e08689c49 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/aio_config.py @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from deepspeed.runtime.config_utils import get_scalar_param +from deepspeed.runtime.swap_tensor.constants import * + +AIO_DEFAULT_DICT = { + AIO_BLOCK_SIZE: AIO_BLOCK_SIZE_DEFAULT, + AIO_QUEUE_DEPTH: AIO_QUEUE_DEPTH_DEFAULT, + AIO_THREAD_COUNT: AIO_THREAD_COUNT_DEFAULT, + AIO_SINGLE_SUBMIT: AIO_SINGLE_SUBMIT_DEFAULT, + AIO_OVERLAP_EVENTS: AIO_OVERLAP_EVENTS_DEFAULT +} + + +def get_aio_config(param_dict): + if AIO in param_dict.keys() and param_dict[AIO] is not None: + aio_dict = param_dict[AIO] + return { + AIO_BLOCK_SIZE: get_scalar_param(aio_dict, AIO_BLOCK_SIZE, AIO_BLOCK_SIZE_DEFAULT), + AIO_QUEUE_DEPTH: get_scalar_param(aio_dict, AIO_QUEUE_DEPTH, AIO_QUEUE_DEPTH_DEFAULT), + AIO_THREAD_COUNT: get_scalar_param(aio_dict, AIO_THREAD_COUNT, AIO_THREAD_COUNT_DEFAULT), + AIO_SINGLE_SUBMIT: get_scalar_param(aio_dict, AIO_SINGLE_SUBMIT, AIO_SINGLE_SUBMIT_DEFAULT), + AIO_OVERLAP_EVENTS: get_scalar_param(aio_dict, AIO_OVERLAP_EVENTS, AIO_OVERLAP_EVENTS_DEFAULT) + } + + return AIO_DEFAULT_DICT diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/async_swapper.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/async_swapper.py new file mode 100644 index 0000000000000000000000000000000000000000..b808721537fef5e3907bd3b8819b2e2d0b2f6936 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/async_swapper.py @@ -0,0 +1,175 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Functionality of swapping tensors to/from (NVMe) storage devices. 
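The aio_config.py module above simply overlays user-supplied values on the AIO_*_DEFAULT constants. A small sketch (not part of the diff) of how a partial "aio" section in a DeepSpeed config resolves:

```python
from deepspeed.runtime.swap_tensor.aio_config import get_aio_config

# Only two keys overridden; the rest fall back to the defaults.
ds_config_fragment = {"aio": {"block_size": 262144, "queue_depth": 32}}
print(get_aio_config(ds_config_fragment))
# -> {'block_size': 262144, 'queue_depth': 32, 'thread_count': 1,
#     'single_submit': False, 'overlap_events': True}

# No "aio" section at all returns AIO_DEFAULT_DICT unchanged.
print(get_aio_config({}))
```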
+""" +import torch + +from deepspeed import comm as dist +from deepspeed.utils.logging import logger +from deepspeed.runtime.swap_tensor.utils import swap_out_tensors, SwapBuffer +from deepspeed.accelerator import get_accelerator + +INVALID_BUFFER_INDEX = -1 +ASYNC_SWAPPER_WAIT_TIMER = 'async_swap_gradient_wait' + + +class AsyncTensorSwapper(object): + + def __init__(self, aio_handle, numel_alignment, timers): + self.free_buffer_index = [] + self.swapping_buffer_index = [] + self.ready_buffer_index = [] + self.current_buffer_index = INVALID_BUFFER_INDEX + self.all_buffers = [] + self.aio_handle = aio_handle + self.numel_alignment = numel_alignment + self.max_numel = 0 + self.num_pending_swaps = 0 + self.timers = timers + self.timer_names = set() + self.num_elements_swapped = 0 + self.dtype = None + + def has_buffers(self): + return len(self.all_buffers) > 0 + + def add_buffers(self, buffer_list): + assert len(self.all_buffers) == 0 + assert all([get_accelerator().is_pinned(buffer) for buffer in buffer_list]) + dtype = buffer_list[0].dtype + assert all([buffer.dtype == dtype for buffer in buffer_list]) + + self.dtype = dtype + self.all_buffers = [SwapBuffer(buffer) for buffer in buffer_list] + self.free_buffer_index += [i for i in range(len(self.all_buffers))] + self.max_numel = max([buffer.numel() for buffer in buffer_list]) + self.timer_names = set() + + def get_timer_names(self): + return list(self.timer_names) + + def release_buffers(self): + self._report_statistics('Swapped out[Before flush]') + self._flush_buffers_until_complete() + self._report_statistics('Swapped out[After flush]') + + pinned_buffers = [buf.buffer for buf in self.all_buffers] + self.all_buffers = [] + self.free_buffer_index = [] + self.current_buffer_index = INVALID_BUFFER_INDEX + self.num_elements_swapped = 0 + self.dtype = None + + return pinned_buffers + + def swap_out_tensors(self, tensor_list, path_list): + for tensor, swap_path in zip(tensor_list, path_list): + self._swap_out_tensor(tensor, swap_path) + + def _report_statistics(self, message): + if dist.get_rank() == 0: + element_size = torch.tensor([], dtype=self.dtype).element_size() + swapped_GB = (self.num_elements_swapped * element_size) / (1024**3) + logger.debug(f'{message} num_elems = {self.num_elements_swapped}, {swapped_GB:5.2f} GB') + + def _swap_out_tensor(self, tensor, swap_path): + assert len(self.all_buffers) > 0 + + aligned_numel = self._io_aligned_numel(tensor.numel()) + assert aligned_numel <= self.max_numel + + self._make_swap_space(aligned_numel) + assert self.current_buffer_index != INVALID_BUFFER_INDEX + + swap_buffer = self._get_current_buffer() + swap_buffer.insert_tensor(tensor, swap_path, aligned_numel) + + def _make_swap_space(self, numel): + if self.current_buffer_index == INVALID_BUFFER_INDEX: + self._allocate_buffer() + return + + if not self._get_current_buffer().has_space(numel): + if len(self.free_buffer_index) > 0: + self._flush_ready_buffers() + else: + self._flush_buffers_until_complete() + self._allocate_buffer() + + def _io_aligned_numel(self, numel): + remainder = numel % self.numel_alignment + return numel if remainder == 0 else (numel + self.numel_alignment - remainder) + + def _allocate_buffer(self): + assert self.current_buffer_index == INVALID_BUFFER_INDEX + assert len(self.all_buffers) > 0 + assert len(self.free_buffer_index) > 0 + self.current_buffer_index = self.free_buffer_index[-1] + self.free_buffer_index = self.free_buffer_index[:-1] + + def _flush_ready_buffers(self): + if self.current_buffer_index != 
INVALID_BUFFER_INDEX: + self.ready_buffer_index.append(self.current_buffer_index) + self.current_buffer_index = INVALID_BUFFER_INDEX + + self._swap_out_ready_buffers() + + def _flush_buffers_until_complete(self): + self._flush_ready_buffers() + assert len(self.ready_buffer_index) == 0 + + self._wait_for_swap_complete() + assert len(self.swapping_buffer_index) == 0 + assert len(self.free_buffer_index) == len(self.all_buffers) + + def _swap_out_ready_buffers(self): + for buffer_index in self.ready_buffer_index: + buffer = self._get_buffer(buffer_index) + swap_tensors = buffer.get_swap_tensors() + swap_paths = buffer.get_swap_paths() + self.num_pending_swaps += len(swap_tensors) + swap_out_tensors(self.aio_handle, swap_tensors, swap_paths) + + self.swapping_buffer_index += self.ready_buffer_index + self.ready_buffer_index = [] + + def _wait_for_swap_complete(self): + assert len(self.swapping_buffer_index) > 0 + + self._start_timer(ASYNC_SWAPPER_WAIT_TIMER) + assert self.aio_handle.wait() == self.num_pending_swaps + self._stop_timer(ASYNC_SWAPPER_WAIT_TIMER) + self.timer_names.add(ASYNC_SWAPPER_WAIT_TIMER) + + self.num_pending_swaps = 0 + + for buffer_index in self.swapping_buffer_index: + buffer = self._get_buffer(buffer_index) + self.num_elements_swapped += buffer.get_num_elem() + buffer.reset() + + self.free_buffer_index += self.swapping_buffer_index + assert len(self.free_buffer_index) <= len(self.all_buffers) + self.swapping_buffer_index = [] + + def _get_buffer(self, index): + assert index != INVALID_BUFFER_INDEX + return self.all_buffers[index] + + def _get_current_buffer(self): + return self._get_buffer(self.current_buffer_index) + + def _start_timer(self, name): + if self.timers: + self.timers(name).start() + + def _stop_timer(self, name): + if self.timers: + self.timers(name).stop() + + def _log_timers(self, name_list, force=False): + if self.timers and force: + self.timers.log(name_list) diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/constants.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..4c9722bc4e4f24a2de542d19faf09e591fbc6f02 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/constants.py @@ -0,0 +1,27 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +AIO +""" +AIO_FORMAT = ''' +"aio": { + "block_size": 1048576, + "queue_depth": 8, + "thread_count": 1, + "single_submit": false, + "overlap_events": true +} +''' +AIO = "aio" +AIO_BLOCK_SIZE = "block_size" +AIO_BLOCK_SIZE_DEFAULT = 1048576 +AIO_QUEUE_DEPTH = "queue_depth" +AIO_QUEUE_DEPTH_DEFAULT = 8 +AIO_THREAD_COUNT = "thread_count" +AIO_THREAD_COUNT_DEFAULT = 1 +AIO_SINGLE_SUBMIT = "single_submit" +AIO_SINGLE_SUBMIT_DEFAULT = False +AIO_OVERLAP_EVENTS = "overlap_events" +AIO_OVERLAP_EVENTS_DEFAULT = True diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/optimizer_utils.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/optimizer_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..389ad6ae1076fbbdf67d03edc438340b93faca2c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/optimizer_utils.py @@ -0,0 +1,476 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Functionality of swapping tensors to/from (NVMe) storage devices. 
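One detail worth calling out before the optimizer-side swapper: every tensor handed to the AIO layer is padded up to an alignment boundary (the _io_aligned_numel helper above), so the bytes written per tensor can be slightly larger than the tensor itself. A standalone sketch of that rounding follows; the 512-element alignment is illustrative (e.g. 1 KB aligned blocks with 2-byte fp16 elements), and the real value comes from the swapper's constructor.

```python
def io_aligned_numel(numel: int, numel_alignment: int) -> int:
    # Mirrors AsyncTensorSwapper._io_aligned_numel / OptimizerSwapper._io_aligned_numel:
    # round the element count up to the next multiple of the AIO alignment.
    remainder = numel % numel_alignment
    return numel if remainder == 0 else numel + numel_alignment - remainder

numel_alignment = 512                            # illustrative value
print(io_aligned_numel(4096, numel_alignment))   # 4096 - already aligned
print(io_aligned_numel(4100, numel_alignment))   # 4608 - padded up by 508 elements
```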
+""" + +import os +import torch + +from deepspeed import comm as dist +from deepspeed.utils.logging import logger +from deepspeed.runtime.swap_tensor.constants import * +from deepspeed.runtime.swap_tensor.utils import swap_in_tensors, swap_out_tensors, \ + MIN_AIO_BYTES, AIO_ALIGNED_BYTES, get_sized_buffers +from deepspeed.runtime.swap_tensor.utils import SwapBufferManager, SwapBufferPool +from deepspeed.accelerator import get_accelerator + + +class FlattenedTensorSwapInfo(object): + + def __init__(self, path, length, offset): + self.path = path + self.offset = offset + self.length = length + + +class OptimizerStateSwapInfo(object): + + def __init__(self, parameter, numel, base_folder): + self.tensors = [] + self.param_id = OptimizerSwapper.parameter_id(parameter) + self.swap_folder = base_folder + self.swap_paths = [] + self.swapped_gradients = {} + self.unswapped_gradients = {} + self.tensor_numel = numel + self.tensor_dtype = parameter.dtype + self.tensor_device = parameter.device + self.has_state_tensors = False + self._add_tensors([parameter]) + + def numel(self): + return self.tensor_numel + + def has_gradients(self): + return self.swapped_gradients or self.unswapped_gradients + + def _add_tensors(self, tensor_list): + for t in tensor_list: + self.tensors.append(t) + self.swap_paths.append(os.path.join(self.swap_folder, f'{OptimizerSwapper.parameter_id(t)}.tensor.swp')) + + def add_state_tensors(self, tensor_list): + self.has_state_tensors = True + self._add_tensors(tensor_list) + + def device(self): + return self.tensor_device + + def dtype(self): + return self.tensor_dtype + + def release_memory(self): + for tensor in self.tensors: + tensor.data = torch.Tensor() + + def get_or_create_gradient_paths(self, offsets, lengths): + gradient_paths = [] + for offset, length in zip(offsets, lengths): + if not offset in self.swapped_gradients.keys(): + path = os.path.join(self.swap_folder, f'{self.param_id}_gradient_{offset}_{length}.tensor.swp') + self.swapped_gradients[offset] = FlattenedTensorSwapInfo(path, length, offset) + + gradient_paths.append(self.swapped_gradients[offset].path) + + return gradient_paths + + def set_swap_buffers(self, buffers): + compute_lengths = [self.numel()] * len(self.tensors) + compute_buffers = get_sized_buffers(buffers, compute_lengths) + for t, buffer in zip(self.tensors, compute_buffers): + t.data = buffer.data + + def get_swap_gradient_buffers(self, swap_buffer): + assert self.numel() <= swap_buffer.numel() + return [swap_buffer.narrow(0, grad.offset, grad.length) for grad in self.swapped_gradients.values()] + + def get_swap_gradient_paths(self): + return [grad.path for grad in self.swapped_gradients.values()] + + def get_unpinned_state_tensors(self): + return [t for t in self.tensors if not get_accelerator().is_pinned(t)] + + def read_unswapped_gradients(self, dest_buffer): + num_elem_count = 0 + for offset, grad_partition in self.unswapped_gradients.items(): + dst_tensor = dest_buffer.narrow(0, offset, grad_partition.numel()) + dst_tensor.data.copy_(grad_partition.data) + num_elem_count += grad_partition.numel() + + return num_elem_count + + def release_unswapped_gradients(self): + self.unswapped_gradients = {} + + +SWAPPER_DEBUG_MODE = False +SWAP_OUT_GRADIENT_TIMER = 'swap_out_gradient' + + +class OptimizerSwapper(object): + + @staticmethod + def parameter_id(param): + return param.ds_id + + def __init__(self, swap_config, aio_config, base_folder, optimizer, largest_numel, device, dtype, timers): + self.swap_config = swap_config + self.aio_config = 
aio_config + + # NVMe swap management + self.swap_params_info = {} + self.swap_element_size = torch.tensor([], dtype=dtype).element_size() + self.swap_folder = os.path.join(base_folder, 'optimizer', f'rank{dist.get_rank()}') + os.makedirs(self.swap_folder, exist_ok=True) + + self.optimizer = optimizer + + # Read/Write alignment for each thread during Intra-request parallelism + self.min_aio_bytes = max(MIN_AIO_BYTES, aio_config[AIO_BLOCK_SIZE]) + self.aligned_bytes = AIO_ALIGNED_BYTES * aio_config[AIO_THREAD_COUNT] + self.numel_alignment = self.aligned_bytes // self.swap_element_size + + # Swap buffer management + self.largest_numel = self._io_aligned_numel(largest_numel) + self.dtype = dtype + self.swap_buffer_manager = SwapBufferManager(num_elems=self.largest_numel, + count=swap_config.buffer_count, + dtype=dtype) + + # Timers + self.timers = timers + self.timer_names = set() + + # Print exclusion list + self.print_exclude_list = [ + 'optimizer', + 'swap_buffer_manager', + 'swap_params_info', + 'timers', + 'timer_names', + ] + + def swappable_tensor(self, param=None, numel=None): + assert param is not None or numel is not None, "Either param or numel must be provided" + if param is not None: + return self.min_aio_bytes <= (param.numel() * self.swap_element_size) + return self.min_aio_bytes <= (numel * self.swap_element_size) + + def init_timers(self): + self.timer_names = set() + + def log_timers(self): + if self.timer_names: + self._log_timers(list(self.timer_names), force=True) + + def pre_backward(self): + self.init_timers() + + def post_backward(self): + pass + + def _flush_gradient_swapper(self, gradient_swapper): + if gradient_swapper.has_buffers(): + self._start_timer(SWAP_OUT_GRADIENT_TIMER) + pinned_buffers = gradient_swapper.release_buffers() + self.swap_buffer_manager.free(pinned_buffers) + self._stop_timer(SWAP_OUT_GRADIENT_TIMER) + self.timer_names.add(SWAP_OUT_GRADIENT_TIMER) + self.timer_names.update(gradient_swapper.get_timer_names()) + + def _swap_out_gradients(self, parameter, gradient_offsets, gradient_tensors, gradient_swapper): + if not OptimizerSwapper.parameter_id(parameter) in self.swap_params_info.keys(): + return + + swap_info = self.swap_params_info[OptimizerSwapper.parameter_id(parameter)] + + swappable_tensors = [] + swappable_offsets = [] + swappable_lengths = [] + + aligned_gradients, aligned_offsets = self._adjust_for_misaligned_lengths(tensors=gradient_tensors, + offsets=gradient_offsets) + + self._start_timer(SWAP_OUT_GRADIENT_TIMER) + for tensor, offset in zip(aligned_gradients, aligned_offsets): + if not self.swappable_tensor(param=tensor): + swap_info.unswapped_gradients[offset] = tensor + continue + + swappable_tensors.append(tensor) + swappable_offsets.append(offset) + swappable_lengths.append(tensor.numel()) + + if len(swappable_tensors) > 0: + if not gradient_swapper.has_buffers(): + pinned_buffers = self.swap_buffer_manager.allocate_all(num_elems=self.largest_numel, dtype=self.dtype) + + gradient_swapper.add_buffers(pinned_buffers) + + swappable_paths = swap_info.get_or_create_gradient_paths(swappable_offsets, swappable_lengths) + + gradient_swapper.swap_out_tensors(tensor_list=swappable_tensors, path_list=swappable_paths) + + self._stop_timer(SWAP_OUT_GRADIENT_TIMER) + self.timer_names.add(SWAP_OUT_GRADIENT_TIMER) + + def _initialize_from_swapped_fp16_params(self, aio_handle, fp16_partitions_info, fp16_num_elems, + fp16_pinned_buffers, fp32_parameters): + assert len(fp32_parameters) == len(fp16_partitions_info) + assert len(fp32_parameters) == 
len(fp16_num_elems) + assert all([get_accelerator().is_pinned(buffer) for buffer in fp16_pinned_buffers]) + + fp32_swap_paths = self._get_swap_paths(parameters=fp32_parameters, num_elems=fp16_num_elems) + + fp32_pinned_buffers = self.swap_buffer_manager.allocate_all(num_elems=self.largest_numel, dtype=self.dtype) + + fp16_buffer_numel = [buf.numel() for buf in fp16_pinned_buffers] + assert all([numel >= self.largest_numel for numel in fp16_buffer_numel]), \ + f"numel of fp16 buffers {fp16_buffer_numel} is too small for initializing fp32 params {self.largest_numel}" + + fp32_swap_buffers = SwapBufferPool(fp32_pinned_buffers) + fp16_swap_buffers = SwapBufferPool(fp16_pinned_buffers) + + curr_index = 0 + while curr_index < len(fp32_parameters): + fp16_pinned_tensors = self._swap_in_fp16_params(aio_handle=aio_handle, + fp16_num_elems=fp16_num_elems[curr_index:], + fp16_partitions_info=fp16_partitions_info[curr_index:], + fp16_swap_buffers=fp16_swap_buffers) + + if dist.get_rank() == 0 and SWAPPER_DEBUG_MODE: + for i, tensor in enumerate(fp16_pinned_tensors): + true_index = curr_index + i + logger.info( + f'swap_in_fp16_param: fp32_id = {OptimizerSwapper.parameter_id(fp32_parameters[true_index])} index = {true_index} orig_num_elem = {fp16_num_elems[true_index]}, swap_num_elem = {fp16_pinned_tensors[i].numel()}' + ) + + swap_out_count = self._swap_out_fp16_params(aio_handle=aio_handle, + fp32_swap_paths=fp32_swap_paths[curr_index:], + fp32_swap_buffers=fp32_swap_buffers, + fp16_pinned_tensors=fp16_pinned_tensors) + assert swap_out_count == len(fp16_pinned_tensors), \ + f"{swap_out_count} does not match {len(fp16_pinned_tensors)}" + + fp16_swap_buffers.reset() + fp32_swap_buffers.reset() + curr_index += swap_out_count + + self.swap_buffer_manager.free(fp32_pinned_buffers) + + def _swap_in_fp16_params(self, aio_handle, fp16_num_elems, fp16_partitions_info, fp16_swap_buffers): + assert len(fp16_num_elems) > 0 + + swapped_fp16_tensors = [] + swap_tensors = [] + swap_paths = [] + unswapped_srcs = [] + unswapped_dsts = [] + + for i, numel in enumerate(fp16_num_elems): + pinned_tensor, _ = fp16_swap_buffers.allocate_tensor(numel, None, numel) + if pinned_tensor is None: + break + + swapped_fp16_tensors.append(pinned_tensor) + offset = 0 + for tensor, partition_numel, partition_path in fp16_partitions_info[i]: + dst_tensor = pinned_tensor.narrow(0, offset, partition_numel) + if partition_path is None: + unswapped_srcs.append(tensor) + unswapped_dsts.append(dst_tensor) + else: + swap_paths.append(partition_path) + swap_tensors.append(dst_tensor) + offset += partition_numel + + assert len(swapped_fp16_tensors) + len(unswapped_srcs) > 0 + ret = swap_in_tensors(aio_handle, swap_tensors, swap_paths) + for src, dst in zip(unswapped_srcs, unswapped_dsts): + dst.data.copy_(src.data) + + assert len(swap_tensors) == aio_handle.wait() + + return swapped_fp16_tensors + + def _swap_out_fp16_params(self, aio_handle, fp32_swap_paths, fp32_swap_buffers, fp16_pinned_tensors): + + assert len(fp16_pinned_tensors) <= len(fp32_swap_paths) + swap_out_count = 0 + for i, fp16_tensor in enumerate(fp16_pinned_tensors): + if not fp32_swap_buffers.has_space(fp16_tensor.numel()): + fp32_swap_buffers.swap_out(aio_handle) + fp32_swap_buffers.reset() + + pinned_tensor, _ = fp32_swap_buffers.insert_tensor(fp16_tensor, fp32_swap_paths[i], + self._io_aligned_numel(fp16_tensor.numel())) + assert pinned_tensor is not None + swap_out_count += 1 + + if len(fp32_swap_buffers.get_swap_tensors()) > 0: + fp32_swap_buffers.swap_out(aio_handle) + 
+ return swap_out_count + + def _initialize_parameters(self, parameters, src_tensors, aio_handle): + assert len(parameters) == len(src_tensors) + + swap_paths = self._get_swap_paths(parameters=parameters, num_elems=[src.numel() for src in src_tensors]) + + SWAP_INIT_TIMER = "swap_init_write" + self._start_timer(SWAP_INIT_TIMER) + + pinned_buffers = self.swap_buffer_manager.allocate_all(num_elems=self.largest_numel, dtype=self.dtype) + assert pinned_buffers is not None + + self._swap_out_unpinned_tensors(aio_handle=aio_handle, + unpinned_tensors=src_tensors, + dest_paths=swap_paths, + pinned_buffers=pinned_buffers) + + if dist.get_rank() == 0 and SWAPPER_DEBUG_MODE: + for i, tensor in enumerate(src_tensors): + logger.info( + f'copy_in_fp16_param: fp32_id = {OptimizerSwapper.parameter_id(parameters[i])} index = {i}, swap_num_elem = {src_tensors[i].numel()}' + ) + + self.swap_buffer_manager.free(pinned_buffers) + + self._stop_timer(SWAP_INIT_TIMER) + self._log_timers([SWAP_INIT_TIMER]) + + def _get_swap_paths(self, parameters, num_elems): + swap_info_list = [ + self._create_param_swap_info(parameter=p, + numel=numel) \ + for p, numel in zip(parameters, num_elems) + ] + assert len(swap_info_list) == len(num_elems) + + swap_paths = [info.swap_paths[0] for info in swap_info_list] + return swap_paths + + def _swap_out_unpinned_tensors(self, aio_handle, unpinned_tensors, dest_paths, pinned_buffers): + + swap_buffer_count = len(pinned_buffers) + unpinned_tensor_count = len(unpinned_tensors) + + for i in range(0, unpinned_tensor_count, swap_buffer_count): + swap_tensor_count = min((unpinned_tensor_count - i), swap_buffer_count) + + src_tensors = unpinned_tensors[i:(i + swap_tensor_count)] + compute_lengths = [t.numel() for t in src_tensors] + compute_buffers = get_sized_buffers(pinned_buffers, compute_lengths) + + for dst, src in zip(compute_buffers, src_tensors): + dst.data.copy_(src.data) + + swap_lengths = [self._io_aligned_numel(t.numel()) for t in src_tensors] + swap_buffers = get_sized_buffers(pinned_buffers, swap_lengths) + + swap_paths = dest_paths[i:(i + swap_tensor_count)] + swap_out_tensors(aio_handle, swap_buffers, swap_paths) + + assert aio_handle.wait() == swap_tensor_count + + def _adjust_for_misaligned_lengths(self, tensors, offsets): + new_tensors = [] + new_offsets = [] + + for orig_tensor, orig_offset in zip(tensors, offsets): + if not self.swappable_tensor(param=orig_tensor): + new_tensors.append(orig_tensor) + new_offsets.append(orig_offset) + continue + + remainder = orig_tensor.numel() % self.numel_alignment + if remainder == 0: + new_tensors.append(orig_tensor) + new_offsets.append(orig_offset) + continue + + # Split into two by making remainder a tensor + aligned_length = (orig_tensor.numel() // self.numel_alignment) * self.numel_alignment + new_tensors.append(orig_tensor.narrow(0, 0, aligned_length)) + new_offsets.append(orig_offset) + + # remainder tensor + new_tensors.append(orig_tensor.narrow(0, aligned_length, remainder)) + new_offsets.append(orig_offset + aligned_length) + + return new_tensors, new_offsets + + def _retrieve_unswapped_grad_partitions(self, swap_info, dest_buffer): + UNSWAPPED_READ_GRADIENTS = 'unswapped_read_gradients' + self._start_timer(UNSWAPPED_READ_GRADIENTS) + tensor_count = len(swap_info.unswapped_gradients) + num_elem_count = swap_info.read_unswapped_gradients(dest_buffer) + self._stop_timer(UNSWAPPED_READ_GRADIENTS) + self._log_timers([UNSWAPPED_READ_GRADIENTS]) + + # It should be safe to discard unswapped gradient partitions + 
swap_info.release_unswapped_gradients() + + if SWAPPER_DEBUG_MODE: + logger.info( + f'optimizer_retrieve_unswapped_gradients: param={swap_info.param_id} tensor_count={tensor_count} elem_count={num_elem_count}' + ) + + def _get_state_tensors(self, parameter): + if not parameter in self.optimizer.state: + return [] + + tensor_list = [] + for state_name, value in self.optimizer.state[parameter].items(): + if torch.is_tensor(value): + value.ds_id = state_name + '-' + parameter.ds_id + tensor_list.append(value) + + return tensor_list + + def _update_param_state_info(self, swap_info, parameter): + if not swap_info.has_state_tensors: + state_tensors = self._get_state_tensors(parameter) + if state_tensors: + swap_info.add_state_tensors(state_tensors) + + def _create_param_swap_info(self, parameter, numel): + param_id = OptimizerSwapper.parameter_id(parameter) + assert not param_id in self.swap_params_info + + self.swap_params_info[param_id] = OptimizerStateSwapInfo(parameter=parameter, + numel=numel, + base_folder=self.swap_folder) + swap_info = self.swap_params_info[param_id] + + self._update_param_state_info(swap_info, parameter) + + return swap_info + + def _get_param_swap_info(self, parameter): + param_id = OptimizerSwapper.parameter_id(parameter) + swap_info = self.swap_params_info.get(param_id, None) + + if swap_info is not None: + self._update_param_state_info(swap_info, parameter) + + return swap_info + + def _start_timer(self, name): + if self.timers: + self.timers(name).start() + + def _stop_timer(self, name): + if self.timers: + self.timers(name).stop() + + def _log_timers(self, name_list, force=False): + if self.timers and (SWAPPER_DEBUG_MODE or force): + self.timers.log(name_list) + + def _io_aligned_numel(self, numel): + remainder = numel % self.numel_alignment + return numel if remainder == 0 else (numel + self.numel_alignment - remainder) diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/partitioned_optimizer_swapper.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/partitioned_optimizer_swapper.py new file mode 100644 index 0000000000000000000000000000000000000000..e53a280befe40082e3a24e67808be6b655682021 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/partitioned_optimizer_swapper.py @@ -0,0 +1,219 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Functionality of swapping optimizer tensors to/from (NVMe) storage devices. 
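A consequence of the alignment and minimum-size rules above: when gradients arrive in _swap_out_gradients, each one is first split into an aligned head and a short tail (_adjust_for_misaligned_lengths), and pieces smaller than min_aio_bytes are parked in unswapped_gradients instead of being written to NVMe. A plain-torch sketch of that split, not part of the diff; the alignment and threshold values are illustrative only.

```python
import torch

numel_alignment = 512                  # illustrative
min_aio_numel = 1048576 // 2           # illustrative: 1 MB minimum I/O, fp16 elements

grad = torch.randn(700_000, dtype=torch.float16)
aligned_len = (grad.numel() // numel_alignment) * numel_alignment

head = grad.narrow(0, 0, aligned_len)                           # 699904 elems -> swappable
tail = grad.narrow(0, aligned_len, grad.numel() - aligned_len)  # 96 elems -> unswapped_gradients

print(head.numel() >= min_aio_numel)   # True: large enough to be worth an AIO write
print(tail.numel() >= min_aio_numel)   # False: kept in memory and re-read later by
                                       # _retrieve_unswapped_grad_partitions()
```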
+""" + +import torch + +from deepspeed.utils.logging import logger +from deepspeed.ops.op_builder import AsyncIOBuilder +from deepspeed import comm as dist + +from deepspeed.runtime.swap_tensor.constants import * +from deepspeed.runtime.swap_tensor.utils import swap_in_tensors, swap_out_tensors, print_object, \ + get_sized_buffers +from deepspeed.runtime.swap_tensor.async_swapper import AsyncTensorSwapper +from deepspeed.runtime.swap_tensor.optimizer_utils import OptimizerSwapper +from deepspeed.accelerator import get_accelerator + +DEBUG_MODE = False + +SWAP_IN_PARAM_TIMER = 'swap_in_param' +SWAP_OUT_PARAM_TIMER = 'swap_out_param' +SWAP_IN_GRADIENT_TIMER = 'swap_in_gradient' + + +class PartitionedOptimizerSwapper(OptimizerSwapper): + + def __init__(self, swap_config, aio_config, base_folder, optimizer, largest_numel, device, dtype, timers): + super(PartitionedOptimizerSwapper, self).__init__(swap_config, aio_config, base_folder, optimizer, + largest_numel, device, dtype, timers) + + aio_op = AsyncIOBuilder().load() + self.aio_handle = aio_op.aio_handle(aio_config[AIO_BLOCK_SIZE], aio_config[AIO_QUEUE_DEPTH], + aio_config[AIO_SINGLE_SUBMIT], aio_config[AIO_OVERLAP_EVENTS], + aio_config[AIO_THREAD_COUNT]) + + # Overlap swapping out + self.gradient_swapper = AsyncTensorSwapper(aio_handle=self.aio_handle, + numel_alignment=self.numel_alignment, + timers=self.timers) + + self.print_exclude_list += ['aio_handle', 'gradient_swapper', 'print_exclude_list'] + + if dist.get_rank() == 0: + print_object(obj=self, name='PartitionedOptimizerSwapper', exclude_list=self.print_exclude_list) + + def initialize_parameters(self, parameters, src_tensors): + self._initialize_parameters(parameters=parameters, src_tensors=src_tensors, aio_handle=self.aio_handle) + + def initialize_from_swapped_fp16_params(self, fp16_partitions_info, fp16_num_elems, fp16_pinned_buffers, + fp32_parameters): + self._initialize_from_swapped_fp16_params(aio_handle=self.aio_handle, + fp16_partitions_info=fp16_partitions_info, + fp16_num_elems=fp16_num_elems, + fp16_pinned_buffers=fp16_pinned_buffers, + fp32_parameters=fp32_parameters) + + def flush_gradients(self): + self._flush_gradient_swapper(self.gradient_swapper) + + def swap_in_optimizer_state(self, parameter, async_parameter=None): + swap_info = self._get_param_swap_info(parameter) + if swap_info is None: + return + + self._flush_gradient_swapper(self.gradient_swapper) + + required_buffer_count = len(swap_info.tensors) + (1 if swap_info.has_gradients() else 0) + aligned_numel = self._io_aligned_numel(swap_info.numel()) + pinned_buffers = self.swap_buffer_manager.allocate(num_elems=aligned_numel, + count=required_buffer_count, + dtype=parameter.dtype) + assert pinned_buffers is not None + self.allocated_swap_buffers = pinned_buffers.copy() + + self._start_timer(SWAP_IN_PARAM_TIMER) + self._swap_in_parameter(aio_handle=self.aio_handle, + parameter=parameter, + dest_buffers=pinned_buffers[:required_buffer_count]) + self._stop_timer(SWAP_IN_PARAM_TIMER) + self.timer_names.add(SWAP_IN_PARAM_TIMER) + + self._start_timer(SWAP_IN_GRADIENT_TIMER) + self._swap_in_gradients(aio_handle=self.aio_handle, parameter=parameter, dest_buffer=pinned_buffers[-1]) + self._stop_timer(SWAP_IN_GRADIENT_TIMER) + self.timer_names.add(SWAP_IN_GRADIENT_TIMER) + + def swap_out_optimizer_state(self, parameter, async_swap=False): + swap_info = self._get_param_swap_info(parameter=parameter) + + if swap_info is None: + return + + self._start_timer(SWAP_OUT_PARAM_TIMER) + pinned_tensors, pinned_paths, 
unpinned_tensors, unpinned_paths = self._separate_pinned_tensors(swap_info) + swap_bytes = sum([self._io_aligned_numel(t.numel()) * t.element_size() for t in swap_info.tensors]) + + WRITE_TIMER = 'swap_submit_write' + self._start_timer(WRITE_TIMER) + + swap_out_tensors(self.aio_handle, pinned_tensors, pinned_paths) + assert self.aio_handle.wait() == len(pinned_tensors) + for t in pinned_tensors: + t.data = torch.Tensor() + + if len(unpinned_tensors) > 0: + pinned_buffers = self.swap_buffer_manager.allocate_all(num_elems=self.largest_numel, dtype=self.dtype) + self._swap_out_unpinned_tensors(aio_handle=self.aio_handle, + unpinned_tensors=unpinned_tensors, + dest_paths=unpinned_paths, + pinned_buffers=pinned_buffers) + self.allocated_swap_buffers += pinned_buffers + + for t in unpinned_tensors: + t.data = torch.Tensor() + self._stop_timer(WRITE_TIMER) + + self.swap_buffer_manager.free(self.allocated_swap_buffers) + self.allocated_swap_buffers = [] + + self._stop_timer(SWAP_OUT_PARAM_TIMER) + self.timer_names.add(SWAP_OUT_PARAM_TIMER) + + self._log_timers([WRITE_TIMER]) + + if DEBUG_MODE and dist.get_rank() == 0: + logger.info(f'optimizer_param_swap_out: {(swap_bytes/(1024**3)):5.2f} GB') + + def swap_out_gradients(self, parameter, gradient_offsets, gradient_tensors): + self._swap_out_gradients(parameter=parameter, + gradient_offsets=gradient_offsets, + gradient_tensors=gradient_tensors, + gradient_swapper=self.gradient_swapper) + + def _swap_in_parameter(self, aio_handle, parameter, dest_buffers): + swap_info = self._get_param_swap_info(parameter) + if swap_info is None: + return + + assert len(swap_info.tensors) <= len(dest_buffers) + + swap_lengths = [self._io_aligned_numel(swap_info.numel())] * len(swap_info.tensors) + swap_buffers = get_sized_buffers(dest_buffers, swap_lengths) + + READ_TIMER = 'swap_submit_read_param' + WAIT_TIMER = 'swap_wait_read_param' + + self._start_timer(READ_TIMER) + swap_in_tensors(aio_handle, swap_buffers, swap_info.swap_paths) + self._stop_timer(READ_TIMER) + + swap_bytes = sum([buffer.numel() * buffer.element_size() for buffer in swap_buffers]) + + self._start_timer(WAIT_TIMER) + aio_handle.wait() + self._stop_timer(WAIT_TIMER) + + compute_lengths = [swap_info.numel()] * len(swap_info.tensors) + compute_buffers = get_sized_buffers(dest_buffers, compute_lengths) + for t, buffer in zip(swap_info.tensors, compute_buffers): + t.data = buffer.data + + self._log_timers([READ_TIMER, WAIT_TIMER]) + if DEBUG_MODE and dist.get_rank() == 0: + logger.info(f'optimizer_param_swap_in: {(swap_bytes/(1024**3)):5.2f} GB') + + def _separate_pinned_tensors(self, swap_info): + pinned_tensors = [] + pinned_paths = [] + + unpinned_tensors = [] + unpinned_paths = [] + + for tensor, path in zip(swap_info.tensors, swap_info.swap_paths): + if get_accelerator().is_pinned(tensor): + pinned_tensors.append(tensor) + pinned_paths.append(path) + else: + unpinned_tensors.append(tensor) + unpinned_paths.append(path) + + return pinned_tensors, pinned_paths, unpinned_tensors, unpinned_paths + + def _swap_in_pinned_gradients(self, aio_handle, parameter, gradient_tensor): + swap_info = self.swap_params_info[OptimizerSwapper.parameter_id(parameter)] + param_gradients = swap_info.swapped_gradients.values() + swap_buffers = [gradient_tensor.narrow(0, grad.offset, grad.length) for grad in param_gradients] + swap_paths = [grad.path for grad in param_gradients] + SWAP_READ_GRADIENTS = 'swap_submit_read_gradient' + SWAP_WAIT_GRADIENTS = 'swap_submit_wait_gradient' + + 
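+        # Submit every gradient read in one batch, then block on a single wait()
+        # so the AIO handle can service the requests together.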
self._start_timer(SWAP_READ_GRADIENTS) + swap_in_tensors(aio_handle, swap_buffers, swap_paths) + self._stop_timer(SWAP_READ_GRADIENTS) + + self._start_timer(SWAP_WAIT_GRADIENTS) + assert len(swap_buffers) == aio_handle.wait() + self._stop_timer(SWAP_WAIT_GRADIENTS) + + self._log_timers([SWAP_READ_GRADIENTS, SWAP_WAIT_GRADIENTS]) + + def _swap_in_gradients(self, aio_handle, parameter, dest_buffer): + swap_info = self.swap_params_info.get(OptimizerSwapper.parameter_id(parameter), None) + if not (swap_info and swap_info.has_gradients()): + return + + assert get_accelerator().is_pinned(dest_buffer) + assert parameter.numel() <= dest_buffer.numel() + + parameter.grad = dest_buffer.narrow(0, 0, parameter.numel()) + + if swap_info.swapped_gradients: + self._swap_in_pinned_gradients(aio_handle, parameter, parameter.grad) + + if swap_info.unswapped_gradients: + self._retrieve_unswapped_grad_partitions(swap_info=swap_info, dest_buffer=parameter.grad) diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/partitioned_param_swapper.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/partitioned_param_swapper.py new file mode 100644 index 0000000000000000000000000000000000000000..fcc6a272883f2816b08ff42fbd3f1b2df18f3a37 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/partitioned_param_swapper.py @@ -0,0 +1,407 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Functionality of swapping tensors to/from (NVMe) storage devices. +""" + +import os +import shutil +from enum import Enum +import torch +from deepspeed import comm as dist +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import AsyncIOBuilder +from .constants import * +from .utils import swap_in_tensors, swap_out_tensors, MIN_AIO_BYTES, AIO_ALIGNED_BYTES, print_object, SwapBufferPool + + +def print_rank_0(message, debug=False, force=False): + if dist.get_rank() == 0 and (debug or force): + print(message) + + +class PartitionedParamStatus(Enum): + # Partitioned parameters are present and ready for use + AVAILABLE = 1 + + # partitioned params are in some non-memory device + NOT_AVAILABLE = 2 + + # partitioned params are being read from some non-memory device. 
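+    # (an async read has been submitted but not yet completed by synchronize_reads)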
+ INFLIGHT = 3 + + +class AsyncPartitionedParameterSwapper(object): + + def __init__(self, ds_config, model_dtype): + + aio_op = AsyncIOBuilder().load(verbose=False) + self.aio_handle = aio_op.aio_handle + self.dtype = model_dtype + + #set swap buffers, create aio handles + self._configure_aio(ds_config) + + #mapping from param id to path + self.id_to_path = {} + + #mapping from pram_id to buffer id + self.param_id_to_buffer_id = {} + + # mapping from param_id to swap buffer + self.param_id_to_swap_buffer = {} + + #number of elements in the param + self.param_id_to_numel = {} + + self.pending_writes = 0 + self.pending_reads = 0 + + #keep track of async swap in params and buffers + self.inflight_params = [] + self.inflight_swap_in_buffers = [] + self.inflight_numel = 0 + + #keep track of available params + self.available_params = set() + self.available_numel = 0 + + # for swapping out from partitioned fp32 params + self.partitioned_swap_buffer = None + self.partitioned_swap_pool = None + + self.invalid_buffer = torch.tensor(1).half() + + if dist.get_rank() == 0: + exclude_list = ['aio_read_handle', 'aio_write_handle', 'buffers'] + print_object(obj=self, name='AsyncPartitionedParameterSwapper', exclude_list=exclude_list) + + def available_swap_in_buffers(self): + return len(self.available_buffer_ids) + + def _configure_aio(self, ds_config): + self.swap_config = ds_config.zero_config.offload_param + torch_dtype_string = str(self.dtype).split(".")[1] + self.swap_folder = os.path.join(self.swap_config.nvme_path, 'zero_stage_3', f'{torch_dtype_string}params', + f'rank{dist.get_rank()}') + shutil.rmtree(self.swap_folder, ignore_errors=True) + os.makedirs(self.swap_folder, exist_ok=True) + + self.swap_element_size = torch.tensor([], dtype=self.dtype).element_size() + + self.aio_config = ds_config.aio_config + + # Read/Write alignment for each thread during Intra-request parallelism + self.min_aio_bytes = max(MIN_AIO_BYTES, self.aio_config[AIO_BLOCK_SIZE]) + self.aligned_bytes = AIO_ALIGNED_BYTES * self.aio_config[AIO_THREAD_COUNT] + self.numel_alignment = self.aligned_bytes // self.swap_element_size + + self.elements_per_buffer = self.swap_config.buffer_size + self.aligned_elements_per_buffer = self._io_aligned_numel(self.elements_per_buffer) + self.param_buffer_count = self.swap_config.buffer_count + + self.available_buffer_ids = [i for i in range(self.param_buffer_count)] + self.reserved_buffer_ids = [] + self.buffers = get_accelerator().pin_memory(torch.empty(int(self.aligned_elements_per_buffer * + self.param_buffer_count), + dtype=self.dtype, + requires_grad=False), + align_bytes=0) + + self.aio_read_handle = self.aio_handle(self.aio_config[AIO_BLOCK_SIZE], self.aio_config[AIO_QUEUE_DEPTH], + self.aio_config[AIO_SINGLE_SUBMIT], self.aio_config[AIO_OVERLAP_EVENTS], + self.aio_config[AIO_THREAD_COUNT]) + + self.aio_write_handle = self.aio_handle(self.aio_config[AIO_BLOCK_SIZE], self.aio_config[AIO_QUEUE_DEPTH], + self.aio_config[AIO_SINGLE_SUBMIT], + self.aio_config[AIO_OVERLAP_EVENTS], self.aio_config[AIO_THREAD_COUNT]) + + self.swap_out_params = [] + + #Check if partitioned param or numel in a tensor is swappable or not + def swappable_tensor(self, param=None, numel=None): + if param is not None: + assert numel is None, "Both parma and numel cannot be provided" + numel = param.ds_tensor.ds_numel + if numel is not None: + return self.min_aio_bytes <= numel * self.swap_element_size + assert False, "Either param or numel must be provided" + + def get_path(self, param, must_exist=False): + paths 
= self._get_swap_paths([param], must_exist=must_exist) + return paths[0] + + def _get_swap_paths(self, params, must_exist=False): + paths = [] + for param in params: + param_id = param.ds_id + if param_id in self.id_to_path.keys(): + param_path = self.id_to_path[param_id] + else: + assert not must_exist, f"Path for param id {param_id} does not exist" + param_path = os.path.join(self.swap_folder, f'{param_id}_param.tensor.swp') + + self.id_to_path[param_id] = param_path + paths.append(param_path) + + return paths + + def _get_swap_buffers(self, params): + buffers = [] + for param in params: + param_id = param.ds_id + assert param_id in self.param_id_to_swap_buffer.keys(), \ + f'param {param_id} has not been assigned a swap buffer' + buffers.append(self.param_id_to_swap_buffer[param_id]) + + return buffers + + def _track_numel(self, params): + for param in params: + assert param.ds_tensor is not None, "Partitioned tensor is None" + self.param_id_to_numel[param.ds_id] = param.ds_tensor.ds_numel + + def _allocate_and_return_buffers_for_swap_in(self, params): + compute_buffers = [] + swap_buffers = [] + + for param in params: + param_id = param.ds_id + assert param_id in self.param_id_to_numel.keys(), f" Number of elements in param {param_id} is unknown" + assert param_id not in self.param_id_to_buffer_id.keys( + ), f"param {param_id} already assigned swap buffer id {self.param_id_to_buffer_id[param_id]}" + assert param_id not in self.param_id_to_swap_buffer.keys( + ), f"param {param_id} has already been assigned a swap buffer" + + buffer_id = self.available_buffer_ids.pop() + print_rank_0(f"param {param.ds_id} is assigned swap in buffer id {buffer_id} ") + self.param_id_to_buffer_id[param_id] = buffer_id + aligned_swap_numel = self._io_aligned_numel(self.param_id_to_numel[param_id]) + swap_buffer = self.buffers.narrow(0, int(buffer_id * self.aligned_elements_per_buffer), aligned_swap_numel) + + self.param_id_to_swap_buffer[param_id] = swap_buffer + compute_buffer = swap_buffer.narrow(0, 0, self.param_id_to_numel[param_id]) + compute_buffers.append(compute_buffer) + swap_buffers.append(swap_buffer) + + return compute_buffers, swap_buffers + + #waits for inflight nvme write to complete + def synchronize_writes(self): + if self.pending_writes == 0: + return + assert self.pending_writes == self.aio_write_handle.wait() + self.pending_writes = 0 + self.remove_partition_and_release_buffers(self.swap_out_params) + self.swap_out_params = [] + + #waits for inflight nvme reads to complete + def synchronize_reads(self): + if self.pending_reads == 0: + return + + assert self.pending_reads == self.aio_read_handle.wait() + + self.pending_reads = 0 + + for param, swap_in_buffer in zip(self.inflight_params, self.inflight_swap_in_buffers): + param_id = param.ds_id + compute_buffer = swap_in_buffer.narrow(0, 0, self.param_id_to_numel[param_id]) + param.ds_tensor.data = compute_buffer.data + param.ds_tensor.status = PartitionedParamStatus.AVAILABLE + + self.available_params.update([param.ds_id for param in self.inflight_params]) + self.available_numel += self.inflight_numel + + self.inflight_params = [] + self.inflight_swap_in_buffers = [] + self.inflight_numel = 0 + + #Removes the memory assignment and releases the buffers + #Should only be executed after swapping out the tensors + def remove_partition_and_release_buffers(self, params): + for param in params: + param_id = param.ds_id + + if param_id in self.param_id_to_buffer_id.keys(): + + buffer_id = self.param_id_to_buffer_id[param_id] + + assert buffer_id is 
not None, "Missing buffer id for releasing" + + self.available_buffer_ids.append(buffer_id) + del self.param_id_to_buffer_id[param_id] + del self.param_id_to_swap_buffer[param_id] + print_rank_0(f"param {param.ds_id} releases buffer id {buffer_id} ") + + if param_id in self.available_params: + self.available_params.remove(param_id) + self.available_numel -= self.param_id_to_numel[param_id] + + param.ds_tensor.data = self.invalid_buffer.data + param.ds_tensor.status = PartitionedParamStatus.NOT_AVAILABLE + + #writes from in memory to nvme. Does not release the buffers + def _swap_out(self, params, async_op=True): + + swap_out_paths = self._get_swap_paths(params) + swap_out_params = self._get_swap_buffers(params) + self._track_numel(params) + + swap_out_tensors(self.aio_write_handle, swap_out_params, swap_out_paths) + + self.pending_writes += len(swap_out_params) + self.swap_out_params += params + + if not async_op: + self.synchronize_writes() + + #blocking swap out followed by releasing the memory buffers + def swap_out_and_release(self, params, async_op=False, force_buffer_release=False): + if async_op: + assert force_buffer_release, "Should not release preallocated buffers without completing the swap out. Set force_buffer_release to True to do it anyways" + self._swap_out(params, async_op=async_op) + + # book keeping function for inflight swap in + def _update_inflight_swap_in(self, params, swap_in_buffers, inflight_numel): + self.inflight_params.extend(params) + self.inflight_swap_in_buffers.extend(swap_in_buffers) + self.inflight_numel += inflight_numel + + for param in params: + param.ds_tensor.status = PartitionedParamStatus.INFLIGHT + + self.pending_reads += len(params) + + #assigns an in memory buffer and swaps in from nvme + def swap_in(self, params, async_op=True, swap_in_buffers=None): + + assert all([param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE + for param in params]), "Some params are already available or in flight" + swap_in_paths = self._get_swap_paths(params) + + if swap_in_buffers is None: + if len(self.available_buffer_ids) < len(swap_in_paths): + ids = [p.ds_id for p in params] + print_rank_0( + f'Not enough swap in buffers {len(self.available_buffer_ids)} for {len(swap_in_paths)} params, ids = {ids}', + force=True) + print_rank_0( + f'Num inflight: params {len(self.inflight_params)}, buffers {len(self.inflight_swap_in_buffers)}, numel = {self.inflight_numel}', + force=True) + print_rank_0( + f'Num available params: count = {len(self.available_params)}, ids = {self.available_params}, numel = {self.available_numel}', + force=True) + + assert len(swap_in_paths) <= len( + self.available_buffer_ids + ), f"Not enough buffers {len(self.available_buffer_ids)} for swapping {len(swap_in_paths)}" + compute_buffers, swap_in_buffers = self._allocate_and_return_buffers_for_swap_in(params) + inflight_numel = sum([t.numel() for t in compute_buffers]) + else: + inflight_numel = sum([t.numel() for t in swap_in_buffers]) + + swap_in_tensors(self.aio_read_handle, swap_in_buffers, swap_in_paths) + + self._update_inflight_swap_in(params, swap_in_buffers, inflight_numel) + + if not async_op: + self.synchronize_reads() + + # Enables swapping into buffer that is out the control of swapper. 
This is always synchronous + def swap_into_buffer(self, param, dest_buffer): + assert param.ds_tensor.status == PartitionedParamStatus.NOT_AVAILABLE, f"param {param.ds_id} is already available or inflight" + + require_swap_buffer = not (get_accelerator().is_pinned(dest_buffer) + and self._is_io_aligned(dest_buffer.numel())) + + if require_swap_buffer: + assert len(self.available_buffer_ids) > 0, f"No buffer available to swap param {param.ds_id}." + compute_buffers, swap_in_buffers = self._allocate_and_return_buffers_for_swap_in([param]) + inflight_numel = compute_buffers[0].numel() + else: + swap_in_buffers = [dest_buffer] + inflight_numel = dest_buffer.numel() + + swap_in_paths = self._get_swap_paths([param]) + + swap_in_tensors(self.aio_read_handle, swap_in_buffers, swap_in_paths) + self._update_inflight_swap_in([param], swap_in_buffers, inflight_numel) + self.synchronize_reads() + + if require_swap_buffer: + dest_buffer.data.copy_(param.ds_tensor.data) + # Release swap buffer memory assignment. Note, this will mark the parameter not available. + self.remove_partition_and_release_buffers([param]) + + #assign a buffer to a param and return the buffer + def get_buffer(self, param, numel): + param_id = param.ds_id + + assert self.available_swap_in_buffers( + ) > 0, f"No swap buffers to allocate for fp16 param {param_id} of numel = {numel}" + assert numel < self.elements_per_buffer, f"More elements {numel} than buffer size {self.elements_per_buffer}" + + self.param_id_to_numel[param_id] = numel + buffer_id = self.available_buffer_ids.pop() + self.param_id_to_buffer_id[param_id] = buffer_id + aligned_swap_numel = self._io_aligned_numel(self.param_id_to_numel[param_id]) + swap_buffer = self.buffers.narrow(0, int(buffer_id * self.aligned_elements_per_buffer), aligned_swap_numel) + + self.param_id_to_swap_buffer[param_id] = swap_buffer + compute_buffer = swap_buffer.narrow(0, 0, self.param_id_to_numel[param_id]) + print_rank_0(f"param {param.ds_id} is assigned swap in buffer id {buffer_id}") + return compute_buffer + + def reserve_available_buffers(self): + buffers = [] + for id in self.available_buffer_ids: + buffers.append( + self.buffers.narrow(0, int(id * self.aligned_elements_per_buffer), + int(self.aligned_elements_per_buffer))) + self.reserved_buffer_ids.append(id) + + self.available_buffer_ids = [] + return buffers + + def release_reserved_buffers(self): + for id in self.reserved_buffer_ids: + self.available_buffer_ids.append(id) + self.reserved_buffer_ids = [] + + def _io_aligned_numel(self, numel): + remainder = numel % self.numel_alignment + return numel if remainder == 0 else (numel + self.numel_alignment - remainder) + + def _is_io_aligned(self, numel): + return (numel % self.numel_alignment) == 0 + + def reserve_partitioned_swap_space(self, partition_num_elems): + aligned_numel = sum([self._io_aligned_numel(numel) for numel in partition_num_elems]) + self.partitioned_swap_buffer = get_accelerator().pin_memory(torch.zeros(aligned_numel, + device='cpu', + dtype=self.dtype), + align_bytes=0) + self.partitioned_swap_pool = SwapBufferPool([self.partitioned_swap_buffer]) + + def swap_out_partitioned_params(self, dst_fp16_params, src_fp32_params): + assert self.partitioned_swap_buffer is not None, f'partitioned swap buffers for fp16 params not initialized' + assert self.partitioned_swap_pool is not None, f'partitioned swap pool for fp16 params not initialized' + assert len(dst_fp16_params) == len(src_fp32_params), \ + f'mismatch in number of fp16 params {len(dst_fp16_params)} and fp32 
params {len(src_fp32_params)}' + + fp16_swap_paths = self._get_swap_paths(dst_fp16_params, must_exist=True) + self.synchronize_writes() + self.partitioned_swap_pool.reset() + for i, fp32_tensor in enumerate(src_fp32_params): + swap_tensor, _ = self.partitioned_swap_pool.insert_tensor(fp32_tensor, fp16_swap_paths[i], + self._io_aligned_numel(fp32_tensor.numel())) + assert swap_tensor is not None + dst_fp16_params[i].ds_tensor.status = PartitionedParamStatus.AVAILABLE + + self.partitioned_swap_pool.swap_out(self.aio_write_handle) + + for param in dst_fp16_params: + param.ds_tensor.status = PartitionedParamStatus.NOT_AVAILABLE diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/pipelined_optimizer_swapper.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/pipelined_optimizer_swapper.py new file mode 100644 index 0000000000000000000000000000000000000000..03dfe663fcb6d8e1cbfb2ac4b0484e7335156b65 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/pipelined_optimizer_swapper.py @@ -0,0 +1,234 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Functionality of swapping optimizer tensors to/from (NVMe) storage devices. +""" + +from deepspeed.ops.op_builder import AsyncIOBuilder +from deepspeed import comm as dist + +from deepspeed.runtime.swap_tensor.constants import * +from deepspeed.runtime.swap_tensor.utils import swap_in_tensors, swap_out_tensors, print_object +from deepspeed.runtime.swap_tensor.async_swapper import AsyncTensorSwapper +from deepspeed.runtime.swap_tensor.utils import get_sized_buffer +from deepspeed.runtime.swap_tensor.optimizer_utils import OptimizerSwapper + + +class OptimizerSwapOp(object): + + def __init__(self, aio_handle, read_op, param_info, allocated_buffers, state_buffers, num_ops): + self.aio_handle = aio_handle + self.read_op = read_op + self.param_info = param_info + self.allocated_buffers = allocated_buffers + self.state_buffers = state_buffers + self.wait_required = True + self.num_ops = num_ops + + def is_parameter(self, parameter): + return OptimizerSwapper.parameter_id(parameter) == self.param_info.param_id + + def wait(self): + assert self.wait_required + assert self.aio_handle.wait() == self.num_ops + self.wait_required = False + + +SYNC_SWAP_IN = 'sync_swap_in' +ASYNC_SWAP_IN = 'async_swap_in' +SYNC_SWAP_OUT = 'sync_swap_out' +ASYNC_SWAP_OUT = 'async_swap_out' + +SWAP_IN_STATE_TIMER = 'swap_in_state' +SWAP_OUT_STATE_TIMER = 'swap_out_state' +SWAP_OUT_GRADIENT_TIMER = 'swap_out_gradient' +ASYNC_SWAP_IN_STATE_TIMER = "async_swap_in_state" +ASYNC_SWAP_OUT_STATE_TIMER = 'async_swap_out_state' + + +class PipelinedOptimizerSwapper(OptimizerSwapper): + + def __init__(self, swap_config, aio_config, base_folder, optimizer, largest_numel, device, dtype, timers): + super(PipelinedOptimizerSwapper, self).__init__(swap_config, aio_config, base_folder, optimizer, largest_numel, + device, dtype, timers) + + aio_op = AsyncIOBuilder().load() + self.write_aio_handle = aio_op.aio_handle(aio_config[AIO_BLOCK_SIZE], aio_config[AIO_QUEUE_DEPTH], + aio_config[AIO_SINGLE_SUBMIT], aio_config[AIO_OVERLAP_EVENTS], + aio_config[AIO_THREAD_COUNT]) + + self.read_aio_handle = aio_op.aio_handle(aio_config[AIO_BLOCK_SIZE], aio_config[AIO_QUEUE_DEPTH], + aio_config[AIO_SINGLE_SUBMIT], aio_config[AIO_OVERLAP_EVENTS], + aio_config[AIO_THREAD_COUNT]) + + # Overlap gradient swap out + self.gradient_swapper = AsyncTensorSwapper(aio_handle=self.write_aio_handle, + 
numel_alignment=self.numel_alignment, + timers=self.timers) + + self.async_swap_in = swap_config.pipeline_read + self.async_swap_out = swap_config.pipeline_write + + self.swap_ops = {SYNC_SWAP_IN: None, ASYNC_SWAP_IN: None, SYNC_SWAP_OUT: None, ASYNC_SWAP_OUT: None} + + self.print_exclude_list += [ + 'gradient_swapper', 'read_aio_handle', 'write_aio_handle', 'swap_ops', 'print_exclude_list' + ] + + if dist.get_rank() == 0: + print_object(obj=self, name='PipelinedOptimizerSwapper', exclude_list=self.print_exclude_list) + + def initialize_parameters(self, parameters, src_tensors): + self._initialize_parameters(parameters=parameters, src_tensors=src_tensors, aio_handle=self.write_aio_handle) + + def initialize_from_swapped_fp16_params(self, fp16_partitions_info, fp16_num_elems, fp16_pinned_buffers, + fp32_parameters): + self._initialize_from_swapped_fp16_params(aio_handle=self.write_aio_handle, + fp16_partitions_info=fp16_partitions_info, + fp16_num_elems=fp16_num_elems, + fp16_pinned_buffers=fp16_pinned_buffers, + fp32_parameters=fp32_parameters) + + def flush_gradients(self): + self._flush_gradient_swapper(self.gradient_swapper) + + def swap_in_optimizer_state(self, parameter, async_parameter): + assert parameter is not None + assert self.swap_ops[SYNC_SWAP_IN] is None + + self._flush_gradient_swapper(self.gradient_swapper) + + self._start_timer(SWAP_IN_STATE_TIMER) + + if self.swap_ops[ASYNC_SWAP_IN]: + assert self.swap_ops[ASYNC_SWAP_IN].is_parameter(parameter) + self.swap_ops[SYNC_SWAP_IN] = self.swap_ops[ASYNC_SWAP_IN] + self.swap_ops[ASYNC_SWAP_IN] = None + else: + self.swap_ops[SYNC_SWAP_IN] = self._swap_in_optimizer_state(aio_handle=self.read_aio_handle, + parameter=parameter) + + if self.swap_ops[SYNC_SWAP_IN]: + self.swap_ops[SYNC_SWAP_IN].wait() + + if self.async_swap_in and async_parameter is not None: + assert self.swap_ops[ASYNC_SWAP_IN] is None + self.swap_ops[ASYNC_SWAP_IN] = self._swap_in_optimizer_state(aio_handle=self.read_aio_handle, + parameter=async_parameter) + + self._stop_timer(SWAP_IN_STATE_TIMER) + self.timer_names.add(SWAP_IN_STATE_TIMER) + + def swap_out_optimizer_state(self, parameter, async_swap): + self._start_timer(SWAP_OUT_STATE_TIMER) + + if self.swap_ops[ASYNC_SWAP_OUT]: + self._start_timer(ASYNC_SWAP_OUT_STATE_TIMER) + self._complete_swap_out(ASYNC_SWAP_OUT) + self._stop_timer(ASYNC_SWAP_OUT_STATE_TIMER) + self.timer_names.add(ASYNC_SWAP_OUT_STATE_TIMER) + + assert self.swap_ops[SYNC_SWAP_IN] is not None + assert not self.swap_ops[SYNC_SWAP_IN].wait_required + swap_op = self._swap_out_optimizer_state(aio_handle=self.write_aio_handle, + parameter=parameter, + swap_in_op=self.swap_ops[SYNC_SWAP_IN]) + self.swap_ops[SYNC_SWAP_IN] = None + + if self.async_swap_out and async_swap: + self.swap_ops[ASYNC_SWAP_OUT] = swap_op + else: + self.swap_ops[SYNC_SWAP_OUT] = swap_op + self._complete_swap_out(SYNC_SWAP_OUT) + + self._stop_timer(SWAP_OUT_STATE_TIMER) + self.timer_names.add(SWAP_OUT_STATE_TIMER) + + def swap_out_gradients(self, parameter, gradient_offsets, gradient_tensors): + self._swap_out_gradients(parameter=parameter, + gradient_offsets=gradient_offsets, + gradient_tensors=gradient_tensors, + gradient_swapper=self.gradient_swapper) + + def _complete_swap_out(self, swap_out_type): + self.swap_ops[swap_out_type].wait() + self.swap_buffer_manager.free(self.swap_ops[swap_out_type].allocated_buffers) + self.swap_ops[swap_out_type] = None + + def _swap_out_optimizer_state(self, aio_handle, parameter, swap_in_op): + assert swap_in_op.is_parameter(parameter) + + 
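+        # Reuse the pinned buffers and state views from the completed swap-in op;
+        # only state tensors that are not already pinned get fresh buffers below.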
allocated_buffers = swap_in_op.allocated_buffers.copy() + swap_buffers = swap_in_op.state_buffers.copy() + + param_info = swap_in_op.param_info + self._update_param_state_info(param_info, parameter) + unpinned_tensors = param_info.get_unpinned_state_tensors() + + if len(unpinned_tensors) > 0: + new_alloc_buffers = self.swap_buffer_manager.allocate(num_elems=self._io_aligned_numel(param_info.numel()), + count=len(unpinned_tensors), + dtype=param_info.dtype()) + assert new_alloc_buffers is not None + + allocated_buffers += new_alloc_buffers + swap_buffers += new_alloc_buffers + + for pinned_dst, unpinned_src in zip(new_alloc_buffers, unpinned_tensors): + dst = get_sized_buffer(pinned_dst, unpinned_src.numel()) + dst.data.copy_(unpinned_src.data) + + swap_paths = param_info.swap_paths.copy() + assert len(swap_paths) == len(swap_buffers) + + swap_out_tensors(aio_handle, swap_buffers, swap_paths) + + swap_out_op = OptimizerSwapOp(aio_handle=aio_handle, + param_info=param_info, + read_op=False, + allocated_buffers=allocated_buffers, + state_buffers=swap_buffers, + num_ops=len(swap_buffers)) + + return swap_out_op + + def _swap_in_optimizer_state(self, aio_handle, parameter): + param_info = self._get_param_swap_info(parameter) + if param_info is None: + return None + + required_buffer_count = len(param_info.tensors) + (1 if param_info.has_gradients() else 0) + aligned_numel = self._io_aligned_numel(param_info.numel()) + allocated_buffers = self.swap_buffer_manager.allocate(num_elems=aligned_numel, + count=required_buffer_count, + dtype=parameter.dtype) + assert allocated_buffers is not None, \ + f"PipelinedOptimizerSwapper ran out of swap buffers, try increasing 'buffer_count'" + + state_buffers = allocated_buffers[:len(param_info.tensors)] + param_info.set_swap_buffers(state_buffers) + + swap_buffers = state_buffers.copy() + swap_paths = param_info.swap_paths.copy() + + if param_info.has_gradients(): + parameter.grad = allocated_buffers[-1].narrow(0, 0, param_info.numel()) + if param_info.swapped_gradients: + swap_buffers += param_info.get_swap_gradient_buffers(parameter.grad) + swap_paths += param_info.get_swap_gradient_paths() + + swap_in_tensors(aio_handle, swap_buffers, swap_paths) + + if param_info.unswapped_gradients: + self._retrieve_unswapped_grad_partitions(swap_info=param_info, dest_buffer=parameter.grad) + + swap_in_op = OptimizerSwapOp(aio_handle=aio_handle, + param_info=param_info, + read_op=True, + allocated_buffers=allocated_buffers, + state_buffers=state_buffers, + num_ops=len(swap_buffers)) + + return swap_in_op diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/utils.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..90b2d9b8bd31e9b2bf845608820fd6cacf9404fb --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/swap_tensor/utils.py @@ -0,0 +1,240 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Functionality of swapping tensors to/from (NVMe) storage devices. 
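+
+A minimal sketch of how the buffer helpers below fit together; `src_tensor`,
+`swap_path`, `aligned_numel` and `aio_handle` are placeholder names, not part
+of this module:
+
+    mgr = SwapBufferManager(num_elems=1 << 20, count=4, dtype=torch.float16)
+    pinned = mgr.allocate_all(num_elems=1 << 20, dtype=torch.float16)
+    pool = SwapBufferPool(pinned)
+    swap_view, compute_view = pool.insert_tensor(src_tensor, swap_path, aligned_numel)
+    pool.swap_out(aio_handle)   # async_pwrite each used buffer, then wait for completion
+    mgr.free(pinned)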
+""" + +import torch +from deepspeed.utils.logging import logger +from deepspeed.accelerator import get_accelerator + +from deepspeed import comm as dist + +MIN_AIO_BYTES = 1024**2 +AIO_ALIGNED_BYTES = 1024 + + +def swap_in_tensors(swap_handle, tensor_buffers, swap_paths): + for buffer, path in zip(tensor_buffers, swap_paths): + assert (swap_handle.async_pread(buffer, path) == 0) + + +def swap_out_tensors(swap_handle, tensor_buffers, swap_paths): + for buffer, path in zip(tensor_buffers, swap_paths): + assert (swap_handle.async_pwrite(buffer, path) == 0) + + +def print_object(obj, name, exclude_list=[]): + logger.info('{}:'.format(name)) + for arg in sorted(vars(obj)): + if not arg in exclude_list: + dots = '.' * (29 - len(arg)) + logger.info(' {} {} {}'.format(arg, dots, getattr(obj, arg))) + + +class SwapBuffer(object): + + def __init__(self, buffer): + self.buffer = buffer + self.reset() + + def reset(self): + self.offset = 0 + self.swap_tensors = {} + self.compute_tensors = {} + self.swap_paths = {} + self.num_elem = 0 + + def insert_tensor(self, tensor, swap_path, aligned_numel): + swap_tensor, compute_tensor = self.allocate_tensor(swap_path, tensor.numel(), aligned_numel) + compute_tensor.data.copy_(tensor.data) + return swap_tensor, compute_tensor + + def allocate_tensor(self, swap_path, numel, aligned_numel): + assert self.has_space(aligned_numel) + assert not self.offset in self.swap_tensors + + allocate_offset = self.offset + swap_tensor = self.buffer.narrow(0, allocate_offset, aligned_numel) + dest_tensor = swap_tensor.narrow(0, 0, numel) + + self.swap_tensors[allocate_offset] = swap_tensor + self.compute_tensors[allocate_offset] = dest_tensor + self.swap_paths[allocate_offset] = swap_path + self.offset += aligned_numel + self.num_elem += numel + + return self.swap_tensors[allocate_offset], self.compute_tensors[allocate_offset] + + def has_space(self, numel): + return (self.offset + numel) <= self.buffer.numel() + + def get_swap_tensors(self): + return [tensor for tensor in self.swap_tensors.values()] + + def get_swap_paths(self): + return [path for path in self.swap_paths.values()] + + def get_compute_tensors(self): + return [tensor for tensor in self.compute_tensors.values()] + + def get_num_elem(self): + return self.num_elem + + def get_swap_tensor(self, offset): + return self.swap_tensors.get(offset, None) + + def get_compute_tensor(self, offset): + return self.compute_tensors.get(offset, None) + + def get_swap_path(self, offset): + return self.swap_paths(offset, None) + + +class SwapBufferPool(object): + + def __init__(self, buffers): + assert all([get_accelerator().is_pinned(buf) for buf in buffers]) + self.buffers = [SwapBuffer(buf) for buf in buffers] + self.current_index = 0 + + def reset(self): + self.current_index = 0 + for buffer in self.buffers: + buffer.reset() + + def allocate_tensor(self, numel, swap_path, aligned_numel): + if self.has_space(aligned_numel): + swap_tensor, compute_tensor = self._get_current_buffer().allocate_tensor(swap_path, numel, aligned_numel) + return swap_tensor, compute_tensor + + return None, None + + def insert_tensor(self, tensor, swap_path, aligned_numel): + if self.has_space(aligned_numel): + swap_tensor, compute_tensor = self._get_current_buffer().insert_tensor(tensor, swap_path, aligned_numel) + return swap_tensor, compute_tensor + + return None, None + + def get_swap_tensors(self): + swap_tensors = [] + for buffer in self._get_used_buffers(): + swap_tensors += buffer.get_swap_tensors() + + return swap_tensors + + def 
get_swap_paths(self): + swap_paths = [] + for buffer in self._get_used_buffers(): + swap_paths += buffer.get_swap_paths() + + return swap_paths + + def get_compute_tensors(self): + compute_tensors = [] + for buffer in self._get_used_buffers(): + compute_tensors += buffer.get_compute_tensors() + + return compute_tensors + + def has_space(self, numel): + if self._get_current_buffer().has_space(numel): + return True + + if self.current_index == len(self.buffers) - 1: + return False + + self.current_index += 1 + return self._get_current_buffer().has_space(numel) + + def swap_out(self, aio_handle, async_op=False): + swap_tensors = self.get_swap_tensors() + swap_paths = self.get_swap_paths() + assert all([p is not None for p in swap_paths]) + + swap_out_tensors(aio_handle, swap_tensors, swap_paths) + + if not async_op: + assert len(swap_tensors) == aio_handle.wait() + + def swap_in(self, aio_handle, async_op=False): + swap_tensors = self.get_swap_tensors() + swap_paths = self.get_swap_paths() + assert all([p is not None for p in swap_paths]) + + swap_in_tensors(aio_handle, swap_tensors, swap_paths) + + if not async_op: + assert len(swap_tensors) == aio_handle.wait() + + def _get_current_buffer(self): + return self.buffers[self.current_index] + + def _get_used_buffers(self): + return self.buffers[:self.current_index + 1] + + +class SwapBufferManager(object): + + def __init__(self, num_elems, count, dtype): + self.num_elems = num_elems + self.count = count + self.dtype = dtype + self.all_buffers = [ + get_accelerator().pin_memory(torch.zeros(num_elems, device='cpu', dtype=dtype), align_bytes=0) + for _ in range(count) + ] + self.free_buffer_index = [i for i in range(count)] + self.used_buffer_index = {} + self.gigabytes = (self.all_buffers[0].element_size() * num_elems * count) / (1024**3) + + if dist.get_rank() == 0: + exclude_list = ['all_buffers'] + print_object(obj=self, name='SwapBufferManager', exclude_list=exclude_list) + + def allocate(self, num_elems, count, dtype): + assert dtype == self.dtype + assert num_elems <= self.num_elems + if count > len(self.free_buffer_index): + return None + + used_indices = self.free_buffer_index[-count:] + self.free_buffer_index = self.free_buffer_index[:-count] + + buffers = [] + for i in used_indices: + tmp_buffer = self.all_buffers[i].narrow(0, 0, num_elems) + buffers.append(tmp_buffer) + self.used_buffer_index[id(tmp_buffer)] = i + return buffers + + def allocate_all(self, num_elems, dtype): + return self.allocate(num_elems=num_elems, count=len(self.free_buffer_index), dtype=dtype) + + def free(self, buffers): + buffer_ids = [] + for buf in buffers: + buffer_ids.append(id(buf)) + + assert all([b_id in self.used_buffer_index for b_id in buffer_ids]) + + for b_id in buffer_ids: + self.free_buffer_index.append(self.used_buffer_index[b_id]) + del (self.used_buffer_index[b_id]) + + +def get_sized_buffer(buffer, num_elems): + assert num_elems <= buffer.numel(), \ + f'num_elems {num_elems} > buffer {buffer.numel()}' + return buffer.narrow(0, 0, num_elems) if num_elems < buffer.numel() else buffer + + +def get_sized_buffers(buffer_list, num_elems_list): + swap_buffers = [ + get_sized_buffer(buffer, num_elems) \ + for buffer, num_elems in zip(buffer_list, num_elems_list) + ] + return swap_buffers diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/utils.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7744b2ee8b98d1bb3a14496d2e7085bff4e71dd5 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/deepspeed/runtime/utils.py @@ -0,0 +1,1065 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Copyright NVIDIA/Megatron + +Helper functions and classes from multiple sources. +""" + +from collections.abc import Iterable +from deepspeed.moe.utils import is_moe_param +import os +import psutil +import gc +from math import sqrt + +import torch +from deepspeed import comm as dist +try: + from torch._six import inf +except ModuleNotFoundError: + from torch import inf + +from deepspeed.utils import groups, logger +from deepspeed.utils.bwc import (bwc_tensor_model_parallel_rank, bwc_pipeline_parallel_world_size, + bwc_pipeline_parallel_group) +from deepspeed.runtime.constants import PIPE_REPLICATED +from numpy import prod +from deepspeed.accelerator import get_accelerator + +from deepspeed.module_inject.policy import transpose +from torch.nn import functional as F + +torch_memory_reserved = get_accelerator().memory_reserved +torch_max_memory_reserved = get_accelerator().max_memory_reserved + + +class DummyOptim(): + """ + Dummy optimizer presents model parameters as a param group, this is + primarily used to allow ZeRO-3 without an optimizer + """ + + def __init__(self, params): + self.param_groups = [] + self.param_groups.append({'params': params}) + + +graph_cache = {} + + +def graph_process(replay_first_step, func, *args, **kwargs): + # `func` should only contain operations on the GPU + # Please ensure that the memory address of the data required by 'func' remains constant + if func.__name__ not in graph_cache: + cuda_stream = get_accelerator().Stream() + cuda_stream.wait_stream(get_accelerator().current_stream()) + with get_accelerator().stream(cuda_stream): + func(*args, **kwargs) + get_accelerator().current_stream().wait_stream(cuda_stream) + graph_cache[func.__name__] = get_accelerator().create_graph() + with get_accelerator().capture_to_graph(graph_cache[func.__name__]): + func(*args, **kwargs) + if replay_first_step: + get_accelerator().replay_graph(graph_cache[func.__name__]) + else: + get_accelerator().replay_graph(graph_cache[func.__name__]) + + +def noop_decorator(func): + return func + + +class noop_context(object): + + def __init__(self): + pass + + def __enter__(self): + pass + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + + +def ensure_directory_exists(filename): + """Create the directory path to ``filename`` if it does not already exist. + + Args: + filename (str): A file path. + """ + dirname = os.path.dirname(filename) + os.makedirs(dirname, exist_ok=True) + + +def set_random_seed(seed): + """Set the random seed for common PRNGs used during training: random, numpy, and torch. + + Args: + seed (int): the seed to use + """ + import numpy + import random + random.seed(seed) + numpy.random.seed(seed) + torch.manual_seed(seed) + + +def is_model_parallel_parameter(p) -> bool: + if hasattr(p, 'model_parallel') and p.model_parallel: + return True + + if hasattr(p, 'tensor_model_parallel') and p.tensor_model_parallel: + return True + + return False + + +def copy_to_device(item, device, criterion_func): + """ + Return a copy of tensor on specified device. + Works on individual tensors, and tensors contained/nested in lists, tuples, and dicts. + Parameters: + item: tensor to copy or (possibly nested) container of tensors to copy. 
+ device: target device + criterion_func: Function to restrict copy operation to items meet criterion + + Returns: + None + """ + if criterion_func(item): + return item.to(device) + elif isinstance(item, list): + return [copy_to_device(v, device, criterion_func) for v in item] + elif isinstance(item, tuple): + return tuple([copy_to_device(v, device, criterion_func) for v in item]) + elif isinstance(item, dict): + return {k: copy_to_device(v, device, criterion_func) for k, v in item.items()} + else: + return item + + +def move_to_device(item, device, criterion_func): + """ + Move tensor on to specified device by changing the storage. + Works on individual tensors, and tensors contained/nested in lists, tuples, and dicts. + Parameters: + item: tensor to move or (possibly nested) container of tensors to move. + device: target device + criterion_func: Function to restrict move operation to items meet criterion + + Returns: + None + """ + if criterion_func(item): + device_copy = item.to(device) + item.data = device_copy.data + return item + elif isinstance(item, list): + return [move_to_device(v, device, criterion_func) for v in item] + elif isinstance(item, tuple): + return tuple([move_to_device(v, device, criterion_func) for v in item]) + elif isinstance(item, dict): + return {k: move_to_device(v, device, criterion_func) for k, v in item.items()} + else: + return item + + +def get_norm_with_moe_layers_fast(all_groups_norm, group): + # This implementation standardizes the grad_norm across ranks. A more precise implementation can be found in 'get_norm_with_moe_layers'. + # Need to allreduce (avg) the norms across different ranks because moe params will not be synced during allreduce + scaled_norm = all_groups_norm * 1.0 / float(dist.get_world_size(group=group)) + scaled_norm_tensor = torch.tensor(scaled_norm, device=get_accelerator().current_device(), dtype=torch.float) + dist.all_reduce(scaled_norm_tensor, group=group) + all_groups_norm = scaled_norm_tensor.item() + #print(f"old = {all_groups_norm_old} and new = {all_groups_norm} at rank: {deepspeed.comm.get_rank()}") + return all_groups_norm + + +class CheckOverflow(object): + '''Checks for overflow in gradient across parallel process''' + + def __init__(self, param_groups=None, mpu=None, zero_reduce_scatter=False, deepspeed=None): + self.mpu = mpu + self.params = [] if param_groups else None + self.zero_reduce_scatter = zero_reduce_scatter + self.deepspeed = deepspeed + self.has_moe_params = False + if param_groups: + for group in param_groups: + for param in group: + self.params.append(param) + if is_moe_param(param): + self.has_moe_params = True + + def check_using_norm(self, norm_group, reduce_overflow=True): + # TODO: I don't think reduce_overflow is needed if mpu is None + overflow = -1 in norm_group + overflow_gpu = get_accelerator().FloatTensor([overflow]) + if self.has_moe_params: + # In this case, we need to do an all_reduce across + # the expert_parallel_group, so that if there was + # an overflow due to expert weights, we detect it + + # Only need to check groups.get_largest_expert_parallel_group() + dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=groups._get_max_expert_parallel_group()) + if self.mpu is not None: + dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=self.mpu.get_model_parallel_group()) + elif reduce_overflow: + dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX) + dist.barrier() + overflow = overflow_gpu[0].item() + return bool(overflow) + + def check(self, param_groups=None): + params = [] + 
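+        # Fall back to the parameters captured at construction time unless an
+        # explicit param_groups override is supplied.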
has_moe_params = False + if param_groups is None: + params = self.params + has_moe_params = self.has_moe_params + else: + assert param_groups is not None, \ + "self.params and param_groups both cannot be none" + + for group in param_groups: + for param in group: + params.append(param) + if is_moe_param(param): + has_moe_params = True + + return self.has_overflow(params, has_moe_params=has_moe_params) + + # `params` is a list / generator of torch.Variable + def has_overflow_serial(self, params): + for i, p in enumerate(params): + if p.grad is not None and self._has_inf_or_nan(p.grad.data, i): + return True + return False + + def has_overflow(self, params, has_moe_params=None): + if has_moe_params is None: + has_moe_params = self.has_moe_params + overflow = self.has_overflow_serial(params) + # Since each model parallel GPU carries only part of the model, + # make sure overflow flag is synced across all the model parallel GPUs + overflow_gpu = get_accelerator().ByteTensor([overflow]) + # deepspeed.comm.all_reduce(overflow_gpu, + # op=deepspeed.comm.ReduceOp.MAX, + # group=mpu.get_model_parallel_group()) + if has_moe_params: + # All reduce this across expert_parallel_group, so that if an expert + # overflows, we detect it here + dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=groups._get_max_expert_parallel_group()) + if self.zero_reduce_scatter: + dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=dist.get_world_group()) + elif self.mpu is not None: + if self.deepspeed is not None: + using_pipeline = hasattr(self.deepspeed, 'pipeline_enable_backward_allreduce') + if (using_pipeline and self.deepspeed.pipeline_enable_backward_allreduce is False) or ( + not using_pipeline and self.deepspeed.enable_backward_allreduce is False): + dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=self.mpu.get_data_parallel_group()) + dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=self.mpu.get_model_parallel_group()) + elif self.deepspeed is not None and self.deepspeed.enable_backward_allreduce is False: + dist.all_reduce(overflow_gpu, op=dist.ReduceOp.MAX, group=dist.get_world_group()) + + overflow = overflow_gpu[0].item() + return bool(overflow) + + # `x` is a torch.Tensor + @staticmethod + def _has_inf_or_nan(x, i): + try: + # if x is half, the .float() incurs an additional deep copy, but it's necessary if + # Pytorch's .sum() creates a one-element tensor of the same type as x + # (which is true for some recent version of pytorch). + cpu_sum = float(x.float().sum()) + # More efficient version that can be used if .sum() returns a Python scalar + # cpu_sum = float(x.sum()) + except RuntimeError as instance: + # We want to check if inst is actually an overflow exception. + # RuntimeError could come from a different error. + # If so, we still want the exception to propagate. 
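+            # A half-precision overflow surfaces as a "value cannot be converted"
+            # RuntimeError and is reported as an overflow; anything else is re-raised.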
+ if "value cannot be converted" not in instance.args[0]: + raise + return True + else: + if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum: + return True + return False + + +def _handle_overflow(cpu_sum, x, i): + import math + rank = dist.get_rank() + if rank == 0: + t_i = -1 + for v_i, v in enumerate(x.data.contiguous().view(-1)): + if not math.isfinite(float(v)): + t_i = v_i + break + logger.info(f"rank {rank} detected overflow {cpu_sum} in tensor {i}:{t_i} shape {x.shape}") + + +def get_global_norm(norm_list): + """ Compute total from a list of norms + """ + total_norm = 0.0 + for norm in norm_list: + total_norm += norm**2.0 + # logger.info(f'norm_list = {norm_list} global = {sqrt(total_norm)}') + return sqrt(total_norm) + + +def clip_grad_norm_(parameters, max_norm, norm_type=2, mpu=None): + """Clips gradient norm of an iterable of parameters. + + This has been adapted from Nvidia megatron. We add norm averaging + to consider MoE params when calculating norm as they will result + in different norms across different ranks. + + This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and + added functionality to handle model parallel parameters. Note that + the gradients are modified in place. + + Arguments: + parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a + single Tensor that will have gradients normalized + max_norm (float or int): max norm of the gradients + norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for + infinity norm. + + Returns: + Total norm of the parameters (viewed as a single vector). + """ + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + parameters = list(filter(lambda p: p.grad is not None, parameters)) + norm_type = float(norm_type) + all_norms = [] + if norm_type == inf: + for p in parameters: + all_norms.append(p.grad.data.abs().max().float()) + total_norm = torch.stack(all_norms).max() + total_norm = total_norm.to(get_accelerator().current_device_name()) + # Take max across all GPUs. + if mpu is not None: + dist.all_reduce(total_norm, op=dist.ReduceOp.MAX, group=mpu.get_model_parallel_group()) + else: + total_norm = 0 + for p in parameters: + if mpu is not None: + if (mpu.get_model_parallel_rank() == 0) or is_model_parallel_parameter(p): + param_norm = p.grad.data.detach().float().norm(norm_type) + all_norms.append(param_norm) + else: + param_norm = p.grad.data.detach().float().norm(norm_type) + all_norms.append(param_norm) + if len(all_norms) > 0: + total_norm = torch.stack(all_norms).square().sum().float() + else: + total_norm = get_accelerator().FloatTensor([0.0]) + total_norm = total_norm.to(get_accelerator().current_device_name()) + # Sum across all model parallel GPUs. + if mpu is not None: + dist.all_reduce(total_norm, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group()) + total_norm = total_norm.pow(1. 
/ norm_type) + + # Need to average total_norm across different GPUs due to the presence of moe params + pg = groups._get_data_parallel_group() + scaled_norm = total_norm * 1.0 / float(dist.get_world_size(group=pg)) + scaled_norm_tensor = scaled_norm + + dist.all_reduce(scaled_norm_tensor, group=pg) + total_norm = scaled_norm_tensor + total_norm = total_norm.to(parameters[0].device) + + max_norm = torch.tensor([float(max_norm)], device=total_norm.device) + clip_coef = max_norm / (total_norm + 1e-6) + tmp_tensor = torch.tensor([1.0], device=clip_coef.device) + clip_coef = torch.min(tmp_tensor, clip_coef) + for p in parameters: + p.grad.data.mul_(clip_coef) + return total_norm + + +def get_flattened_grad_norm(parameters, norm_type=2, mpu=None, grad_norm_mask=None): + """Get grad norm of an iterable of parameters. + + This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and + added functionality to handle model parallel parameters. Note that + the gradients are modified in place. Taken from Nvidia Megatron. + + Arguments: + parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a + single Tensor that will have gradients normalized + norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for + infinity norm. + grad_norm_mask (List[Tensor]): A list of Tensor, where + each Tensor is a 2D Tensor containing ranges of [start_index, end_index]. + Returns: + Total norm of the parameters (viewed as a single vector). + """ + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + parameters = list(filter(lambda p: p.grad is not None, parameters)) + + norm_type = float(norm_type) + if norm_type == inf: + total_norm = max(p.grad.data.abs().max() for p in parameters) + total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) + # Take max across all GPUs. + if mpu is not None: + dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=mpu.get_model_parallel_group()) + total_norm = total_norm_cuda[0].item() + else: + total_norm = 0. + for idx, p in enumerate(parameters): + # Use grad_norm_mask to avoid redundant computation of flattened gradient norm + if grad_norm_mask is not None and len(grad_norm_mask[idx]) > 0: + + # A loop-free implementation to create a mask tensor based on a range list + # which is logically equivalent to the following implementation. + # # mask_tensor_ = torch.zeros_like(p, device=p.device, dtype=bool) + # # for mask_idx in grad_norm_mask[idx]: + # # mask_tensor_[mask_idx[0]:mask_idx[1]] = True + cum_sum_pairs = torch.tensor([1, -1], device=get_accelerator().current_device(), + dtype=p.dtype).repeat(grad_norm_mask[idx].shape[0], 1) + mask_tensor = torch.zeros(p.shape[0] + 1, device=get_accelerator().current_device(), dtype=p.dtype) + mask_tensor = mask_tensor.scatter_(0, grad_norm_mask[idx].view(-1), + cum_sum_pairs.view(-1)).cumsum(0).bool()[:-1] + + param_norm = torch.masked_fill(p.grad.data, mask_tensor, 0).float().norm(norm_type) + + else: + param_norm = p.grad.data.float().norm(norm_type) + total_norm += param_norm.item()**norm_type + + # Sum across all model parallel GPUs. + total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) + if mpu is not None: + dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group()) + total_norm = total_norm_cuda[0].item()**(1. 
/ norm_type) + + if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm: + total_norm = -1 + + return total_norm + + +def get_grad_zeros(parameters, mpu=None): + """Compute the number of grads with zero values. + + This is adapted from get_grad_norm + + Arguments: + parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a + single Tensor that will have gradients normalized + + Returns: + Total number of params with zero values (viewed as a single vector). + """ + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + parameters = list(filter(lambda p: p.grad is not None, parameters)) + + total_zeros = 0. + tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=mpu) + for p in parameters: + # Pipeline parallelism may replicate parameters. Avoid multi-counting. + if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated: + continue + + # Filter to avoid over-counting replicated tensors from tensor + # model parallelism + if (tensor_mp_rank > 0) and not is_model_parallel_parameter(p): + continue + + count_zeros = p.grad.numel() - torch.count_nonzero(p.grad) + total_zeros += count_zeros.item() + + # Sum across all model parallel GPUs. + total_zeros_cuda = get_accelerator().FloatTensor([float(total_zeros)]) + if mpu is not None: + dist.all_reduce(total_zeros_cuda, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group()) + total_zeros = total_zeros_cuda[0].item() + + return total_zeros + + +def get_weight_norm(parameters, norm_type=2, mpu=None): + """Get norm of an iterable of parameters. + + This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and + added functionality to handle model parallel parameters. Note that + the gradients are modified in place. Taken from Nvidia Megatron. + + Arguments: + parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a + single Tensor that will have gradients normalized + norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for + infinity norm. + + Returns: + Total norm of the parameters (viewed as a single vector). + -1 if the norm value is NaN or Inf. + """ + if isinstance(parameters, torch.Tensor): + parameters = [parameters] + + norm_type = float(norm_type) + if norm_type == inf: + total_norm = max(p.data.abs().max() for p in parameters) + total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) + # Take max across all GPUs. + if mpu is not None: + dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.MAX, group=mpu.get_model_parallel_group()) + total_norm = total_norm_cuda[0].item() + else: + total_norm = 0. + tensor_mp_rank = bwc_tensor_model_parallel_rank(mpu=mpu) + for p in parameters: + # Pipeline parallelism may replicate parameters. Avoid multi-counting. + if hasattr(p, PIPE_REPLICATED) and p.ds_pipe_replicated: + continue + + # Filter to avoid over-counting replicated tensors from tensor + # model parallelism + if (tensor_mp_rank > 0) and not is_model_parallel_parameter(p): + continue + + param_norm = p.data.float().norm(norm_type) + total_norm += param_norm**norm_type + + # Sum across all model parallel GPUs. + total_norm_cuda = get_accelerator().FloatTensor([float(total_norm)]) + if mpu is not None: + dist.all_reduce(total_norm_cuda, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group()) + total_norm = total_norm_cuda[0].item()**(1. 
/ norm_type) + + if total_norm == float('inf') or total_norm == -float('inf') or total_norm != total_norm: + total_norm = -1 + + return total_norm + + +def prefix_sum_inc(weights): + """ Compute an inclusive prefix sum. + + Example: + >>> prefix_sum_inc([3,4,5]) + [3, 7, 12] + """ + weights_ = [w for w in weights] + for x in range(1, len(weights_)): + weights_[x] += weights_[x - 1] + return weights_ + + +def partition_uniform(num_items, num_parts): + import numpy + parts = [0] * (num_parts + 1) + # First check for the trivial edge case + if num_items <= num_parts: + for p in range(num_parts + 1): + parts[p] = min(p, num_items) + return parts + + chunksize = num_items // num_parts + residual = num_items - (chunksize * num_parts) + + parts = numpy.arange(0, (num_parts + 1) * chunksize, chunksize) + + for i in range(residual): + parts[i + 1:] += 1 + parts = parts.tolist() + + return parts + + +def partition_balanced(weights, num_parts): + """ + use dynamic programming solve `The Linear Partition Problem`. + see https://www8.cs.umu.se/kurser/TDBAfl/VT06/algorithms/BOOK/BOOK2/NODE45.HTM + """ + import numpy as np + n = len(weights) + m = num_parts + + if n <= m: + return partition_uniform(n, m) + + dp_max = np.full((n + 1, m + 1), np.inf) + dp_min = np.full((n + 1, m + 1), np.inf) + dp_cost = np.full((n + 1, m + 1), np.inf) + position = np.zeros((n + 1, m + 1), dtype=int) + prefix_sum = np.zeros((n + 1)) + prefix_sum[1:] = np.cumsum(weights) + + dp_max[0, 0] = 0 + dp_cost[0, 0] = 0 + for i in range(1, n + 1): + for j in range(1, min(i, m) + 1): + for k in range(i): + max_sum = max(dp_max[k, j - 1], prefix_sum[i] - prefix_sum[k]) + min_sum = min(dp_min[k, j - 1], prefix_sum[i] - prefix_sum[k]) + cost = max_sum - min_sum + if dp_cost[i, j] >= cost: + dp_cost[i, j] = cost + dp_max[i, j] = max_sum + dp_min[i, j] = min_sum + position[i, j] = k + + parts = [n] + for i in reversed(range(1, m + 1)): + parts.append(position[parts[-1], i]) + parts.reverse() + + return parts + + +class PartitionedTensor: + + def __init__(self, tensor, group, partition_meta=None): + super().__init__() + + self.group = group + self.num_parts = dist.get_world_size(group=self.group) + self.rank = dist.get_rank(group=self.group) + self.orig_size = list(tensor.size()) + self.orig_device = tensor.device + self.local_data, self.partition = self._partition_tensor(tensor) + self.even_split = tensor.numel() % self.num_parts == 0 + + @classmethod + def from_meta(cls, meta, local_part, group, device=get_accelerator().device_name()): + assert meta.dtype == torch.long + dummy = torch.ones(dist.get_world_size(group=group)) + part_obj = cls(tensor=dummy, group=group) + + meta = meta.tolist() + + # [N, list0, ..., listN-1] + part_obj.orig_size = meta[1:(1 + meta[0])] + meta = meta[1 + meta[0]:] + + part_obj.orig_device = device + part_obj.local_data = local_part.detach() + + part_obj.group = group + + # Partition is encoded like the rowptr of a CSR matrix: + # [num_parts, rank, 0, part_1, ..., part_num_parts] + # TODO: support shuffle between different partition granularities + assert part_obj.num_parts == meta[0] + assert part_obj.rank == meta[1] + part_obj.partition = meta[2:] # length num_parts+1 + + return part_obj + + def _partition_tensor(self, tensor): + partition = partition_uniform(num_items=tensor.numel(), num_parts=self.num_parts) + start = partition[self.rank] + length = partition[self.rank + 1] - start + tensor_part = tensor.detach().contiguous().view(-1).narrow(0, start=start, length=length).clone() + + return tensor_part, 
partition + + def full(self, device=None): + if device is None: + device = self.orig_device + + # Allocate the full tensor as a flat buffer. + full_numel = prod(self.full_size()) + flat_tensor = torch.zeros([full_numel], dtype=self.local_data.dtype, device=device) + if self.even_split: + # Collect the full tensor + dist.all_gather_into_tensor(flat_tensor, self.local_data, group=self.group) + else: + for part_id in range(self.num_parts): + part_size = self.partition[part_id + 1] - self.partition[part_id] + buf = flat_tensor.narrow(0, start=self.partition[part_id], length=part_size) + if part_id == self.rank: + buf.copy_(self.local_data) + dist.broadcast(buf, part_id, self.group) + return flat_tensor.view(self.full_size()).clone().detach() + + def to_meta(self): + """Returns a torch.LongTensor that encodes partitioning information. + + Can be used along with ``data()`` to serialize a ``PartitionedTensor`` for + communication. + + Returns: + torch.LongTensor: a tensor encoding the meta-information for the partitioning + """ + meta = [] + meta.append(len(self.orig_size)) + meta += list(self.orig_size) + meta.append(self.num_parts) + meta.append(self.rank) + meta += self.partition + return torch.LongTensor(data=meta).to(self.orig_device) + + def data(self): + return self.local_data + + def local_size(self): + return self.local_data.size() + + def full_size(self): + return self.orig_size + + +mem_alloced = 0 +mem_cached = 0 + + +def memory_status(msg, print_rank=-1, reset_max=False): + global mem_alloced, mem_cached + + rank = dist.get_rank() + if print_rank != -1 and rank != print_rank: + return + + get_accelerator().synchronize() + + if reset_max: + get_accelerator().reset_max_memory_cached() + get_accelerator().reset_max_memory_allocated() + + new_alloced = get_accelerator().memory_allocated() + new_cached = get_accelerator().memory_cached() + + delta_alloced = new_alloced - mem_alloced + delta_cached = new_cached - mem_cached + + mem_cached = new_cached + mem_alloced = new_alloced + + max_alloced = get_accelerator().max_memory_allocated() + max_cached = get_accelerator().max_memory_cached() + + # convert to GB for printing + new_alloced /= 1024**3 + new_cached /= 1024**3 + delta_alloced /= 1024**3 + delta_cached /= 1024**3 + max_alloced /= 1024**3 + max_cached /= 1024**3 + + print( + f'RANK={rank} MEMSTATS', msg, f'device={get_accelerator().current_device_name()} ' + f'current alloc={new_alloced:0.4f}GB (delta={delta_alloced:0.4f}GB max={max_alloced:0.4f}GB) ' + f'current cache={new_cached:0.4f}GB (delta={delta_cached:0.4f}GB max={max_cached:0.4f}GB)') + + +def get_ma_status(): + if dist.is_initialized() and not dist.get_rank() == 0: + return 0 + return get_accelerator().memory_allocated() + + +def empty_cache(): + get_accelerator().empty_cache() + get_accelerator().reset_peak_memory_stats() + + +def see_memory_usage(message, force=False): + if not force: + return + if dist.is_initialized() and not dist.get_rank() == 0: + return + + # python doesn't do real-time garbage collection so do it explicitly to get the correct RAM reports + gc.collect() + + # Print message except when distributed but not rank 0 + logger.info(message) + logger.info(f"MA {round(get_accelerator().memory_allocated() / (1024 * 1024 * 1024),2 )} GB \ + Max_MA {round(get_accelerator().max_memory_allocated() / (1024 * 1024 * 1024),2)} GB \ + CA {round(torch_memory_reserved() / (1024 * 1024 * 1024),2)} GB \ + Max_CA {round(torch_max_memory_reserved() / (1024 * 1024 * 1024))} GB ") + + vm_stats = psutil.virtual_memory() + 
used_GB = round(((vm_stats.total - vm_stats.available) / (1024**3)), 2) + logger.info(f'CPU Virtual Memory: used = {used_GB} GB, percent = {vm_stats.percent}%') + + # get the peak memory to report correct data, so reset the counter for the next call + get_accelerator().reset_peak_memory_stats() + + +def call_to_str(base, *args, **kwargs): + """Construct a string representation of a call. + + Args: + base (str): name of the call + args (tuple, optional): args to ``base`` + kwargs (dict, optional): kwargs supplied to ``base`` + + Returns: + str: A string representation of base(*args, **kwargs) + """ + name = f'{base}(' + if args: + name += ', '.join(repr(arg) for arg in args) + if kwargs: + name += ', ' + if kwargs: + name += ', '.join(f'{key}={repr(arg)}' for key, arg in kwargs.items()) + name += ')' + return name + + +def get_only_unique_item(items): + item_set = set(items) + if len(item_set) != 1: + raise RuntimeError(f"expected there to be only one unique element in {items}") + unique_item, = item_set + + return unique_item + + +def get_global_norm_of_tensors(input_tensors, norm_type=2, mpu=None, use_graph=False, moe_ep_group=None): + """Get norm of an iterable of tensors. + + This is adapted from torch.nn.utils.clip_grad.clip_grad_norm_ and + added functionality to handle model parallel parameters. Taken from Nvidia Megatron. + + Arguments: + input_tensors (Iterable[Tensor]): an iterable of Tensors will have norm computed + norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for + infinity norm. + + Returns: + Total norm of the tensors (viewed as a single vector). + """ + assert isinstance(input_tensors, Iterable), f'expected Iterable type not {type(input_tensors)}' + assert all([torch.is_tensor(t) for t in input_tensors]), f'expected list of only tensors' + + norm_type = float(norm_type) + all_norms = [] + if norm_type == inf: + for t in input_tensors: + all_norms.append(t.data.abs().max().float()) + total_norm = torch.stack(all_norms).max() + device_total_norm = total_norm.to(get_accelerator().current_device_name()) + # Max across model parallel + if mpu is not None: + # For MoE grads, max over model parallel only if MoE-TP is enabled + if moe_ep_group is None or groups._get_expert_model_parallel_world_size() > 1: + dist.all_reduce(device_total_norm, op=dist.ReduceOp.MAX, group=mpu.get_model_parallel_group()) + # If MoE grads and MoE-TP disabled, max over pipeline parallel + elif bwc_pipeline_parallel_world_size(mpu) > 1: + dist.all_reduce(device_total_norm, op=dist.ReduceOp.MAX, group=bwc_pipeline_parallel_group(mpu)) + + # MoE grads: max across expert parallel group + if moe_ep_group is not None: + dist.all_reduce(device_total_norm, op=dist.ReduceOp.MAX, group=moe_ep_group) + total_norm = device_total_norm.to(input_tensors[0].device) + else: + + if 'norm_tensors_compute_buffer' not in graph_cache or len( + graph_cache['norm_tensors_compute_buffer']) != len(input_tensors): + graph_cache['norm_tensors_compute_buffer'] = [ + torch.empty([], dtype=torch.float, device=get_accelerator().current_device_name()) + for t in input_tensors + ] + compute_buffer = graph_cache['norm_tensors_compute_buffer'] + + def _norm_tensors(tensor_list, _compute_buffer, _norm_type): + for i, t in enumerate(tensor_list): + _compute_buffer[i].data.copy_(t.data.float().norm(_norm_type)**_norm_type) + if i != 0: + _compute_buffer[0].data.add_(_compute_buffer[i].data) + + if use_graph: + graph_process(False, _norm_tensors, input_tensors, compute_buffer, norm_type) + else: + 
_norm_tensors(input_tensors, compute_buffer, norm_type) + + device_total_norm = compute_buffer[0].float().detach() + + # Sum across model parallel + if mpu is not None: + # For MoE grads, sum over model parallel only if MoE-TP is enabled + if moe_ep_group is None or groups._get_expert_model_parallel_world_size() > 1: + dist.all_reduce(device_total_norm, op=dist.ReduceOp.SUM, group=mpu.get_model_parallel_group()) + # If MoE grads and MoE-TP disabled, sum over pipeline parallel + elif bwc_pipeline_parallel_world_size(mpu) > 1: + dist.all_reduce(device_total_norm, op=dist.ReduceOp.SUM, group=bwc_pipeline_parallel_group(mpu)) + + # MoE grads: sum across expert parallel group + if moe_ep_group is not None: + dist.all_reduce(device_total_norm, op=dist.ReduceOp.SUM, group=moe_ep_group) + total_norm = device_total_norm.to(input_tensors[0].device).pow(1. / norm_type) + + inf_or_nan = total_norm.isinf().logical_or(total_norm.isnan()) + total_norm.masked_fill_(inf_or_nan, -1) + + return total_norm + + +def clip_tensors_by_global_norm(input_tensors, max_norm=1.0, global_norm=None, mpu=None, eps=1e-6, use_graph=False): + """Clip list of tensors by global norm. + Args: + input_tensors: List of tensors to be clipped + global_norm (float, optional): Precomputed norm. Defaults to None. + mpu (optional): model parallelism unit. Defaults to None. + eps (float, optional): epsilon value added to grad norm. Defaults to 1e-6 + Returns: + float: the global norm + """ + if global_norm is None: + global_norm = get_global_norm_of_tensors(input_tensors, mpu=mpu, use_graph=use_graph) + clip_coef = max_norm / (global_norm + eps) + if clip_coef < 1: + if use_graph: + + def clip_tensors(_tensor_list, _clip_coef_tensor): + for t in _tensor_list: + t.detach().mul_(_clip_coef_tensor) + + if 'clip_coef_tensor' not in graph_cache: + # Alloc memory + graph_cache['clip_coef_tensor'] = torch.tensor(clip_coef, + dtype=torch.float32).to(get_accelerator().device_name()) + clip_coef_tensor = graph_cache['clip_coef_tensor'] + clip_coef_tensor.copy_(torch.tensor(clip_coef, dtype=torch.float32)) + graph_process(False, clip_tensors, input_tensors, clip_coef_tensor) + + else: + for t in input_tensors: + t.detach().mul_(clip_coef) + return global_norm + + +def align_dense_tensors(tensor_list, alignment): + num_elements = sum(t.numel() for t in tensor_list) + remaining = num_elements % alignment + + if remaining: + elements_to_add = alignment - remaining + pad_tensor = torch.zeros(elements_to_add, device=tensor_list[0].device, dtype=tensor_list[0].dtype) + padded_tensor_list = tensor_list + [pad_tensor] + else: + padded_tensor_list = tensor_list + + return padded_tensor_list + + +def all_gather_into_tensor_dp_groups(groups_flat, partitioned_param_groups, dp_process_group): + for group_id, (group_flat, partitioned_params) in enumerate(zip(groups_flat, partitioned_param_groups)): + partition_id = dist.get_rank(group=dp_process_group[group_id]) + dp_world_size = dist.get_world_size(group=dp_process_group[group_id]) + if dp_world_size == 1: + # no groups share optimizer states + # pipeline parallel with bf16 will default call this even if dp size = 1. 
+ continue + dist.all_gather_into_tensor(group_flat, partitioned_params[partition_id], dp_process_group[group_id]) + + +def all_gather_dp_groups(groups_flat, partitioned_param_groups, dp_process_group, start_alignment_factor, + allgather_bucket_size): + if dist.has_all_gather_into_tensor(): + return all_gather_into_tensor_dp_groups(groups_flat, partitioned_param_groups, dp_process_group) + + for group_id, partitioned_params in enumerate(partitioned_param_groups): + # Sequential AllGather Best of both worlds + partition_id = dist.get_rank(group=dp_process_group[group_id]) + dp_world_size = dist.get_world_size(group=dp_process_group[group_id]) + + if dp_world_size == 1: + # no groups share optimizer states + # pipeline parallel with bf16 will default call this even if dp size = 1. + continue + num_shards = max(1, partitioned_params[partition_id].numel() * dp_world_size // allgather_bucket_size) + + shard_size = partitioned_params[partition_id].numel() // num_shards + + # Enforce nccl/rccl alignment of start location of each shard + shard_size = shard_size - (shard_size % start_alignment_factor) + + num_elements = shard_size + + assert shard_size * num_shards <= partitioned_params[partition_id].numel() + + for shard_id in range(num_shards): + + if shard_id == (num_shards - 1): + num_elements = partitioned_params[partition_id].numel() - shard_id * shard_size + + shard_list = [] + for dp_id in range(dp_world_size): + curr_shard = partitioned_params[dp_id].narrow(0, shard_id * shard_size, num_elements).detach() + shard_list.append(curr_shard) + + dist.all_gather(shard_list, shard_list[partition_id], dp_process_group[group_id]) + + +class TLinear(torch.nn.Linear): + + def __init__(self, orig_layer, name=""): + self.name = name + super().__init__(orig_layer.weight.shape[1], orig_layer.weight.shape[0], bias=(orig_layer.bias is not None)) + self.weight.data = transpose(orig_layer.weight.data) + self.bias = orig_layer.bias + self._fwd_func = self._fwd_bias_add if self.bias is not None else self._fwd + + def _fwd(self, input): + return F.linear(input, self.weight) + + def _fwd_bias_add(self, input): + return F.linear(input, self.weight, bias=self.bias) + + def forward(self, input): + return self._fwd_func(input) + + +def get_inactive_params(param_list): + from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus + return [param for param in param_list if (hasattr(param, 'ds_id') and \ + param.ds_status == ZeroParamStatus.NOT_AVAILABLE)] + + +def get_norm_with_moe_layers(non_expert_norm, mpu, expert_tensors, norm_type=2): + """ Compute the global norm with MoE experts + + Inputs: + non_expert_norm (float) : the calculated norm of the non-expert params + expert_tensors (Dict[ep_name, List[Tensor]): Dictionary of expert group name to list of grad tensors + norm_type (int): the norm to use + + Returns: + if norm is (-/+) inf, returns -1 + otherwise the global norm (float) + """ + + def to_tensor(v): + return get_accelerator().FloatTensor(float(v)).detach() + + group_norms = [non_expert_norm] + for exp_name, tensors in expert_tensors.items(): + group_norm = get_global_norm_of_tensors(input_tensors=tensors, + mpu=mpu, + norm_type=norm_type, + use_graph=False, + moe_ep_group=groups._get_expert_parallel_group(exp_name)) + group_norms.append(group_norm) + + # check if all norms are valid + group_norms = torch.stack([to_tensor(norm) for norm in group_norms]) + if group_norms.eq(-1).any(): + return -1 + + # combine norms + if norm_type == inf: + total_norm = group_norms.max().item() + else: + 
total_norm = group_norms.pow(norm_type).sum() + total_norm = total_norm.item()**(1. / norm_type) + if total_norm == float('inf') or total_norm == -float('inf'): + total_norm = -1 + + return total_norm diff --git a/venv/lib/python3.10/site-packages/deepspeed/runtime/weight_quantizer.py b/venv/lib/python3.10/site-packages/deepspeed/runtime/weight_quantizer.py new file mode 100644 index 0000000000000000000000000000000000000000..95d737614e594ee200d13617654714d6ae9d26f6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/runtime/weight_quantizer.py @@ -0,0 +1,153 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from ..module_inject.replace_policy import HFBertLayerPolicy, replace_policies +from deepspeed.accelerator import get_accelerator + + +class WeightQuantization(object): + + def __init__(self, mlp_extra_grouping=True, mp_size=1): + self.dense_scales = [] + self.qkv_scales = [] + self.mlp4hh_scales = [] + self.mlph4h_scales = [] + self.mlp_extra_grouping = mlp_extra_grouping + self.mp_size = mp_size + + def quantize_data(self, data, quantize_bits, groups, key=None): + data_groups = torch.split(data.float().view(-1), data.numel() // groups) + max_d = [max(g.max(), g.min().abs()) for g in data_groups] + data_scale = [float(1 << quantize_bits) / (2 * mx + 1e-5) for mx in max_d] + data_int = [(g * s) for g, s in zip(data_groups, data_scale)] + data_int = [ + di.round().clamp(-(1 << (quantize_bits - 1)), (((1 << (quantize_bits - 1)) - 1))) for di in data_int + ] + data_int = torch.cat(data_int).reshape(data.shape) + data_int = data_int.to(torch.int8) + data_scale = torch.cat([s.unsqueeze(0).unsqueeze(0) for s in data_scale]) + return data_int, data_scale + + def is_mlp(self, data, merge_count=1): + return ((self.mp_size *data.shape[0] * merge_count) / data.shape[1] == 4 or \ + (self.mp_size *data.shape[1] * merge_count) / data.shape[0] == 4) + + def is_qkv(self, data): + return ((self.mp_size * data.shape[0]) / data.shape[1] == 3 or \ + (self.mp_size * data.shape[1]) / data.shape[0] == 3) + + def Quantize(self, value_list, quantize_bits, groups, key, merge_dim=0): + if self.mlp_extra_grouping and self.is_mlp(value_list[0], merge_count=len(value_list)): + groups *= 2 + q_scale = [] + index = 0 + for data in value_list: + data_int, data_scale = self.quantize_data(data, quantize_bits, groups, key) + q_scale.append(data_scale) + value_list[index] = data_int + index += 1 + q_scale = (1 / + torch.cat(q_scale, dim=merge_dim).to(get_accelerator().current_device_name()).view(-1).unsqueeze(0)) + if "mlp.dense_4h_to_h.weight" in key: + self.mlp4hh_scales.append(q_scale) + elif "mlp.dense_h_to_4h.weight" in key: + self.mlph4h_scales.append(q_scale) + elif "attention.query_key_value.weight" in key: + self.qkv_scales.append(q_scale) + else: + self.dense_scales.append(q_scale) + return value_list + + def merge_layer_scales(self, layer_scales): + max_dim = max([s.shape[-1] for s in layer_scales]) + layer_scales = [ + torch.cat((s, torch.zeros((1, max_dim - s.shape[-1]), device=get_accelerator().current_device_name())), + dim=-1) if s.shape[-1] < max_dim else s for s in layer_scales + ] + return torch.cat(layer_scales).unsqueeze(0) + + def merge_scales(self): + all_scales = [] + for dense_scale, qkv_scale, m4hh_scale, mh4h_scale in \ + zip(self.dense_scales, self.qkv_scales, self.mlp4hh_scales, self.mlph4h_scales): + all_scales.append(self.merge_layer_scales([qkv_scale, dense_scale, mh4h_scale, m4hh_scale])) + return 
torch.cat(all_scales) + + def merge_scales_split(self, split_count): + all_scales = [[] for _ in range(split_count)] + for dense_scale, qkv_scale, m4hh_scale, mh4h_scale in \ + zip(self.dense_scales, self.qkv_scales, self.mlp4hh_scales, self.mlph4h_scales): + dense_scale = torch.split(dense_scale, dense_scale.numel() // split_count) + qkv_scale = torch.split(qkv_scale, qkv_scale.numel() // split_count) + m4hh_scale = torch.split(m4hh_scale, m4hh_scale.numel() // split_count) + mh4h_scale = torch.split(mh4h_scale, mh4h_scale.numel() // split_count) + for s in range(split_count): + all_scales[s].append( + torch.cat([ + torch.cat((qkv_scale[s], torch.zeros_like(qkv_scale[s])), dim=1), + torch.cat((dense_scale[s], torch.zeros_like(dense_scale[s])), dim=1), mh4h_scale[s], + m4hh_scale[s] + ]).unsqueeze(0)) + for scales_a in all_scales: + torch.cat(scales_a) + return all_scales + + def sd_quantize_megatron(self, sd, quantize_bits, groups): + keys = sd.keys() + for key in keys: + value_list = [sd[key]] + if "attention.dense.weight" in key or "mlp.dense_4h_to_h.weight" in key or \ + "mlp.dense_h_to_4h.weight" in key or "attention.query_key_value.weight" in key: + value_list = self.Quantize(value_list, quantize_bits, groups, key=key) + sd[key] = value_list[0] + + all_scales = self.merge_scales() + return sd, all_scales + + def model_quantize(self, model, quantize_policy, quantize_bits, groups): + all_scales = [] + + def quantize_fn(layer, policy_cls): + policy = policy_cls(layer) + + _, qkvw, _, dense_w, _, _ = policy.attention() + _, _h4h_w, _, _4hh_w, _ = policy.mlp() + keys = [qkvw, dense_w, _h4h_w, _4hh_w] + layer_scales = [] + + for key in range(len(keys)): + if self.mlp_extra_grouping and self.is_mlp(keys[key]): + data_quantized, data_scale = self.quantize_data(keys[key], quantize_bits, groups * 2) + elif policy_cls is HFBertLayerPolicy and self.is_qkv(keys[key]): + data_quantized, data_scale = self.quantize_data(keys[key], quantize_bits, groups * 3) + else: + data_quantized, data_scale = self.quantize_data(keys[key], quantize_bits, groups) + keys[key].copy_(data_quantized) + layer_scales.append((1 / data_scale.to(get_accelerator().current_device_name()).view(-1).unsqueeze(0))) + all_scales.append(self.merge_layer_scales(layer_scales)) + return layer + + def _quantize_module(model, policies): + for name, child in model.named_children(): + if child.__class__ in policies: + quantize_fn, replace_policy = policies[child.__class__] + setattr(model, name, quantize_fn(child, replace_policy)) + else: + _quantize_module(child, policies) + + return model + + policy = {} + if quantize_policy is not None: + for layer_name, replace_policy in quantize_policy.items(): + policy.update({layer_name: (quantize_fn, replace_policy)}) + else: + for plcy in replace_policies: + policy.update({plcy._orig_layer_class: (quantize_fn, plcy)}) + + quantized_module = _quantize_module(model, policy) + + return quantized_module, torch.cat(all_scales) diff --git a/venv/lib/python3.10/site-packages/numpy.libs/libgfortran-040039e1.so.5.0.0 b/venv/lib/python3.10/site-packages/numpy.libs/libgfortran-040039e1.so.5.0.0 new file mode 100644 index 0000000000000000000000000000000000000000..47f28619b3aab72df5480d0140d9f43ea3859d30 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numpy.libs/libgfortran-040039e1.so.5.0.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14afb3129b1a8b50bc40a3b0820c7f1152ea9bc10121aab152943f7057472886 +size 2686065 diff --git 
a/venv/lib/python3.10/site-packages/numpy/core/_multiarray_umath.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/numpy/core/_multiarray_umath.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e544283ad4443a5efbd9e3cfed8bbb16eb31feba --- /dev/null +++ b/venv/lib/python3.10/site-packages/numpy/core/_multiarray_umath.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe5efe31c55326b072c8fb239a225819211826cb45cd3c74ed0af0030e70f3a1 +size 7426841 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/images/china.jpg b/venv/lib/python3.10/site-packages/sklearn/datasets/images/china.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9e885acfcf3f5a562290d081e204046372238233 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/images/china.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8378025ad2519d649d02e32bd98990db4ab572357d9f09841c2fbfbb4fefad29 +size 196653 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/api-v1-jd-42585.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/api-v1-jd-42585.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..2d2b568ee4692ad61c20ab051723efceb318b816 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/api-v1-jd-42585.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ccbf138e0663895f9cf511136bc6395c153f6238af2eacb6a367e86e15d1a71 +size 1492 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/api-v1-jdf-42585.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/api-v1-jdf-42585.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..9564b2e437ee328b195f6289af99be51032c64d0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_42585/api-v1-jdf-42585.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0985045a454c8186b4e690ebefb6cea1ef7c13292c98d50abda470a0ff3ad425 +size 312