diff --git a/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/backend.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/backend.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f076f2ea2fb1a96192a46c1a64c49d3c033933c Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/backend.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/ccl.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/ccl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d584a071065cbdd10b6d782aa5226c2f3a50adb3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/ccl.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/config.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc061af0f2946131bb4dadf9e00b6788ec919d08 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/torch.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/torch.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b31ee3497113fb63e325691b069a01a9239fdea6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/torch.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/linear/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/linear/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a27f1c3eaee7cfcd51fe2138b66fa90128cb655c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/linear/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .optimized_linear import OptimizedLinear +from .config import LoRAConfig, QuantizationConfig diff --git a/venv/lib/python3.10/site-packages/deepspeed/linear/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/linear/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4b83a11cd2a516c753d39ecebac8404c6c511ac Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/linear/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/linear/__pycache__/config.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/linear/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a3c57981e1905735f93d6e38da062a18e7834da4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/linear/__pycache__/config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/linear/__pycache__/optimized_linear.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/linear/__pycache__/optimized_linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..945d510b04c46a5153d32dfb4cca446902351c2b Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/linear/__pycache__/optimized_linear.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/linear/__pycache__/quantization.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/linear/__pycache__/quantization.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a979205546c5930627f4a7195d82e6b51e7cf85 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/linear/__pycache__/quantization.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/linear/config.py b/venv/lib/python3.10/site-packages/deepspeed/linear/config.py new file mode 100644 index 0000000000000000000000000000000000000000..ae9050a3c92b0fb28badc63b6d9e319a3011df62 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/linear/config.py @@ -0,0 +1,39 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from dataclasses import dataclass + + +@dataclass +class LoRAConfig: + """ + Configuration settings for LoRAOptimizedLinear. + + Attributes: + lora_r (int): LoRA attention dimension, also know as the rank. Defaults is 64. + lora_alpha (float): LoRA scaling factor, default is 16. + base_weight_sharding (int): The degree to which the base weights are sharded, + should typically be set to the data-parallel world size to maximize the memory + reduction benefits. Defaults to 1, which means this feature is disabled. + """ + lora_r: int = 64 + lora_alpha: float = 16. + base_weight_sharding: int = 1 + + +@dataclass +class QuantizationConfig: + """ + Configuration settings for quantization for LoRAOptimizedLinear, QuantizedLinear, + and QuantizedParameter + + Attributes: + q_bits (int): The number of bits used for quantization. Default is 8. + mantissa_bits (int): The number of bits reserved for the mantissa in fixed-point quantization. Default is 3. + group_size (int): The size of the group used for quantization. Default is 512. 
+ """ + q_bits: int = 8 + mantissa_bits: int = 3 + group_size: int = 512 diff --git a/venv/lib/python3.10/site-packages/deepspeed/linear/optimized_linear.py b/venv/lib/python3.10/site-packages/deepspeed/linear/optimized_linear.py new file mode 100644 index 0000000000000000000000000000000000000000..138bd493ffc78937a2224ee55bb9e4d6d1f2a404 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/linear/optimized_linear.py @@ -0,0 +1,150 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import math +import torch.nn as nn +import torch.nn.functional as F +from dataclasses import is_dataclass +from deepspeed.accelerator import get_accelerator +import deepspeed.comm as dist + +from .config import LoRAConfig, QuantizationConfig +from .quantization import QuantizedParameter, QuantizedLinear + + +class OptimizedLinear(nn.Module): + """ + Optimized version of nn.Linear that adds features such as: + * LoRA w. base weight sharding + * FP [6,8,12] quantization + + Arguments: + input_dim: Required: size of each input sample + output_dim: Required: size of each output sample + bias: Optional: If set to False, the layer will not learn an additive bias. Default: False + lora_config: Optional: LoRAConfig defining lora features and base-weight-sharding degree + quantization_config: Optional: QuantizationConfig defining quantization features + dtype: Optional: parameter dtype, only supports bfloat16 currently + + Returns: + Returns a new nn.Module depending on the input config. Either native + torch.nn.Linear, QuantizedLinear, or the full-featured DSOptimizedLinear. + """ + + def __new__(self, + input_dim: int, + output_dim: int, + bias: bool = False, + lora_config: LoRAConfig = None, + quantization_config: QuantizationConfig = None, + dtype=torch.bfloat16): + + if quantization_config is not None and not is_dataclass(quantization_config): + raise ValueError(f"Expecting QuantizationConfig but received {type(quantization_config)}") + if lora_config is not None and not is_dataclass(lora_config): + raise ValueError(f"Expecting LoRAConfig but received {type(lora_config)}") + if lora_config is None and quantization_config is None: + # Everything disabled, fall back to normal nn.Linear + self = nn.Linear(input_dim, output_dim, bias=bias, dtype=dtype) + + elif lora_config: + # lora enabled, quantization may or may not be + self = LoRAOptimizedLinear(input_dim=input_dim, + output_dim=output_dim, + bias=bias, + lora_config=lora_config, + quantization_config=quantization_config, + dtype=dtype) + + elif quantization_config: + # only quantization enabled, no lora + self = QuantizedLinear(input_dim=input_dim, + output_dim=output_dim, + bias=bias, + quantization_config=quantization_config, + dtype=dtype) + return self + + +class LoRAOptimizedLinear(nn.Module): + + def __init__(self, + input_dim: int, + output_dim: int, + bias: bool = False, + lora_config: LoRAConfig = None, + quantization_config: QuantizationConfig = None, + device=None, + dtype=torch.bfloat16): + super().__init__() + self.input_dim = input_dim + self.output_dim = output_dim + self.bias = bias + self.lora_config = lora_config + self.quantization_config = quantization_config + device = get_accelerator().current_device() if device is None else device + assert self.lora_config is not None, "DSOptimizedLinear requires a LoRA config" + + self.zero_shards = self.lora_config.base_weight_sharding + self.sharded_weight_size = int(float(self.input_dim) // self.zero_shards) + w = 
torch.nn.Parameter(torch.empty((self.output_dim, self.sharded_weight_size), dtype=dtype)) + torch.nn.init.xavier_uniform_(w) + + if self.quantization_config is not None: + assert dtype == torch.bfloat16, "only bfloat16 is supported when using quantization" + self.base_weight = QuantizedParameter(w, quantization_config=quantization_config) + else: + self.base_weight = w + + self.base_weight.requires_grad = False + + # Use RS lora for now. + self.lora_scaling_factor = self.lora_config.lora_alpha / math.sqrt(self.lora_config.lora_r) + # Keeping lora weights in bf16 precision for ease of training. + self.lora_weight_1 = nn.Linear(self.input_dim, + self.lora_config.lora_r, + bias=self.bias, + device=device, + dtype=dtype) + self.lora_weight_2 = nn.Linear(self.lora_config.lora_r, + self.output_dim, + bias=self.bias, + device=device, + dtype=dtype) + self.lora_weight_1.weight.requires_grad = True + self.lora_weight_2.weight.requires_grad = True + + def full_weight(self): + # This assumes weights are evenly sharded across gpus. which might not be correct. + # in that case, we should flatten before all_gather. + local_weight = self.base_weight.dequantized() if isinstance(self.base_weight, + QuantizedParameter) else self.base_weight + tensor_list = [ + torch.zeros_like(local_weight, device=local_weight.device, dtype=local_weight.dtype) + for _ in range(self.zero_shards) + ] + dist.all_gather(tensor_list, local_weight) + weight = nn.Parameter(torch.cat([tensor for tensor in tensor_list], dim=1)) + return weight + + def linear_without_F_linear(self, input, weight): + output = torch.mm(input.reshape(-1, input.shape[-1]), weight) + output = output.view(*input.shape[:-1], weight.shape[1]) + return output + + def forward(self, input_tensor): + # Gather the sharded base weight + if self.zero_shards > 1: + with torch.no_grad(): + base_weight = self.full_weight() + elif self.quantization_config: + base_weight = self.base_weight.dequantized() + else: + base_weight = self.base_weight + + base_weight_output = F.linear(input_tensor, base_weight) + lora_output = self.lora_weight_2(self.lora_weight_1(input_tensor)) + return base_weight_output + self.lora_scaling_factor * lora_output diff --git a/venv/lib/python3.10/site-packages/deepspeed/linear/quantization.py b/venv/lib/python3.10/site-packages/deepspeed/linear/quantization.py new file mode 100644 index 0000000000000000000000000000000000000000..f5343af45fb8987f045d8874f45959be6fab8cc3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/linear/quantization.py @@ -0,0 +1,137 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import copy +import torch +import torch.nn as nn +import torch.nn.functional as F + +from typing import Optional + +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.fp_quantizer import Quantizer, FP_Quantize +from .config import QuantizationConfig + + +class QuantizedParameter(nn.Parameter): + """ + Quantized parameter class that implements weight quantization. Weights + are stored in quantized form on GPUs, and can be dequantized on-the-fly when + needed by the model. The weights are actually quantized during any `.to(device)`. + + Arguments: + data (Tensor): parameter tensor. + requires_grad (bool, optional): if the parameter requires gradient. Defaults + to False and is not supported to be True. Argument provided only for interface + compatibility with torch.nn.Parameter. 
+ quantization_config (QuantizationConfig, optional): + quantizer (Quantizer, optional): Defaults to FP_Quantize but can be any quantizer + that implements deepspeed.ops.fp_quantizer.Quantizer. This argument is also + required since the quantizer is stashed in the Parameter itself, some models + may clone the Parameter by passing an attribute __dict__. For an example, see + tests/unit/linear/test_quant_param.py::TestQuantParam::test_hf_clone + """ + + def __new__( + cls, + data: Optional[torch.Tensor] = None, + requires_grad: bool = False, # quantized weights must be frozen + quantization_config: QuantizationConfig = None, + quantizer: Quantizer = None, + ): + if requires_grad: + raise ValueError(f"requires_grad=True is not supported with QuantizedParameter") + if data is None: + data = torch.empty(0) + self = torch.Tensor._make_subclass(cls, data, requires_grad) + self.quantization_config = QuantizationConfig() if quantization_config is None else quantization_config + if quantizer is not None: + self.quantizer = quantizer + else: + # if FPQuantizerBuilder is not compatible in this env this init will fail + self.quantizer = FP_Quantize(group_size=self.quantization_config.group_size) + self._ensure_quantized(self) + return self + + def _ensure_quantized(self, tensor: torch.Tensor): + # If the tensor is on the accelerator and is not quantized, then quantize it in-place. + if get_accelerator().on_accelerator(tensor) and tensor.dtype != torch.int8: + with get_accelerator().stream(get_accelerator().current_stream(tensor.device)): + tensor.data = self.quantizer.quantize(tensor.data, + q_bits=self.quantization_config.q_bits, + q_mantisa_bits=self.quantization_config.mantissa_bits) + assert tensor.dtype == torch.int8 + + def dequantized(self) -> torch.Tensor: + """ + Return a tensor containing the dequantized weights of this parameter. + """ + if get_accelerator().on_accelerator(self.data) and self.data.dtype == torch.int8: + with get_accelerator().stream(get_accelerator().current_stream(self.data.device)): + return self.quantizer.dequantize(self.data, + q_bits=self.quantization_config.q_bits, + q_mantisa_bits=self.quantization_config.mantissa_bits) + return self.data + + def __getstate__(self): + state = self.__dict__ + state["data"] = self.data + state["quantization_config"] = self.quantization_config + state["requires_grad"] = self.requires_grad + return state + + def __setstate__(self, state): + self.quantizer = state["quantizer"] + self.quantization_config = state["quantization_config"] + self.data = state["data"] + self.requires_grad = state["requires_grad"] + + def __deepcopy__(self, memo): + new_instance = type(self).__new__(type(self)) + state = self.__getstate__() + new_instance.__setstate__(state) + new_instance.quantizer = copy.deepcopy(state["quantizer"]) + new_instance.quantization_config = copy.deepcopy(state["quantization_config"]) + new_instance.data = copy.deepcopy(state["data"]) + return new_instance + + def __copy__(self): + new_instance = type(self).__new__(type(self)) + state = self.__getstate__() + new_instance.__setstate__(state) + return new_instance + + def cuda(self, device=None, non_blocking=False): + return self.to(device="cuda" if device is None else device, non_blocking=non_blocking) + + def to(self, *args, **kwargs): + """ + Move the parameter to the given device. Then, if the device is a cuda device, + quantize it. 
+ """ + tensor = super().to(*args, **kwargs) + self._ensure_quantized(tensor) + return tensor + + +class QuantizedLinear(nn.Linear): + """ + Linear layer that implements weight quantization. Parameters + are stored via `QuantizedParameter` and are dequantized on-the-fly during any + forward pass. + """ + + def __init__(self, + input_dim: int, + output_dim: int, + bias: bool = False, + quantization_config: QuantizationConfig = None, + dtype=torch.bfloat16): + super().__init__(input_dim, output_dim, bias=bias, dtype=dtype) + assert dtype == torch.bfloat16, "currently only supports bfloat16 dtype" + self.weight = QuantizedParameter(self.weight.data, quantization_config=quantization_config) + + def forward(self, input: torch.Tensor) -> torch.Tensor: + return F.linear(input, self.weight.dequantized(), self.bias) diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ad95c58f76090188b5d9c73e1e3df4eb2b2c678f --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .transformers.ds_transformer import DeepSpeedTransformerInference +from .transformers.clip_encoder import DSClipEncoder diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5a77d74ae6402f6261920c4ab2bbf3152777314 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c5067f71c8faf166bc78e88f9b62e8627dda7c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fec0b3d0f981de022e5ab05df7f0bfd392464c60 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/__pycache__/unet.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/__pycache__/unet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dbf1c269427777687a489133ec8f67bbe95dcca3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/__pycache__/unet.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/__pycache__/vae.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/__pycache__/vae.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c205a1f62f06da780e277b7c6c92362e9ff5f92 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/__pycache__/vae.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/unet.py b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/unet.py new file mode 100644 index 0000000000000000000000000000000000000000..8d5ddd95437a42de5cb8552c529d20e422c792f2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/unet.py @@ -0,0 +1,81 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from deepspeed.accelerator import get_accelerator +from ..features.cuda_graph import CUDAGraph + + +class DSUNet(CUDAGraph, torch.nn.Module): + + def __init__(self, unet, enable_cuda_graph=True): + super().__init__(enable_cuda_graph=enable_cuda_graph) + self.unet = unet + # SD pipeline accesses this attribute + self.in_channels = unet.in_channels + self.device = self.unet.device + self.dtype = self.unet.dtype + self.config = self.unet.config + self.fwd_count = 0 + self.unet.requires_grad_(requires_grad=False) + self.unet.to(memory_format=torch.channels_last) + self.cuda_graph_created = False + + def _graph_replay(self, *inputs, **kwargs): + for i in range(len(inputs)): + if torch.is_tensor(inputs[i]): + self.static_inputs[i].copy_(inputs[i]) + for k in kwargs: + if torch.is_tensor(kwargs[k]): + self.static_kwargs[k].copy_(kwargs[k]) + get_accelerator().replay_graph(self._cuda_graphs) + return self.static_output + + def forward(self, *inputs, **kwargs): + if self.enable_cuda_graph: + if self.cuda_graph_created: + outputs = self._graph_replay(*inputs, **kwargs) + else: + self._create_cuda_graph(*inputs, **kwargs) + outputs = self._graph_replay(*inputs, **kwargs) + return outputs + else: + return self._forward(*inputs, **kwargs) + + def _create_cuda_graph(self, *inputs, **kwargs): + # warmup to create the workspace and cublas handle + cuda_stream = torch.cuda.Stream() + cuda_stream.wait_stream(torch.cuda.current_stream()) + with torch.cuda.stream(cuda_stream): + for i in range(3): + ret = self._forward(*inputs, **kwargs) + torch.cuda.current_stream().wait_stream(cuda_stream) + + # create cuda_graph and assign static_inputs and static_outputs + self._cuda_graphs = get_accelerator().create_graph() + self.static_inputs = inputs + self.static_kwargs = kwargs + + with get_accelerator().capture_to_graph(self._cuda_graphs): + self.static_output = self._forward(*self.static_inputs, **self.static_kwargs) + + self.cuda_graph_created = True + + def _forward(self, + sample, + timestamp, + encoder_hidden_states, + return_dict=True, + cross_attention_kwargs=None, + timestep_cond=None, + added_cond_kwargs=None): + if cross_attention_kwargs: + return self.unet(sample, + timestamp, + encoder_hidden_states, + return_dict, + cross_attention_kwargs=cross_attention_kwargs) + else: + return self.unet(sample, timestamp, encoder_hidden_states, return_dict) diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/vae.py b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/vae.py new file mode 100644 index 0000000000000000000000000000000000000000..ce50ade647a8b85c1858e9d47e1f6a1133379467 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/vae.py @@ -0,0 +1,151 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from deepspeed.accelerator import get_accelerator +from ..features.cuda_graph import CUDAGraph + + +class DSVAE(CUDAGraph, torch.nn.Module): + + def __init__(self, vae, enable_cuda_graph=True): + super().__init__(enable_cuda_graph=enable_cuda_graph) + self.vae = vae + self.config = vae.config + self.device = self.vae.device + self.dtype = self.vae.dtype + self.vae.requires_grad_(requires_grad=False) + self.decoder_cuda_graph_created = False + self.encoder_cuda_graph_created = False + self.all_cuda_graph_created = False + + def _graph_replay_decoder(self, *inputs, **kwargs): + for i in range(len(inputs)): + if torch.is_tensor(inputs[i]): + self.static_decoder_inputs[i].copy_(inputs[i]) + for k in kwargs: + if torch.is_tensor(kwargs[k]): + self.static_decoder_kwargs[k].copy_(kwargs[k]) + get_accelerator().replay_graph(self._decoder_cuda_graph) + return self.static_decoder_output + + def _decode(self, x, return_dict=True, generator=None): + return self.vae.decode(x, return_dict=return_dict) + + def _create_cuda_graph_decoder(self, *inputs, **kwargs): + # warmup to create the workspace and cublas handle + cuda_stream = torch.cuda.Stream() + cuda_stream.wait_stream(torch.cuda.current_stream()) + with torch.cuda.stream(cuda_stream): + for i in range(3): + ret = self._decode(*inputs, **kwargs) + torch.cuda.current_stream().wait_stream(cuda_stream) + + # create cuda_graph and assign static_inputs and static_outputs + self._decoder_cuda_graph = get_accelerator().create_graph() + self.static_decoder_inputs = inputs + self.static_decoder_kwargs = kwargs + + with get_accelerator().capture_to_graph(self._decoder_cuda_graph): + self.static_decoder_output = self._decode(*self.static_decoder_inputs, **self.static_decoder_kwargs) + + self.decoder_cuda_graph_created = True + + def decode(self, *inputs, **kwargs): + if self.enable_cuda_graph: + if self.decoder_cuda_graph_created: + outputs = self._graph_replay_decoder(*inputs, **kwargs) + else: + self._create_cuda_graph_decoder(*inputs, **kwargs) + outputs = self._graph_replay_decoder(*inputs, **kwargs) + return outputs + else: + return self._decode(*inputs, **kwargs) + + def _graph_replay_encoder(self, *inputs, **kwargs): + for i in range(len(inputs)): + if torch.is_tensor(inputs[i]): + self.static_encoder_inputs[i].copy_(inputs[i]) + for k in kwargs: + if torch.is_tensor(kwargs[k]): + self.static_encoder_kwargs[k].copy_(kwargs[k]) + get_accelerator().replay_graph(self._encoder_cuda_graph) + return self.static_encoder_output + + def _encode(self, x, return_dict=True): + return self.vae.encode(x, return_dict=return_dict) + + def _create_cuda_graph_encoder(self, *inputs, **kwargs): + # warmup to create the workspace and cublas handle + cuda_stream = torch.cuda.Stream() + cuda_stream.wait_stream(torch.cuda.current_stream()) + with torch.cuda.stream(cuda_stream): + for i in range(3): + ret = self._encode(*inputs, **kwargs) + torch.cuda.current_stream().wait_stream(cuda_stream) + + # create cuda_graph and assign static_inputs and static_outputs + self._encoder_cuda_graph = get_accelerator().create_graph() + self.static_encoder_inputs = inputs + self.static_encoder_kwargs = kwargs + + with get_accelerator().capture_to_graph(self._encoder_cuda_graph): + self.static_encoder_output = self._encode(*self.static_encoder_inputs, **self.static_encoder_kwargs) + + self.encoder_cuda_graph_created = True + + def encode(self, *inputs, **kwargs): + if self.enable_cuda_graph: + if 
self.encoder_cuda_graph_created: + outputs = self._graph_replay_encoder(*inputs, **kwargs) + else: + self._create_cuda_graph_encoder(*inputs, **kwargs) + outputs = self._graph_replay_encoder(*inputs, **kwargs) + return outputs + else: + return self._encode(*inputs, **kwargs) + + def _graph_replay(self, *inputs, **kwargs): + for i in range(len(inputs)): + if torch.is_tensor(inputs[i]): + self.static_inputs[i].copy_(inputs[i]) + for k in kwargs: + if torch.is_tensor(kwargs[k]): + self.static_kwargs[k].copy_(kwargs[k]) + get_accelerator().replay_graph(self._all_cuda_graph) + return self.static_output + + def forward(self, *inputs, **kwargs): + if self.enable_cuda_graph: + if self.cuda_graph_created: + outputs = self._graph_replay(*inputs, **kwargs) + else: + self._create_cuda_graph(*inputs, **kwargs) + outputs = self._graph_replay(*inputs, **kwargs) + return outputs + else: + return self._forward(*inputs, **kwargs) + + def _create_cuda_graph(self, *inputs, **kwargs): + # warmup to create the workspace and cublas handle + cuda_stream = torch.cuda.Stream() + cuda_stream.wait_stream(torch.cuda.current_stream()) + with torch.cuda.stream(cuda_stream): + for i in range(3): + ret = self._forward(*inputs, **kwargs) + torch.cuda.current_stream().wait_stream(cuda_stream) + + # create cuda_graph and assign static_inputs and static_outputs + self._all_cuda_graph = get_accelerator().create_graph() + self.static_inputs = inputs + self.static_kwargs = kwargs + + with get_accelerator().capture_to_graph(self._all_cuda_graph): + self.static_output = self._forward(*self.static_inputs, **self.static_kwargs) + + self.all_cuda_graph_created = True + + def _forward(self, sample, timestamp, encoder_hidden_states, return_dict=True): + return self.vae(sample, timestamp, encoder_hidden_states, return_dict) diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/features/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/features/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c5067f71c8faf166bc78e88f9b62e8627dda7c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/features/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/features/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/features/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de90235b29bb33d8cb6c49a276f25940357ca0a6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/features/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/features/__pycache__/cuda_graph.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/features/__pycache__/cuda_graph.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..147fe05b16ef0bc7f673e17ddd45bc4ec71dc8e8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/features/__pycache__/cuda_graph.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/features/cuda_graph.py b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/features/cuda_graph.py new file mode 100644 index 0000000000000000000000000000000000000000..fc3a2951665426b5c98112ce307e395993644f8b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/features/cuda_graph.py @@ -0,0 +1,27 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from abc import ABC, abstractmethod + + +class CUDAGraph(ABC): + + def __init__(self, enable_cuda_graph=False): + super().__init__() + self.enable_cuda_graph = enable_cuda_graph + + @abstractmethod + def _create_cuda_graph(self): + """ + Create CUDA graph(s) + """ + raise NotImplementedError + + @abstractmethod + def _graph_replay(self): + """ + Replay CUDA graph(s) + """ + raise NotImplementedError diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c5067f71c8faf166bc78e88f9b62e8627dda7c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d589b208f02064281179a2465e74b15b12f46c17 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/clip_encoder.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/clip_encoder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bda0bc76bc38ab43f8c3168cfbe1947499ca6947 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/clip_encoder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa1cedc30ac94874a21503b12b4bf7288c94cfe6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_bert.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_bert.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a4dc1e0d0fa53963945b62f5c02810fba18907f Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_bert.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_bloom.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_bloom.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9959befb48ba7f088aef46229e40ed552ec8aafc Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_bloom.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_gpt.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_gpt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cebcf850559144d0c92971e13fe388be1d28b7ee Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_gpt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_llama2.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_llama2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5a69a1334ae18fab9ab9229cf8068e8331b4671 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_llama2.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_megatron_gpt.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_megatron_gpt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12f51a8db21a4135113ab70c152485b9b61fe363 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_megatron_gpt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_opt.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_opt.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..446e26e979882fbefa35d6aaef54e4ff82e7c6cd Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_opt.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_transformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..917cf87ed9c72d47b5374fb26eab97adef0257d0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_transformer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/clip_encoder.py b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/clip_encoder.py new file mode 100644 index 0000000000000000000000000000000000000000..848a5b48dcf1336a4c80c20a9e21388cdff9e514 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/clip_encoder.py @@ -0,0 +1,77 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from deepspeed.accelerator import get_accelerator +from ..features.cuda_graph import CUDAGraph + + +class DSClipEncoder(CUDAGraph, torch.nn.Module): + + def __init__(self, enc, enable_cuda_graph=False): + super().__init__(enable_cuda_graph=enable_cuda_graph) + enc.text_model._build_causal_attention_mask = self._build_causal_attention_mask + self.enc = enc + self.device = self.enc.device + self.dtype = self.enc.dtype + self.cuda_graph_created = [False, False] + self.static_inputs = [None, None] + self.static_kwargs = [None, None] + self.static_output = [None, None] + self._cuda_graphs = [None, None] + self.iter = 0 + self.config = self.enc.config + + def _build_causal_attention_mask(self, bsz, seq_len, dtype): + mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype, device=get_accelerator().current_device_name()) + mask.fill_(torch.tensor(torch.finfo(dtype).min)) + mask.triu_(1) + mask = mask.unsqueeze(1) + return mask + + def _graph_replay(self, *inputs, **kwargs): + for i in range(len(inputs)): + if torch.is_tensor(inputs[i]): + self.static_inputs[self.iter][i].copy_(inputs[i]) + for k in kwargs: + if torch.is_tensor(kwargs[k]): + self.static_kwargs[self.iter][k].copy_(kwargs[k]) + get_accelerator().replay_graph(self._cuda_graphs[self.iter]) + return self.static_output[self.iter] + + def forward(self, *inputs, **kwargs): + if self.enable_cuda_graph: + if self.cuda_graph_created[self.iter]: + outputs = self._graph_replay(*inputs, **kwargs) + else: + self._create_cuda_graph(*inputs, **kwargs) + outputs = self._graph_replay(*inputs, **kwargs) + self.iter = (self.iter + 1) % 2 + return outputs + else: + return self.enc(*inputs, **kwargs) + + def _create_cuda_graph(self, *inputs, **kwargs): + # warmup to create the workspace and cublas handle + cuda_stream = torch.cuda.Stream() + cuda_stream.wait_stream(torch.cuda.current_stream()) + with torch.cuda.stream(cuda_stream): + for i in range(3): + ret = self._forward(*inputs, **kwargs) + torch.cuda.current_stream().wait_stream(cuda_stream) + + # create cuda_graph and assign static_inputs and static_outputs + self._cuda_graphs[self.iter] = get_accelerator().create_graph() + self.static_inputs[self.iter] = inputs + self.static_kwargs[self.iter] = kwargs + + with get_accelerator().capture_to_graph(self._cuda_graphs[self.iter]): + self.static_output[self.iter] = self._forward(*self.static_inputs[self.iter], + **self.static_kwargs[self.iter]) + + self.cuda_graph_created[self.iter] = True + + def _forward(self, *inputs, **kwargs): + return self.enc(*inputs, **kwargs) diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_base.py b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_base.py new file mode 100644 index 0000000000000000000000000000000000000000..b4b113904997fd124aecdfe51529aab6c9e1b25c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_base.py @@ -0,0 +1,15 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch.nn as nn + + +class DeepSpeedTransformerBase(nn.module): + + def __init__(self): + pass + + # this would be the new clean base class that will replace DeepSpeedTransformerInference. + # we currently don't know how this will look like but keeping it here as a placeholder. 
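# --- Editorial note (not part of the diff above): the DSUNet, DSVAE and DSClipEncoder wrappers
# added in this diff all follow the same CUDA-graph recipe: warm up on a side stream so
# workspaces and cuBLAS handles exist before capture, capture one forward pass with static
# input/output tensors, then replay the graph after copying fresh data into those static
# tensors. The sketch below is a minimal, standalone illustration of that pattern using plain
# torch.cuda.CUDAGraph; the class name GraphedModule and the single-tensor-input assumption are
# hypothetical, and DeepSpeed itself routes these calls through get_accelerator() instead.

import torch


class GraphedModule(torch.nn.Module):
    """Minimal capture/replay wrapper, assuming one positional CUDA tensor input (inference only)."""

    def __init__(self, module):
        super().__init__()
        self.module = module
        self.graph = None
        self.static_input = None
        self.static_output = None

    def _capture(self, x):
        # Warm-up passes on a side stream, mirroring the three warm-up iterations above.
        s = torch.cuda.Stream()
        s.wait_stream(torch.cuda.current_stream())
        with torch.cuda.stream(s):
            for _ in range(3):
                self.module(x)
        torch.cuda.current_stream().wait_stream(s)

        # Capture a single forward pass; the tensors touched here become the static buffers.
        self.static_input = x.clone()
        self.graph = torch.cuda.CUDAGraph()
        with torch.cuda.graph(self.graph):
            self.static_output = self.module(self.static_input)

    def forward(self, x):
        if self.graph is None:
            self._capture(x)
        # Replay: copy new data into the captured input buffer, then replay the recorded kernels.
        self.static_input.copy_(x)
        self.graph.replay()
        return self.static_output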
diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_bert.py b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_bert.py new file mode 100644 index 0000000000000000000000000000000000000000..13075553ec8b6d818744cc5404ef4db31283ac8e --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_bert.py @@ -0,0 +1,20 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference + + +class DeepSpeedBERTInference(DeepSpeedTransformerInference): + """Initialize the DeepSpeed BERT Transformer Layer. + """ + + def __init__(self, + config, + mp_group=None, + quantize_scales=None, + quantize_groups=1, + merge_count=1, + mlp_extra_grouping=False): + super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping) diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_bloom.py b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_bloom.py new file mode 100644 index 0000000000000000000000000000000000000000..c48c7ed58ae5e2746c513a1acea509bbfd129784 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_bloom.py @@ -0,0 +1,20 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference + + +class DeepSpeedBloomInference(DeepSpeedTransformerInference): + """Initialize the DeepSpeed Bloom Transformer Layer. + """ + + def __init__(self, + config, + mp_group=None, + quantize_scales=None, + quantize_groups=1, + merge_count=1, + mlp_extra_grouping=False): + super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping) diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_gpt.py b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_gpt.py new file mode 100644 index 0000000000000000000000000000000000000000..3c3baed1f6186a89ed2b852f27ad56a5ab00d9f6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_gpt.py @@ -0,0 +1,20 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference + + +class DeepSpeedGPTInference(DeepSpeedTransformerInference): + """Initialize the DeepSpeed GPT Transformer Layer. + """ + + def __init__(self, + config, + mp_group=None, + quantize_scales=None, + quantize_groups=1, + merge_count=1, + mlp_extra_grouping=False): + super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping) diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_llama2.py b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_llama2.py new file mode 100644 index 0000000000000000000000000000000000000000..7d9eb4113a8a52103a0b0aebeadd74741507fabd --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_llama2.py @@ -0,0 +1,69 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from deepspeed import comm as dist +from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference + +inference_module = None + + +class DeepSpeedLlama2Inference(DeepSpeedTransformerInference): + """Initialize the DeepSpeed OPT Transformer Layer. + """ + + def __init__(self, + config, + mp_group=None, + quantize_scales=None, + quantize_groups=1, + merge_count=1, + mlp_extra_grouping=False): + super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping) + + def forward(self, *args, **kwargs): + + input = args[0] + input_mask = None + # Allocate memory only on first layer forward + if self.config.layer_id == 0 and self._alloc_workspace: + self.allocate_workspace(self.config.hidden_size, self.config.heads, + input.size()[1], + input.size()[0], DeepSpeedTransformerInference.layer_id, self.config.mp_size, + self.config.bigscience_bloom, + dist.get_rank() if dist.is_initialized() else 0, self.config.max_out_tokens, + self.config.min_out_tokens) + self._alloc_workspace = False + + get_present = True + + # We set the prev key/value to None when there is a prompt + if input.shape[1] > 1: + self.layer_past = None + layer_past = self.layer_past + + input_type = input.dtype + + if (self.config.dtype in [torch.float16, torch.bfloat16, torch.int8]) \ + and input.dtype == torch.float: + target_dtype = torch.half if self.dtype == torch.int8 else self.dtype + input = input.to(target_dtype) + + with torch.no_grad(): + attention_output, key, value, context_outputtn_ctx, inp_norm = \ + self.attention(input, + input_mask, + None, + layer_past, + get_present, + None, None, None, + self.norm_w, + self.norm_b, + None) + self.layer_past = (key, value) + output = self.mlp(attention_output, input, inp_norm, self.attention.attn_ob) + + output = output.to(input_type) + return output diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_megatron_gpt.py b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_megatron_gpt.py new file mode 100644 index 0000000000000000000000000000000000000000..055ed6d27d7db4cb39cf868d5c189ef517f4a08a --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_megatron_gpt.py @@ -0,0 +1,20 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference + + +class DeepSpeedMegatronGPTInference(DeepSpeedTransformerInference): + """Initialize the DeepSpeed Megatron GPT Transformer Layer. + """ + + def __init__(self, + config, + mp_group=None, + quantize_scales=None, + quantize_groups=1, + merge_count=1, + mlp_extra_grouping=False): + super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping) diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_opt.py b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_opt.py new file mode 100644 index 0000000000000000000000000000000000000000..7bc5524d71c7155fad0b40eafeca490705f3e324 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_opt.py @@ -0,0 +1,20 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference + + +class DeepSpeedOPTInference(DeepSpeedTransformerInference): + """Initialize the DeepSpeed OPT Transformer Layer. + """ + + def __init__(self, + config, + mp_group=None, + quantize_scales=None, + quantize_groups=1, + merge_count=1, + mlp_extra_grouping=False): + super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping) diff --git a/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_transformer.py b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..d87d0de997b5155449388dac7c0d2a116b676e91 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_transformer.py @@ -0,0 +1,199 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import torch.nn as nn +from deepspeed import comm as dist +from deepspeed.utils.logging import log_dist + +from deepspeed.ops.transformer.inference.ds_mlp import DeepSpeedMLP +from deepspeed.ops.transformer.inference.ds_attention import DeepSpeedSelfAttention, BloomSelfAttention +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import InferenceBuilder +import deepspeed +if deepspeed.HAS_TRITON: + from deepspeed.ops.transformer.inference.triton.mlp import TritonMLP + from deepspeed.ops.transformer.inference.triton.attention import TritonSelfAttention + +inference_module = None + + +class DeepSpeedTransformerInference(nn.Module): + """Initialize the DeepSpeed Transformer Layer. + Arguments: + layer_id: The layer index starting from 0, e.g. if model has 24 transformer layers, + layer_id will be 0,1,2...23 when each layer object is instantiated + config: An object of DeepSpeedInferenceConfig + mp_group: Model parallelism group initialized on the modeling side. + quantize_scales: This argument groups all the layers' scales used for quantization + quantize_groups: Number of groups used for quantizing the model + merge_count: Shows the number of model-parallel checkpoints merged before running inference. + We use this argument to control the quantization scale for the model parameters if a bigger + quantize-grouping than 1 is used. + mlp_extra_grouping: This flag is used to show a 2x higher number of groups used for the MLP part + of a Transformer layer. We use this feature for quantization to reduce the convergence impact + for specific downstream tasks. 
+ """ + layer_id = 0 + + def __init__(self, + config, + mp_group=None, + quantize_scales=None, + quantize_groups=1, + merge_count=1, + mlp_extra_grouping=False): + super(DeepSpeedTransformerInference, self).__init__() + + self.config = config + self.config.layer_id = DeepSpeedTransformerInference.layer_id + DeepSpeedTransformerInference.layer_id += 1 + + data_type = torch.half if self.config.dtype == torch.int8 else self.config.dtype + global inference_module + if inference_module is None: + builder = InferenceBuilder() + inference_module = builder.load() + + if DeepSpeedTransformerInference.layer_id == 1: + log_dist(f"DeepSpeed-Inference config: {self.config.__dict__}", [0]) + if deepspeed.HAS_TRITON and self.config.use_triton: + log_dist(f"Injecting Triton kernels ...", [0]) + + if self.config.bigscience_bloom: + self.attention = BloomSelfAttention(self.config, mp_group, quantize_scales, quantize_groups, merge_count) + assert not self.config.use_triton + else: + if deepspeed.HAS_TRITON and self.config.use_triton: + self.attention = TritonSelfAttention(self.config) + else: + self.attention = DeepSpeedSelfAttention(self.config, mp_group, quantize_scales, quantize_groups, + merge_count) + + if deepspeed.HAS_TRITON and self.config.use_triton: + self.mlp = TritonMLP(self.config) + else: + self.mlp = DeepSpeedMLP(self.config, mp_group, quantize_scales, quantize_groups, merge_count, + mlp_extra_grouping) + + device = get_accelerator().current_device_name() # if config.bigscience_bloom else 'cpu' + if self.config.set_empty_params: + self.norm_w = None + self.norm_b = None + else: + self.norm_w = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type, device=device), + requires_grad=False) + self.norm_b = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type, device=device), + requires_grad=False) + self.layer_past = None + try: + if config.dtype == torch.float32: + self.allocate_workspace = inference_module.allocate_workspace_fp32 + elif config.dtype == torch.bfloat16: + self.allocate_workspace = inference_module.allocate_workspace_bf16 + else: + self.allocate_workspace = inference_module.allocate_workspace_fp32 + self._alloc_workspace = True + except AttributeError: + self.allocate_workspace = None + self._alloc_workspace = False + + @classmethod + def reset_cache(cls): + if inference_module is not None: + inference_module.reset_cache() + + def forward( + self, + input=None, + input_mask=None, + attention_mask=None, + attn_mask=None, + head_mask=None, + layer_past=None, + get_key_value=False, + get_present=False, + encoder_output=None, + enc_dec_attn_mask=None, + x=None, + encoder_hidden_states=None, + encoder_attention_mask=None, + use_cache=False, + alibi=None, + output_attentions=False, + # TODO(arashb): 'layer_head_mask' and 'past_key_value' are only added to satisfy the OPT models API. + # This needs to be redesigned later! 
+ layer_head_mask=None, + past_key_value=None, + **kwargs): + + if x is not None: + input = x + if "hidden_states" in kwargs: + input = kwargs["hidden_states"] + + input_mask = (input_mask if attn_mask is None else attn_mask) if attention_mask is None else attention_mask + + # Allocate memory only on first layer forward + if self.config.layer_id == 0 and self._alloc_workspace: + self.allocate_workspace(self.config.hidden_size, self.config.heads, + input.size()[1], + input.size()[0], DeepSpeedTransformerInference.layer_id, self.config.mp_size, + self.config.bigscience_bloom, + dist.get_rank() if dist.is_initialized() else 0, self.config.max_out_tokens, + self.config.min_out_tokens) + self._alloc_workspace = False + + get_present = (get_present or get_key_value or use_cache) + input_mask = input_mask if attention_mask is None else attention_mask + + # We set the prev key/value to None when there is a prompt + if input.shape[1] > 1: + self.layer_past = None + layer_past = layer_past if layer_past is not None else self.layer_past + head_mask = layer_head_mask if layer_head_mask is not None else head_mask + + attn_mask = None + if isinstance(input, tuple): + attn_mask = input[1] + input = input[0] + input_type = input.dtype + + if (self.config.dtype in [torch.float16, torch.bfloat16, torch.int8]) \ + and input.dtype == torch.float: + target_dtype = torch.half if self.config.dtype == torch.int8 else self.config.dtype + input = input.to(target_dtype) + + with torch.no_grad(): + attention_output, key, value, context_outputtn_ctx, inp_norm = \ + self.attention(input, + input_mask, + head_mask, + layer_past, + get_present, + encoder_hidden_states, + encoder_attention_mask, + output_attentions, + self.norm_w, + self.norm_b, + alibi) + + presents = (key, value) + self.layer_past = presents if layer_past is None else None + output = self.mlp(attention_output, input, inp_norm, self.attention.attn_ob) + + if not self.config.pre_layer_norm: + output = inference_module.layer_norm(output, self.norm_w, self.norm_b, self.config.epsilon) + + output = output.to(input_type) + if get_present: + output = (output, presents) + + if self.config.return_single_tuple: + return (output, ) + elif self.config.return_tuple: + return output if type(output) is tuple else (output, attn_mask) + else: + return output diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4bdabf383b26ee23b286baa4387ff932b80d1614 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__init__.py @@ -0,0 +1,10 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .replace_module import replace_transformer_layer, revert_transformer_layer, ReplaceWithTensorSlicing, GroupQuantizer, generic_injection +from .module_quantize import quantize_transformer_layer +from .replace_policy import HFBertLayerPolicy +from .layers import LinearAllreduce, LinearLayer, EmbeddingLayer, Normalize +from .policy import DSPolicy diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/auto_tp_model_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/auto_tp_model_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fbfb34d1d4346c5ccf69e0bbd34127ac100f2ce5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/auto_tp_model_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/fusedqkv_utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/fusedqkv_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..359fcfc334622680320adf07331dde0db2319780 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/fusedqkv_utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/module_quantize.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/module_quantize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f351727cad513d68a7b0ac386cfd7720eb328321 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/module_quantize.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/auto_tp.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/auto_tp.py new file mode 100644 index 0000000000000000000000000000000000000000..88f7086518e8eabd95ee0e1f6fe779f5fc40335f --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/auto_tp.py @@ -0,0 +1,491 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# Automatic Tensor Parallelism +import re + +from torch import nn +from .replace_policy import replace_policies +from typing import Optional +import torch +from deepspeed import comm as dist +from .layers import LinearAllreduce, LinearLayer, LmHeadLinearAllreduce +from deepspeed.accelerator import get_accelerator +from .fusedqkv_utils import require_tp_fused_qkvw, prepare_tp_fused_qkvw +from deepspeed.module_inject.tp_shard import get_shard_size, get_shard_size_list + + +def move(tensor, device): + if tensor.is_meta: + return torch.empty_like(tensor, device=device) + else: + # Using new tensors help in freeing memory (after split for example) was done before by calling clone(). + # Using copy=True instead of clone() will help in case of cpu --> cpu. + # Otherwise to() will not create a new copy for the view of the full tensor, and it will not be de-referenced. 
+ return tensor.to(device, copy=True) + + +class ReplaceWithTensorSlicing: + + def __init__(self, mp_group=None, mp_size=1, out_dim=1, in_dim=0): + if mp_group is not None: + self.gpu_index = dist.get_rank(group=mp_group) + else: + self.gpu_index = 0 + self.out_dim = out_dim + self.in_dim = in_dim + self.mp_size = mp_size + + def merge_assert(self, dim1, dim2): + assert dim1 > dim2, \ + 'Merging tensors is not allowed here! Please use deepspeed load_checkpoint\ + for merging your checkpoints before replacing the transformer layer with\ + inference-kernels' + + def strided_copy(self, + dst: Optional[torch.Tensor], + src: Optional[torch.Tensor], + num_splits: int, + int8: bool = False, + allocate_tensor: bool = False): + if src is None: + return src + src_shape = src.shape + dst_shape = dst.shape + + outer_dim = 0 if int8 else -1 + + if allocate_tensor: + dst = torch.empty_like(dst) + + src_split = torch.split(src.data, src.shape[outer_dim] // num_splits, dim=outer_dim) + if (len(src_shape) == 2 and len(dst_shape) == 2): + if src_shape[outer_dim] == dst_shape[self.out_dim]: + try: + dst = dst.reshape(-1).data.copy_(src.data.reshape(-1)).reshape(src.shape) + except: + print(dst.shape, src.shape) + exit() + dst = torch.nn.parameter.Parameter(dst, requires_grad=False) + if hasattr(src, 'scale'): + dst.scale = src.scale + return dst + self.merge_assert(src_shape[outer_dim], dst_shape[self.out_dim]) + qkv_size = dst_shape[self.out_dim] // num_splits + qkv_split = [torch.split(src_s, qkv_size, dim=outer_dim) for src_s in src_split] + weight_split = [ + torch.cat([qkv_s[i] for qkv_s in qkv_split], axis=outer_dim) for i in range(len(qkv_split[0])) + ] + dst = dst.reshape(-1).data.copy_(weight_split[self.gpu_index].contiguous().reshape(-1)).reshape( + weight_split[self.gpu_index].shape) + else: + if src_shape[0] == dst_shape[0]: + return torch.nn.parameter.Parameter(src) + qkv_size = dst_shape[0] // num_splits + qkv_split = [torch.split(src_s, qkv_size, dim=0) for src_s in src_split] + bias_split = [torch.cat([qkv_s[i] for qkv_s in qkv_split], axis=0) for i in range(len(qkv_split[0]))] + dst.data.copy_(bias_split[self.gpu_index].contiguous()) + + dst = torch.nn.parameter.Parameter(dst, requires_grad=False) + if hasattr(src, 'scale'): + dst.scale = src.scale + return dst + + def copy(self, dst, src, int8=False, allocate_tensor=False): + if src is None: + return src + assert not dst.data.is_meta # the torch.Tensor.copy_ method used below will silently fail on meta tensors + if allocate_tensor: + dst = torch.empty_like(dst) + outer_dim = 0 if int8 else 1 + inner_dim = 1 if int8 else 0 + src_shape = src.shape + dst_shape = dst.shape + if (len(src_shape) == 2 and len(dst_shape) == 2): + + if src_shape[inner_dim] == dst_shape[self.in_dim] and src_shape[outer_dim] == dst_shape[self.out_dim]: + dst = dst.reshape(-1).data.copy_(src.data.reshape(-1)).reshape(src.shape) + else: + if src_shape[inner_dim] != dst_shape[self.in_dim]: + self.merge_assert(src_shape[inner_dim], dst_shape[self.in_dim]) + dst.data.copy_(src[:, self.gpu_index * dst_shape[self.in_dim]: (self.gpu_index + 1) * dst_shape[self.in_dim]] if inner_dim == 1 else \ + src[self.gpu_index * dst_shape[self.in_dim]: (self.gpu_index + 1) * dst_shape[self.in_dim], :]) + else: + self.merge_assert(src_shape[outer_dim], dst_shape[self.out_dim]) + dst.data.copy_(src[:, self.gpu_index * dst_shape[self.out_dim]: (self.gpu_index + 1) * dst_shape[self.out_dim]] if outer_dim == 1 else \ + src[self.gpu_index * dst_shape[self.out_dim]: (self.gpu_index + 1) * 
dst_shape[self.out_dim], :]) + else: + if src_shape[0] == dst_shape[0]: + dst = src if src.dtype == dst.dtype else dst.data.copy_(src) + else: + dst.data.copy_(src[self.gpu_index * dst_shape[-1]:(self.gpu_index + 1) * dst_shape[-1]]) + dst = torch.nn.parameter.Parameter(dst, requires_grad=False) + if hasattr(src, 'scale'): + dst.scale = src.scale + return dst + + +class Loading(): + + def is_load_module(module): + load_layers = [nn.Linear, nn.Embedding, nn.LayerNorm] + load_layer_names = [ + "LPLayerNorm", "SharedEmbedding", "OPTLearnedPositionalEmbedding", "LlamaRMSNorm", "FalconLinear", + "MistralRMSNorm", "T5LayerNorm", "MixtralRMSNorm" + ] + return module.__class__ in load_layers or module._get_name() in load_layer_names + + def load_buffer(module, state_dict, prefix): + for name in module._buffers.keys(): + if module._buffers[name].data.is_meta: + module._buffers[name] = torch.nn.parameter.Parameter( + data=torch.empty_like(module._buffers[name].data, device="cpu"), + requires_grad=module._buffers[name].data.requires_grad) + if prefix + name in state_dict.keys(): + module._buffers[name].data.copy_(state_dict[prefix + name]) + + def load(module, state_dict, prefix, mp_group=None): + mp_replace = ReplaceWithTensorSlicing(mp_group=mp_group) + if hasattr(module, 'weight'): + if module.weight.data.is_meta: + # meta tensor cannot be casted or copied to, so we need to replace it with a normal tensor here + module.weight = torch.nn.parameter.Parameter(data=torch.empty_like(module.weight.data, device="cpu"), + requires_grad=module.weight.data.requires_grad) + if 'query_key_value' in prefix: + module.weight = mp_replace.strided_copy(module.weight.data, + state_dict[prefix + 'weight'], + num_splits=3) + else: + module.weight = mp_replace.copy(module.weight.data, state_dict[prefix + 'weight']) + else: + if hasattr(module, 'norm') and hasattr(module.norm, 'weight'): + if module.norm.weight.data.is_meta: + # meta tensor cannot be casted or copied to, so we need to replace it with a normal tensor here + module.norm.weight = torch.nn.parameter.Parameter( + data=torch.empty_like(module.norm.weight.data, device="cpu"), + requires_grad=module.norm.weight.data.requires_grad) + module.norm.weight = mp_replace.copy(module.norm.weight.data, state_dict[prefix + 'weight']) + + if prefix + 'bias' in state_dict.keys(): + if hasattr(module, 'bias'): + if module.bias.data.is_meta: + # meta tensor cannot be casted or copied to, so we need to replace it with a normal tensor here + module.bias = torch.nn.parameter.Parameter(data=torch.empty_like(module.bias.data, device="cpu"), + requires_grad=module.bias.data.requires_grad) + module.bias = mp_replace.copy(module.bias, state_dict[prefix + 'bias']) + else: + if hasattr(module, 'norm') and hasattr(module.norm, 'bias'): + if module.norm.bias.data.is_meta: + # meta tensor cannot be casted or copied to, so we need to replace it with a normal tensor here + module.norm.bias = torch.nn.parameter.Parameter( + data=torch.empty_like(module.norm.bias.data, device="cpu"), + requires_grad=module.norm.bias.data.requires_grad) + module.norm.bias = mp_replace.copy(module.norm.bias, state_dict[prefix + 'bias']) + + +class AutoTP(): + + def __init__(self, module, all_reduce_linears, prefix, state_dict, linear_layer_setting, orig_layer_impl): + self.module = module + self.all_reduce_linears = all_reduce_linears + self.prefix = prefix + self.state_dict = state_dict + + self.mp_size = None + self.mp_group = None + self.linear_layer_setting = linear_layer_setting + self.orig_layer_impl = 
orig_layer_impl + self.linear_policies = None + self.conv_linear_layer = False + + def in_module_list(module, module_list): + for item in module_list: + if type(item).__name__ == type(module).__name__: + return True + return False + + def get_module_list(model): + mlist = [] + for child in model.children(): + if isinstance(child, nn.ModuleList): + for module in child.children(): + if not mlist: + mlist = [module] + elif not AutoTP.in_module_list(module, mlist): + mlist = mlist + [module] + else: + mlist = mlist + AutoTP.get_module_list(child) + return mlist + + def supported(model): + unsupported = ['deberta', 'flaubert', 'fsmt', 'gpt2', 'led', 'longformer', 'xlm', 'xlnet'] + model = str(model) + key = re.search(r": (.*?)Model", model) + if key is None: + key = re.search(r": (.*?)Stack", model) + if key is None: + key = re.match(r"(.*?)Model", model) + assert key is not None, "Not able to determine model policy automatically. Please provide policy." + if key.group(1).lower() in unsupported: + return False + return True + + def get_layers(parent, module): + layer_list = [] + for key, submodule in module._modules.items(): + if isinstance(submodule, nn.Linear): + layer_list = layer_list + [parent + "." + key] + elif isinstance(submodule, nn.LayerNorm) or key == 'LayerNorm' or key == 'layer_norm': + layer_list = layer_list + ["ln"] + else: + layer_list = layer_list + AutoTP.get_layers(key, submodule) + return layer_list + + def update_policy_list(policy_list, new_module, new_gems): + if len(policy_list): + for i, policy in enumerate(policy_list): + # if module already exists in policy, combine gems and remove duplicates + if policy[0] == type(new_module): + new_gems = set(new_gems + policy[1]) + policy_list[i] = tuple([type(new_module), new_gems]) + return policy_list + policy_list.append(tuple([type(new_module), new_gems])) + return policy_list + + def kernel_supported(module_list): + policy = [] + for plcy in replace_policies: + # instantiate a throw-away policy in order to populate the _orig_layer_class + _ = plcy(None) + if isinstance(plcy._orig_layer_class, list): + for orig_layer_class in plcy._orig_layer_class: + policy.append(orig_layer_class) + elif plcy._orig_layer_class is not None: + policy.append(plcy._orig_layer_class) + for child in module_list: + if child.__class__ in policy: + return True + return False + + def tp_parser(model): + policy_list = [] + module_list = [] + layer_list = [] + gem_list = [] + + module_list = AutoTP.get_module_list(model) + assert AutoTP.supported(model), "AutoTP not supported for model. Please use kernel injection since container policy for model exists." \ + if AutoTP.kernel_supported(module_list) else "AutoTP not supported for model. Please provide policy." + norm_layer_name_list = ['LayerNorm', 'layer_norm', 'ln_1', 'ln_2'] + #ln_1 , ln_2 for Qwen + for module in module_list: + for key, submodule in module._modules.items(): + if isinstance(submodule, nn.Linear): + layer_list = layer_list + ["." 
+ key] + elif isinstance(submodule, nn.LayerNorm) or key in norm_layer_name_list: + layer_list = layer_list + ["ln"] + else: + layer_list = layer_list + AutoTP.get_layers(key, submodule) + for i, layer in enumerate(layer_list): + if layer == 'ln': + if layer_list[i - 1] != 'ln': + gem_list = gem_list + [layer_list[i - 1]] + elif 'out_proj' in layer: + gem_list = gem_list + [layer] + elif 'o_proj' in layer: + gem_list = gem_list + [layer] + elif 'down_proj' in layer: + gem_list = gem_list + [layer] + elif 'attention.dense' in layer and 'GPTNeoX' in str(model): + gem_list = gem_list + [layer] + elif 'self_attention.dense' in layer and 'falcon' in str( + type(module)): # this is a hack to get the right linear layer for this model! + gem_list = gem_list + [layer] + # Mixtral-7x8b used w2*act(w1*w3) linear. need to replace w2 to linearallreduce. + elif 'w2' in layer and 'Mixtral' in str(type(module)): + gem_list = gem_list + [layer] + + layer_list = [] + if gem_list != []: + gem_list = list(set(gem_list)) + policy_list = AutoTP.update_policy_list(policy_list, module, gem_list) + gem_list = [] + assert len(policy_list), "AutoTP not supported for model. Please use kernel injection since container policy for model exists." \ + if AutoTP.kernel_supported(module_list) else "Not able to determine model policy automatically. Please provide policy." + return policy_list + + def set_tensor_parallel_config(self, mp_size, mp_group): + self.mp_size = mp_size + self.mp_group = mp_group + + def _replace(self, child, name, conv_linear_layer): + if getattr(child, "replaced", False) == True: + return + weight_shape = child.weight.shape + mp_replace = ReplaceWithTensorSlicing(mp_group=self.mp_group) + # For mixtral-7x8b, need to skip MoE gate linear replace. + if name == "block_sparse_moe.gate": + return child + if name in self.all_reduce_linears: + # if conv_linear_layer [weight_shape[1], weight_shape[0] // mp_size] + # else [weight_shape[0], weight_shape[1] // mp_size] + + if self.conv_linear_layer: + child.weight.data = child.weight.data.transpose(-1, -2).contiguous() + data = child.weight.data.split(get_shard_size_list( + weight_shape[0] if self.conv_linear_layer else weight_shape[1], self.mp_size, name), + dim=1) + data_dc = move(data[mp_replace.gpu_index], get_accelerator().current_device_name()).detach() + del data + + setattr(child, "replaced", True) + if name == "lm_head" or name == 'embed_out': + return LmHeadLinearAllreduce( + torch.nn.parameter.Parameter(data_dc, requires_grad=False), dist.get_rank(), dist.get_world_size(), + child.bias if child.bias is None else torch.nn.parameter.Parameter( + move(child.bias, + get_accelerator().current_device_name())), self.mp_group) + return LinearAllreduce(torch.nn.parameter.Parameter(data_dc, requires_grad=False), child.bias if child.bias is None else \ + torch.nn.parameter.Parameter(move(child.bias, get_accelerator().current_device_name())), self.mp_group) + else: + + # if conv_linear_layer [weight_shape[1], weight_shape[0] // mp_size] + # else [weight_shape[0] // mp_size, weight_shape[1]] + if self.conv_linear_layer: + child.weight.data = child.weight.data.transpose(-1, -2).contiguous() + + if require_tp_fused_qkvw(name, self.mp_size): + #Check and handle fused qkv for TP + #The copy is a regular copy, The shape of dst and src is the same + data_dc = move( + prepare_tp_fused_qkvw(self.module, child.weight.data, self.mp_size, mp_replace.gpu_index), + get_accelerator().current_device_name()) + + bias_data_dc = None if child.bias is None else move( + 
prepare_tp_fused_qkvw(self.module, child.bias.data, self.mp_size, mp_replace.gpu_index), + get_accelerator().current_device_name()) + else: + data = child.weight.data.split(get_shard_size_list(weight_shape[0], self.mp_size, name), + dim=1 if self.conv_linear_layer else 0) + data_dc = move(data[mp_replace.gpu_index], get_accelerator().current_device_name()).detach() + del data + + if child.bias is not None: + bias_data = child.bias.data.split(get_shard_size_list( + weight_shape[1] if self.conv_linear_layer else weight_shape[0], self.mp_size, name), + dim=0) + bias_data = move(bias_data[mp_replace.gpu_index], get_accelerator().current_device_name()) + bias_data_dc = torch.nn.parameter.Parameter(bias_data, requires_grad=False) + del bias_data + else: + bias_data_dc = None + + setattr(child, "replaced", True) + return LinearLayer(weight=torch.nn.parameter.Parameter(data_dc, requires_grad=False), bias=bias_data_dc) + + def _slice_embedding(self, child, name, conv_linear_layer): + if getattr(child, "replaced", False) == True: + return + mp_replace = ReplaceWithTensorSlicing(mp_group=self.mp_group) + + if hasattr(child.weight, 'ds_tensor'): + data = child.weight.ds_tensor.data.split(get_shard_size_list(child.weight.shape[1], self.mp_size), dim=1) + else: + data = child.weight.data.split(get_shard_size_list(child.weight.shape[1], self.mp_size, name), dim=1) + data = data[mp_replace.gpu_index].to(get_accelerator().current_device_name()) + data = torch.nn.parameter.Parameter(data, requires_grad=False) + + new_embedding = nn.Embedding(child.weight.shape[0], get_shard_size(child.weight.shape[1], self.mp_size, name)) + new_embedding.weight.data.copy_(data) + setattr(child, "replaced", True) + return new_embedding + + def update_mp_params(self, child): + if getattr(child, "replaced", False) == True: + return + for param in [ + "n_heads", "inner_dim", "num_heads", "num_kv", "num_attention_heads", "num_attn_heads", + "all_head_size", "embed_dim", "hidden_size", "num_key_value_heads", "num_kv_heads", "kv_n_heads", + "d_model" + ]: + if hasattr(child, param): + param_val = getattr(child, param) + setattr(child, param, get_shard_size(param_val, self.mp_size)) + setattr(child, "replaced", True) + + def update_linear_policies(self): + self.conv_linear_layer = False + if self.linear_layer_setting is not None: + self.linear_policies = {self.linear_layer_setting[0]: self._replace} + if len(self.linear_layer_setting) == 2: + self.linear_policies.update({self.linear_layer_setting[1]: self._slice_embedding}) + else: + import transformers + if self.orig_layer_impl is transformers.models.gpt2.modeling_gpt2.GPT2Block: + try: + self.conv_linear_layer = True + self.linear_policies = {transformers.pytorch_utils.Conv1D: self._replace} + except ImportError: + self.linear_policies = {nn.Linear: self._replace} + else: + self.linear_policies = {nn.Linear: self._replace, nn.Embedding: self._slice_embedding} + + def _replace_module(self, r_module, prev_name='', prev_class_name=''): + for name, child in r_module.named_children(): + if prev_class_name == "": + class_name = prev_name + elif prev_name == "": + class_name = prev_class_name + else: + class_name = prev_class_name + '.' + prev_name + checking_key = self.prefix + '.' + class_name + '.' + name + '.' if class_name != "" else self.prefix + '.' + name + '.' 
+ if Loading.is_load_module(child) and self.state_dict is not None: + if any(checking_key in item for item in self.state_dict): + Loading.load(child, self.state_dict, checking_key, self.mp_group) + else: + continue + if len(child._buffers) != 0 and self.state_dict is not None: + Loading.load_buffer(child, self.state_dict, checking_key) + if child.__class__ in self.linear_policies: + setattr(r_module, name, self.linear_policies[child.__class__](child, prev_name + '.' + name, + self.conv_linear_layer)) + elif any(isinstance(child, lp) for lp in self.linear_policies): + # Added for falcon model support + # Note: isinstance will account for class inheritance, child.__class__ does not + key = None + for lp in self.linear_policies: + if isinstance(child, lp): + key = lp + break + assert key is not None + setattr(r_module, name, self.linear_policies[key](child, prev_name + '.' + name, + self.conv_linear_layer)) + else: + self.update_mp_params(child) + self._replace_module(child, name, class_name) + return r_module + + def get_model_num_kv_heads(self, config): + num_kv_heads = None + kv_head_names = ['num_kv_heads', 'num_key_value_heads', 'num_attention_heads', 'n_heads'] + for name in kv_head_names: + if hasattr(config, name): + num_kv_heads = getattr(config, name) + if num_kv_heads is not None: + break + return num_kv_heads + + def _replace_last_linear_module(self, r_module): + if hasattr(r_module, "lm_head"): + name = "lm_head" + child = r_module.lm_head + elif hasattr(r_module, "embed_out"): + name = "embed_out" + child = r_module.embed_out + else: + return r_module + if child.__class__ in self.linear_policies: + setattr(r_module, name, self.linear_policies[child.__class__](child, name, self.conv_linear_layer)) + return r_module diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/auto_tp_model_utils.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/auto_tp_model_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a71b1a54d6f6031c18899b1a5294dd5dd963e92d --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/auto_tp_model_utils.py @@ -0,0 +1,104 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from deepspeed import comm as dist +import torch +from typing import Optional +from deepspeed.module_inject.tp_shard import get_shard_size, get_shard_size_list + + +def build_bloom_alibi_tensor(attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor: + """ + Link to paper: https://arxiv.org/abs/2108.12409 Alibi tensor is not causal as the original paper mentions, it + relies on a translation invariance of softmax for quick implementation: with l being a tensor, and a fixed value + `softmax(l+a) = softmax(l)`. Based on + https://github.com/ofirpress/attention_with_linear_biases/blob/a35aaca144e0eb6b789dfcb46784c4b8e31b7983/fairseq/models/transformer.py#L742 + TODO @thomasw21 this doesn't work as nicely due to the masking strategy, and so masking varies slightly. + + Args: + Returns tensor shaped (batch_size * num_heads, 1, max_seq_len) + attention_mask (`torch.Tensor`): + Token-wise attention mask, this should be of shape (batch_size, max_seq_len). 
+ num_heads (`int`, *required*): + number of heads + dtype (`torch.dtype`, *optional*, default=`torch.bfloat16`): + dtype of the output tensor + """ + import math + batch_size, seq_length = attention_mask.shape + closest_power_of_2 = 2**math.floor(math.log2(num_heads)) + base = torch.tensor(2**(-(2**-(math.log2(closest_power_of_2) - 3))), + device=attention_mask.device, + dtype=torch.float32) + powers = torch.arange(1, 1 + closest_power_of_2, device=attention_mask.device, dtype=torch.int32) + slopes = torch.pow(base, powers) + + if closest_power_of_2 != num_heads: + extra_base = torch.tensor(2**(-(2**-(math.log2(2 * closest_power_of_2) - 3))), + device=attention_mask.device, + dtype=torch.float32) + num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2) + extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=attention_mask.device, dtype=torch.int32) + slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0) + + # Note: alibi will added to the attention bias that will be applied to the query, key product of attention + # => therefore alibi will have to be of shape (batch_size, num_heads, query_length, key_length) + # => here we set (batch_size=1, num_heads=num_heads, query_length=1, key_length=max_length) + # => the query_length dimension will then be broadcasted correctly + # This is more or less identical to T5's relative position bias: + # https://github.com/huggingface/transformers/blob/f681437203baa7671de3174b0fa583c349d9d5e1/src/transformers/models/t5/modeling_t5.py#L527 + arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :] + alibi = slopes[..., None] * arange_tensor + if dist.is_initialized(): + num_heads_per_rank = get_shard_size(num_heads, dist.get_world_size()) + offset = sum(get_shard_size_list(num_heads, dist.get_world_size())[0:dist.get_rank()]) + alibi = alibi.view(batch_size, num_heads, 1, seq_length) + alibi = alibi[:, offset:num_heads_per_rank + offset, :, :] + return alibi.reshape(batch_size * num_heads_per_rank, 1, seq_length).to(dtype) + else: + return alibi.reshape(batch_size * num_heads, 1, seq_length).to(dtype) + + +def get_alibi_mask(self, tensor, seq_length_with_past): + mask = self.get_alibi_mask_orig(tensor, seq_length_with_past) + if not self.training and dist.is_initialized(): + num_heads_per_rank = get_shard_size(self.n_head, dist.get_world_size()) + offset = sum(get_shard_size_list(self.n_head, dist.get_world_size())[0:dist.get_rank()]) + mask = mask[offset:num_heads_per_rank + offset, :seq_length_with_past, :seq_length_with_past] + + return mask + + +def build_mpt_atten_bias_tensor(self, + device, + dtype, + attention_mask: Optional[torch.ByteTensor] = None, + prefix_mask: Optional[torch.ByteTensor] = None, + sequence_id: Optional[torch.LongTensor] = None): + (attn_bias, attention_mask) = self._attn_bias_orig(device, + dtype, + attention_mask=attention_mask, + prefix_mask=prefix_mask, + sequence_id=sequence_id) + if dist.is_initialized(): + num_heads_per_rank = get_shard_size(self.config.n_heads, dist.get_world_size()) + offset = sum(get_shard_size_list(self.config.n_heads, dist.get_world_size())[0:dist.get_rank()]) + attn_bias = attn_bias[:, offset:num_heads_per_rank + offset, :, :] + return attn_bias, attention_mask + + +def build_mpt_alibi_tensor(self, num_heads, sequence_length, alibi_bias_max=8, device=None) -> torch.Tensor: + r""" + Link to paper: https://arxiv.org/abs/2108.12409 - Alibi tensor is not causal as the original paper mentions, it + relies on a translation 
invariance of softmax for quick implementation. This implementation has been copied from + the alibi implementation of MPT source code that led to slightly different results than the Bloom alibi: + https://huggingface.co/mosaicml/mpt-7b/blob/main/attention.py#L292 + """ + alibi = self.build_mpt_alibi_tensor_orig(num_heads, sequence_length, alibi_bias_max, device) + if dist.is_initialized(): + num_heads_per_rank = int(num_heads / dist.get_world_size()) + offset = dist.get_rank() * num_heads_per_rank + alibi = alibi[offset:num_heads_per_rank + offset, :, :] + return alibi diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/fusedqkv_utils.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/fusedqkv_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..cf087c16da8ad5db05ba91f05eaeeb77b1330c64 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/fusedqkv_utils.py @@ -0,0 +1,125 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +import torch +from deepspeed.utils.logging import warning_once +from deepspeed.module_inject.tp_shard import get_shard_size, get_shard_size_list, get_num_kv_heads, get_n_embd + + +def split_by_qkvlist_and_refuse(qkv_list, split_size, split_dim=0, cat_dim=0): + qkv_split_list = [torch.split(mat, split_size, dim=split_dim) for mat in qkv_list] + tp_fusedqkv_list = [ + torch.cat([qkv_s[i] for qkv_s in qkv_split_list], dim=cat_dim) for i in range(len(qkv_split_list[0])) + ] + return tp_fusedqkv_list + + +def require_tp_fused_qkvw(name, mp_size): + fused_qkvw_name_list = ['qkv_proj', 'query_key_value', 'attn.Wqkv', 'self_attn.W_pack', 'c_attn'] + + if mp_size == 1: + return False + for fused_name in fused_qkvw_name_list: + if fused_name in name: + return True + return False + + +def prepare_tp_fused_qkvw(module, src, mp_size, gpu_index): + + module_str = str(module).strip() + if src is None: + return + fused_type_dict = { + 'CodeGenBlock': 'codegentype', + 'BloomBlock': 'bloomtype', + 'GLMBlock': 'glmtype', + "MPTBlock": 'glmtype', + "MptBlock": 'glmtype', + "BaichuanLayer": 'glmtype', + "QWenBlock": 'qwentype', + "FalconDecoderLayer": 'bloomtype', + "GPTBigCodeBlock": 'bigcodetype', + "DecoderLayer": 'glmtype', + } + + def _codegen_type_transpose(input, mp_size, codegen_mp_num=4): + # codegen_mp_num defined in https://github.com/huggingface/transformers/blob/main/src/transformers/models/codegen/modeling_codegen.py + assert get_num_kv_heads() % ( + mp_size * codegen_mp_num) == 0, "codgen autoTP requires num_kv_heads % (mp_size*codegen_mp_num) == 0" + #input : [3*hidden_dim, hidden_dim](weight) or [3*hidden_dim](bias) + + shape = input.shape + dst_shape = get_shard_size(shape[0], mp_size) + num_mp_blocks = input.reshape(codegen_mp_num, shape[0] // codegen_mp_num, shape[1]) + + #num_mp_blocks : [codegen_mp_num, 3*hidden_dim/codegen_mp_num, :] + src_split = list(torch.split(num_mp_blocks, num_mp_blocks.shape[1] // 3, dim=1)) + src_split = [x.reshape(codegen_mp_num * mp_size, -1, shape[1]) for x in src_split] + + split_fusedqkv = split_by_qkvlist_and_refuse(src_split, get_shard_size(shape[0] // 3, mp_size), 0, 1) + tp_fuseqkv_weight = torch.cat(split_fusedqkv, dim=0).reshape(shape[0], -1) + + return tp_fuseqkv_weight[gpu_index * dst_shape:(gpu_index + 1) * dst_shape] + + def _glm_type_transpose(input, mp_size): + #input : [3*hidden_dim, hidden_dim](weight) or [3*hidden_dim](bias) + + shape = input.shape + src_split = torch.split(input, shape[0] // 3, 
dim=0) + + split_fusedqkv = split_by_qkvlist_and_refuse(src_split, get_shard_size_list(shape[0] // 3, mp_size)) + return split_fusedqkv[gpu_index] + + def _bloom_type_transpose(input, mp_size): + shape = input.shape + + split_fusedqkv = input.split(get_shard_size_list(shape[0], mp_size), dim=0) + return split_fusedqkv[gpu_index] + + def _qwen_type_transpose(input, mp_size, module): + if not hasattr(module, "_ds_fusedqkv_entered"): + # Adjust splitting absolute value variables + setattr(module, "_ds_fusedqkv_entered", True) + module.attn.split_size = get_shard_size(module.attn.split_size, mp_size) + return _glm_type_transpose(input, mp_size) + + def _bigcode_type_transpose(input, mp_size): + n_embd = get_n_embd() + q = input[:n_embd] + kv = input[n_embd:] + shape = q.shape + split_q = q.split(get_shard_size_list(shape[0], mp_size), dim=0) + return torch.cat((split_q[gpu_index], kv), dim=0) + + def _transpose_fused_qkvw(src, mp_size, fused_qkv_type=None, module=None): + + # suppose num_heads=n, q(n)_w means the n-th q head linear weight, the weight format are as following + # bloomtype: [q(1)_w,k(1)_w,v(1)_w,q(2)_w,k(2)_w,v(2)_w,...,q(n)_w,k(n)_w,v(n)_w] + # glmtype: [q(1)_w, q(2)_w,...,q(n)_w,k(1)_w,k(2)_w,...,k(n)_w,v(1)_w,v(2)_w,...,v(n)_w] + # codegentype: [q(1)_w,q(2)_w,...,q(n/t)_w,k(1)_w,k(2)_w,...,k(n/t)_w,v(1)_2,v(2)_w,...v(n/t)_w,q(n/t+1)_w,...], where t is a const defined in model file. + + if fused_qkv_type == 'bloomtype': + return _bloom_type_transpose(src, mp_size) + elif fused_qkv_type == 'codegentype': + return _codegen_type_transpose(src, mp_size) + elif fused_qkv_type == 'glmtype': + return _glm_type_transpose(src, mp_size) + elif fused_qkv_type == 'qwentype': + return _qwen_type_transpose(src, mp_size, module) + elif fused_qkv_type == 'bigcodetype': + return _bigcode_type_transpose(src, mp_size) + + raise ValueError("unknown fused_qkv_type") + + module_name_matches = [k for k in fused_type_dict.keys() if k in module_str] + if module_name_matches: + # There can be overlap with matches (e.g., "DecoderLayer" and "FalconDecoderLayer"). + # We take the longest matching module_name + module_name = max(module_name_matches, key=len) + fused_type = fused_type_dict[module_name] + return _transpose_fused_qkvw(src, mp_size, fused_type, module) + warning_once(f"Unrecognized fusedkqv weight type, default to using bloom type," + f"please check in prepare_tp_fused_qkvw() to avoid potential calculation errors") + return _bloom_type_transpose(src, mp_size) diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/inject.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/inject.py new file mode 100644 index 0000000000000000000000000000000000000000..401da1bd6ef76b268f65e48b77c30a8ed65bfc64 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/inject.py @@ -0,0 +1,112 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import copy +import torch +from deepspeed.ops.transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig + + +def module_inject(layer_obj, model, config, micro_batch_size, max_seq_length, seed, preln, fp16=True): + for name, child in model.named_children(): + if isinstance(child, layer_obj): + print('REPLACING BertLayer') + + cuda_config = DeepSpeedTransformerConfig(batch_size=micro_batch_size, + max_seq_length=max_seq_length, + hidden_size=config.hidden_size, + heads=config.num_attention_heads, + attn_dropout_ratio=config.attention_probs_dropout_prob, + hidden_dropout_ratio=config.hidden_dropout_prob, + num_hidden_layers=config.num_hidden_layers, + initializer_range=config.initializer_range, + seed=seed, + fp16=fp16, + pre_layer_norm=preln) + + new_module = DeepSpeedTransformerLayer(cuda_config) + + # copy relevant state from child -> new module + qw = child.attention.self.query.weight + qb = child.attention.self.query.bias + kw = child.attention.self.key.weight + kb = child.attention.self.key.bias + vw = child.attention.self.value.weight + vb = child.attention.self.value.bias + + qkvw = torch.cat((qw, kw, vw), 0) + qkvb = torch.cat((qb, kb, vb), 0) + + new_module.attn_qkvw.data = qkvw + new_module.attn_qkvb.data = qkvb + new_module.attn_ow.data = child.attention.output.dense.weight + new_module.attn_ob.data = child.attention.output.dense.bias + if preln: + attention_layerNorm = child.PostAttentionLayerNorm + else: + attention_layerNorm = child.attention.output.LayerNorm + new_module.attn_nw.data = attention_layerNorm.weight + new_module.attn_nb.data = attention_layerNorm.bias + if preln: + intermediate_FF = child.intermediate.dense_act + else: + intermediate_FF = child.intermediate.dense + new_module.inter_w.data = intermediate_FF.weight + new_module.inter_b.data = intermediate_FF.bias + new_module.output_w.data = child.output.dense.weight + new_module.output_b.data = child.output.dense.bias + if preln: + transformer_LayerNorm = child.PreAttentionLayerNorm + else: + transformer_LayerNorm = child.output.LayerNorm + new_module.norm_w.data = transformer_LayerNorm.weight + new_module.norm_b.data = transformer_LayerNorm.bias + + setattr(model, name, copy.deepcopy(new_module)) + + else: + module_inject(layer_obj, child, config, micro_batch_size, max_seq_length, seed, preln, fp16) + + return model + + +def test_hi(): + from turing.nvidia_modelingpreln import BertConfig as BertConfigPreLN + from turing.nvidia_modelingpreln import BertForQuestionAnswering as BertForQuestionAnsweringPreLN + from turing.nvidia_modelingpreln import BertLayer + bert_model_config = { + "vocab_size_or_config_json_file": 119547, + "hidden_size": 1024, + "num_hidden_layers": 1, + "num_attention_heads": 16, + "intermediate_size": 4096, + "hidden_act": "gelu", + "hidden_dropout_prob": 0.1, + "attention_probs_dropout_prob": 0.1, + "hidden_dropout_prob": 0.1, + "attention_probs_dropout_prob": 0.1, + "max_position_embeddings": 512, + "type_vocab_size": 2, + "initializer_range": 0.02 + } + bert_config = BertConfigPreLN(**bert_model_config) + base_model = BertForQuestionAnsweringPreLN(bert_config, args=None) + + #base_model = LinearStack() + + test_model = copy.deepcopy(base_model) + test_model = module_inject(BertLayer, test_model, bert_config, 4, 384, 1234) + + print('BASE', base_model) + print('TEST', test_model) + + #base_model.eval() + #test_model.eval() + + #test_input = torch.rand(1, base_model.input_dim) + + #base_output = base_model(test_input) 
+ #test_output = test_model(test_input) + # + #assert torch.allclose(base_output, test_output, atol=3e-8) diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/layers.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..5fb957faa80cb8ff5bbfea378e92f661c3fe29ea --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/layers.py @@ -0,0 +1,162 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from deepspeed import comm as dist +from torch import nn +from torch.nn import functional as F + +from torch.nn.parameter import Parameter +from deepspeed.accelerator import get_accelerator +from deepspeed.module_inject.tp_shard import get_shard_size, get_shard_size_list + + +class LinearAllreduce(nn.Module): + + def __init__(self, weight, bias=None, mp_group=None): + super(LinearAllreduce, self).__init__() + self.weight = weight + self.bias = bias + self.mp_group = mp_group + + def forward(self, input): + output = torch.matmul(input, self.weight.transpose(-1, -2)) + if self.mp_group is not None: + dist.inference_all_reduce(output, group=self.mp_group) + if self.bias is not None: + output += self.bias + return output + + +class LmHeadLinearAllreduce(nn.Module): + + def __init__( + self, + weight, + rank, + world_size, + bias=None, + mp_group=None, + ): + super(LmHeadLinearAllreduce, self).__init__() + self.weight = weight + self.bias = bias + self.mp_group = mp_group + self.rank = rank + self.world_size = world_size + + def forward(self, input): + input_shard_size = get_shard_size(input.shape[-1], self.world_size, "lm_head") + input_shard_offset = sum(get_shard_size_list(input.shape[-1], self.world_size, "lm_head")[0:self.rank]) + output = torch.matmul(input[:, :, input_shard_offset:input_shard_offset + input_shard_size], + self.weight.transpose(-1, -2)) + if self.mp_group is not None: + dist.inference_all_reduce(output, group=self.mp_group) + if self.bias is not None: + output += self.bias + return output + + +class LinearLayer(nn.Module): + + def __init__(self, weight_shape=None, dtype=torch.half, weight=None, bias=None): + super(LinearLayer, self).__init__() + if weight is not None: + self.weight = weight + self.bias = bias + else: + self.weight = Parameter( + torch.empty(weight_shape, dtype=dtype, device=get_accelerator().current_device_name())) + + self.bias = Parameter( + torch.empty(weight_shape[0], + dtype=dtype, + device=get_accelerator().current_device_name())) \ + if bias is not None else None + + def forward(self, input): + output = torch.matmul(input, self.weight.transpose(-1, -2)) + if self.bias is not None: + output += self.bias + return output + + +class Normalize(nn.Module): + + def __init__(self, dim=None, dtype=torch.float, eps=1e-5, weight=None, bias=None): + super(Normalize, self).__init__() + if weight is not None: + self.weight = weight + self.bias = bias + else: + self.norm = nn.LayerNorm(dim, eps=eps).to(dtype).to(get_accelerator().current_device_name()) + self.weight = self.norm.weight + self.bias = self.norm.bias + + self.eps = eps + + def forward(self, input): + return nn.functional.layer_norm(input, input.shape[-1:], self.weight, self.bias, eps=self.eps) + + +class EmbeddingLayer(nn.Module): + + def __init__(self, weight_shape=None, dtype=torch.half, weight=None, bias=None): + super(EmbeddingLayer, self).__init__() + if weight is None: + self.weight = Parameter( + 
torch.empty(weight_shape[0], + weight_shape[1], + dtype=dtype, + device=get_accelerator().current_device_name())) + else: + self.weight = weight + + def forward(self, input): + return F.embedding(input, self.weight) + + +class OPTEmbedding(EmbeddingLayer): + """ + This module learns positional embeddings up to a fixed maximum size. + """ + + def __init__(self, weight_shape=None, weight=None, bias=None): + # OPT is set up so that if padding_idx is specified then offset the embedding ids by 2 + # and adjust num_embeddings appropriately. Other models don't have this hack + self.offset = 2 + super().__init__(weight_shape, weight=weight) + + def forward(self, attention_mask: torch.LongTensor, past_key_values_length: int = 0): + """`input_ids_shape` is expected to be [bsz x seqlen].""" + attention_mask = attention_mask.long() + + # create positions depending on attention_mask + positions = (torch.cumsum(attention_mask, dim=1).type_as(attention_mask) * attention_mask).long() - 1 + + # cut positions if `past_key_values_length` is > 0 + positions = positions[:, past_key_values_length:] + + return super().forward(positions + self.offset) + + +class RMSNormalize(nn.Module): + + def __init__(self, dim=None, dtype=torch.float, eps=1e-5, weight=None): + super(RMSNormalize, self).__init__() + if weight is not None: + self.weight = weight + else: + self.weight = nn.Parameter(torch.ones(dim, dtype=dtype, device=get_accelerator().current_device_name())) + + self.eps = eps + + def forward(self, hidden_states): + variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True) + hidden_states = hidden_states * torch.rsqrt(variance + self.eps) + if self.weight.dtype in [torch.float16, torch.bfloat16]: + hidden_states = hidden_states.to(self.weight.dtype) + + return hidden_states * self.weight diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/load_checkpoint.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/load_checkpoint.py new file mode 100644 index 0000000000000000000000000000000000000000..4d01fdc698694ec681aa680d68fef65859697c05 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/load_checkpoint.py @@ -0,0 +1,283 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from torch import nn +from deepspeed.model_implementations.transformers.ds_bloom import DeepSpeedBloomInference +from deepspeed.model_implementations.transformers.ds_gpt import DeepSpeedGPTInference +from deepspeed.model_implementations.transformers.ds_bert import DeepSpeedBERTInference +from deepspeed.model_implementations.transformers.ds_megatron_gpt import DeepSpeedMegatronGPTInference +from deepspeed.model_implementations.transformers.ds_opt import DeepSpeedOPTInference +from deepspeed.model_implementations.transformers.ds_llama2 import DeepSpeedLlama2Inference + +import deepspeed.ops.transformer as transformer_inference +from .layers import LinearLayer, Normalize, EmbeddingLayer, OPTEmbedding, RMSNormalize +import torch +import gc +from deepspeed.accelerator import get_accelerator +import re + + +def load_model_with_checkpoint(r_module, + sd, + mp_replace, + ckpt_type, + ckpt_mp_size, + weight_quantizer=None, + rank=0, + container=None): + error_msgs = [] + + def prefix_check(): + # if keys start with 'model.' 
or 'transformer.', don't skip level 0 prefix + for key in sd[0].keys(): + # OPT models + if re.match("^model[.]", key): + return False + # BLOOM models + if re.match("^transformer[.]", key): + return False + return True + + skip_level_0_prefix = prefix_check() and container.policy.use_load_prefix + + def transpose(data): + with torch.no_grad(): + data = data.contiguous() + data1 = data.transpose(-1, -2).reshape(-1) + data.reshape(-1).copy_(data1) + data1 = None + return data.reshape(data.shape[-1], data.shape[-2]) + + def load(module, prefix): + args = (sd[0], prefix, {}, True, [], [], error_msgs) + + if hasattr(module, 'weight'): + module.weight = mp_replace.copy(module.weight.data, sd[0][prefix + 'weight']) + if prefix + 'bias' in sd[0].keys(): + if module.bias.data.is_meta: + # meta tensor cannot be casted or copied to, so we need to replace it with a normal tensor here + module.bias = torch.nn.parameter.Parameter(data=torch.empty_like(module.bias.data, device="cpu"), + requires_grad=module.bias.data.requires_grad) + module.bias = mp_replace.copy(module.bias.data, sd[0][prefix + 'bias']) + args = None + gc.collect() + + def load_transformer_layer(module, prefix): + if ckpt_type == "tp": + + def load_parameters(module, prefix): + for n, p in module.named_parameters(): + if prefix + n in sd[0] and len(n.split('.')) == 1: + if type(sd[0][prefix + n]) is list: + tmp_data, scale = sd[0][prefix + n] + tmp_data = tmp_data + scale = scale.to(get_accelerator().current_device_name()) + # set the quantizer number of groups using the checkpoint scale shape + weight_quantizer.num_groups = scale.shape[0] + else: + tmp_data = sd[0][prefix + n].to(get_accelerator().current_device_name()) + scale = None + src_shape = tmp_data.shape + dst_shape = p.shape + inner_dim = 1 if tmp_data.dtype == torch.int8 else 0 + outer_dim = 0 if tmp_data.dtype == torch.int8 else 1 + if (len(src_shape) == 2 and len(dst_shape) == 2): + if (src_shape[inner_dim] == dst_shape[0] and src_shape[outer_dim] == dst_shape[1]): + if tmp_data.dtype != torch.int8: + p = weight_quantizer.quantize( + transpose(tmp_data) if weight_quantizer.q_int8 else tmp_data) + else: + p = torch.nn.parameter.Parameter(tmp_data, requires_grad=False) + p.scale = scale + setattr(module, n, p) + else: + dim = inner_dim if src_shape[inner_dim] != dst_shape[0] else outer_dim + dim1 = 0 if src_shape[inner_dim] != dst_shape[0] else 1 + if src_shape[dim] > dst_shape[dim1]: + weight_partition = torch.split(tmp_data, dst_shape[dim1], dim=dim)[rank].to( + get_accelerator().current_device_name()) + assert tmp_data.dtype != torch.int8 or scale.numel() > weight_quantizer.num_groups * (rank+1), \ + '''ERROR: We require the quantization scales for larger TP-size when loading INT8 checkpoint!\ + Please use the FP16 checkpoint to generate INT8 checkpoint with the sharding parameters!''' + scale = scale.view(-1)[weight_quantizer.num_groups * (rank + 1):].reshape( + weight_quantizer.num_groups, -1).contiguous() + else: + assert tmp_data.dtype != torch.int8, \ + '''Merging of the checkpoints are not supported when using INT8 checkpoint! 
\ + Please use a as many GPUs as TP-size for the checkpoint''' + all_data = [ + sd[j][prefix + n] if type(sd[j][prefix + n]) is list else sd[j][prefix + n].to( + get_accelerator().current_device_name()) for j in range(len(sd)) + ] + # Check if the weight tensor is for the QKV parameter + if src_shape[1] == (3 * src_shape[0]) // ckpt_mp_size: + qkv_size = src_shape[outer_dim] // 3 + src_split = [ + torch.split(src[0].data, qkv_size, dim=outer_dim) for src in all_data + ] + + weight_partition = torch.cat([ + torch.cat([qkv_s[i] for qkv_s in src_split], axis=outer_dim) + for i in range(len(src_split[0])) + ], + dim=dim) + else: + weight_partition = torch.cat([ + ad[0].to(get_accelerator().current_device_name()) + if type(ad) is list else ad for ad in all_data + ], + dim=dim) + if tmp_data.dtype == torch.int8: + scale = torch.cat( + [ad[1].to(get_accelerator().current_device_name()) for ad in all_data], + dim=dim) + + if tmp_data.dtype != torch.int8: + weight_partition = weight_quantizer.quantize( + transpose(weight_partition), \ + parallel_dim=(0 if dim == 1 else 1)) if weight_quantizer.q_int8 else \ + weight_quantizer.quantize(weight_partition) + else: + weight_partition = torch.nn.parameter.Parameter(weight_partition, + requires_grad=False) + weight_partition.scale = scale + setattr(module, n, weight_partition) + else: + if src_shape[0] == dst_shape[0]: + p.data.copy_(tmp_data) + else: + if src_shape[0] > dst_shape[0]: + bias_split = torch.split(tmp_data, dst_shape[-1])[rank].to( + get_accelerator().current_device_name()).contiguous() + p.data.copy_(bias_split) + else: + # Check if the weight tensor is for the QKV parameter + if src_shape[0] == (3 * r_module.config.hidden_size) // ckpt_mp_size: + qkv_size = src_shape[0] // 3 + src_split = [ + torch.split(sd[j][prefix + n], qkv_size, dim=0) for j in range(len(sd)) + ] + + p.data.copy_( + torch.cat([ + torch.cat([qkv_s[i] for qkv_s in src_split], axis=0) + for i in range(len(src_split[0])) + ], + dim=0).to(get_accelerator().current_device_name()).contiguous()) + else: + p.data.copy_( + torch.cat([sd[j][prefix + n] for j in range(len(sd))], + dim=0).to(get_accelerator().current_device_name()).contiguous()) + + load_parameters(module, prefix) + for n, child in module.named_children(): + load_parameters(child, prefix + n + '.') + else: + container.load_params(module, sd[0], weight_quantizer, mp_replace, prefix) + + try: + import transformers + OPTLearnedPositionalEmbedding = transformers.models.opt.modeling_opt.OPTLearnedPositionalEmbedding + if hasattr(transformers.models, "llama"): + LlamaRMSNorm = transformers.models.llama.modeling_llama.LlamaRMSNorm + else: + LlamaRMSNorm = None + except: + OPTLearnedPositionalEmbedding = None + try: + from fairscale.nn.model_parallel.layers import ( + ColumnParallelLinear, + ParallelEmbedding, + RowParallelLinear, + ) + except: + ColumnParallelLinear = None + ParallelEmbedding = None + RowParallelLinear = None + try: + from llama.model import RMSNorm + except: + RMSNorm = None + layer_policies = { + nn.Linear: load, + nn.Embedding: load, + nn.LayerNorm: load, + EmbeddingLayer: load, + LinearLayer: load, + Normalize: load, + transformer_inference.DeepSpeedTransformerInference: load_transformer_layer, + DeepSpeedBloomInference: load_transformer_layer, + DeepSpeedGPTInference: load_transformer_layer, + DeepSpeedBERTInference: load_transformer_layer, + DeepSpeedMegatronGPTInference: load_transformer_layer, + DeepSpeedOPTInference: load_transformer_layer, + DeepSpeedLlama2Inference: load_transformer_layer, + 
OPTLearnedPositionalEmbedding: load, + OPTEmbedding: load, + LlamaRMSNorm: load, + RMSNormalize: load, + ColumnParallelLinear: load, + ParallelEmbedding: load, + RowParallelLinear: load, + RMSNorm: load + } + + all_ds_ids = {} + + def load_module_recursive(module, prefix='', level=0): + for name, child in module.named_children(): + if child.__class__ in layer_policies: + checking_key = prefix + name + '.' + if not any(checking_key in item for item in sd[0].keys()): + if hasattr(child, 'weight') and \ + (hasattr(child.weight, 'ds_id') and \ + child.weight.ds_id in all_ds_ids): + prefix1 = all_ds_ids[child.weight.ds_id] + if child.__class__ is nn.Linear: + child = LinearLayer(weight=all_ds_ids[child.weight.ds_id]) + setattr(module, name, child) + continue + child_params = list(child.parameters()) + if len(child_params) > 0 and (child_params[0].numel() == 0 or child_params[0].is_meta): + if child.weight.is_meta: + ds_shape = child.weight.shape + else: + ds_shape = child.weight.ds_shape + if child.__class__ is nn.LayerNorm: + child = Normalize(dim=ds_shape[-1], dtype=child.weight.dtype, eps=child.eps) + setattr(module, name, child) + elif child.__class__ in [nn.Linear, ColumnParallelLinear, RowParallelLinear]: + child = LinearLayer(weight_shape=child.weight.shape, dtype=child.weight.dtype, bias=child.bias) + setattr(module, name, child) + elif child.__class__ is OPTLearnedPositionalEmbedding: + child = OPTEmbedding(weight_shape=ds_shape) + setattr(module, name, child) + elif child.__class__ in [LlamaRMSNorm, RMSNorm]: + child = RMSNormalize(dim=ds_shape[-1], + dtype=child.weight.dtype, + eps=child.eps if hasattr(child, 'eps') else child.variance_epsilon) + setattr(module, name, child) + else: + ds_id = None + if hasattr(child.weight, 'ds_id'): + ds_id = child.weight.ds_id + child = EmbeddingLayer(weight_shape=ds_shape, dtype=child.weight.dtype) + if ds_id is not None: + all_ds_ids[ds_id] = child.weight + setattr(module, name, child) + layer_policies[child.__class__](child, prefix + name + '.') + else: + load_module_recursive( + child, + prefix if (level == 0 and ckpt_type == 'pp') and skip_level_0_prefix else \ + prefix + name + '.', + level + 1) + + load_module_recursive(r_module) + + for sd_ in sd: + del sd_ + sd = None + gc.collect() diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/module_quantize.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/module_quantize.py new file mode 100644 index 0000000000000000000000000000000000000000..1f5b2f8a1d2861a50878e262a61b783a9324ad4e --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/module_quantize.py @@ -0,0 +1,72 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + + +def quantize_transformer_layer(orig_layer_impl, model, megatron=False, preln=False): + """ Quantize bert-style transformer layers with DeepSpeed's transformer layer + Arguments: + orig_layer_impl (torch.nn.Module): the original transformer layer implementation to look for, + e.g., transformers.models.bert.modeling_bert.BertLayer or transformers.BertLayer + model (torch.nn.Module): user's nn.module representing their model + + megatron (bool): megatron model-parallel implementation (this is supported for inference only) + preln (bool): does the original layer implementation do pre or post layer norm? + + Note: For Bert kind of models, we inject based on the DeepSpeed-Example models, if not setting huggingface flag. 
+ + Returns: + Updated nn.module with quantized transformer layers + """ + + def quantize_weight(weight): + return weight.to(torch.int8) + + def megatron_layer_quantize(layer): + layer.attention.query_key_value.weight.data = quantize_weight(layer.attention.query_key_value.weight.data) + layer.attention.dense.weight.data = quantize_weight(layer.attention.dense.weight.data) + layer.mlp.dense_h_to_4h.weight.data = quantize_weight(layer.mlp.dense_h_to_4h.weight.data) + layer.mlp.dense_4h_to_h.weight.data = quantize_weight(layer.mlp.dense_4h_to_h.weight.data) + + def bert_layer_quantize(layer): + layer.attention.self.query.weight.data = quantize_weight(layer.attention.self.query.weight.data) + layer.attention.self.key.weight.data = quantize_weight(layer.attention.self.key.weight.data) + layer.attention.self.value.weight.data = quantize_weight(layer.attention.self.value.weight.data) + layer.attention.output.dense.weight.data = quantize_weight(layer.attention.output.dense.weight.data) + if preln: + layer.intermediate.dense_act.weight.data = quantize_weight(layer.intermediate.dense_act.weight.data) + else: + layer.intermediate.dense.weight.data = quantize_weight(layer.intermediate.dense.weight.data) + layer.output.dense.weight.data = quantize_weight(layer.output.dense.weight.data) + + def quantize_fn(child): + if megatron: + # Quantize megatron GPT2 / GPT3 trained model + megatron_layer_quantize(child) + else: + # Quantize either DeepSpeed or HuggingFace trained model + bert_layer_quantize(child) + + return child + + return quantize_module(model=model, orig_class=orig_layer_impl, quantize_fn=quantize_fn) + + +def quantize_module(model, orig_class, quantize_fn): + policy = {orig_class: quantize_fn} + return _quantize_module(model, policy) + + +def _quantize_module(model, policies): + for name, child in model.named_children(): + if child.__class__ in policies: + orig = repr(child) + setattr(model, name, policies[child.__class__](child)) + new = getattr(model, name) + else: + _quantize_module(child, policies) + + return model diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/policy.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/policy.py new file mode 100644 index 0000000000000000000000000000000000000000..41df2b85dc0c16fb1b1c21942f5c963fcdc02ad1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/policy.py @@ -0,0 +1,224 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from abc import ABC, abstractmethod +from deepspeed.utils.types import ActivationFuncType, NormType +import torch +from deepspeed.accelerator import get_accelerator + +transformer_param_names = ( + 'attn_qkvw', \ + 'attn_qkvb', \ + 'attn_ow' , \ + 'attn_ob', \ + 'inter_w', \ + 'inter_b', \ + 'output_w', \ + 'output_b', \ + 'attn_nw', \ + 'attn_nb', \ + 'norm_w', \ + 'norm_b') + + +class DSPolicy(ABC): + _orig_layer_class = None + + def __init__(self): + self.cuda_graph_supported = False + + @abstractmethod + def attention(self): + """ + Returns attention qkv and dense parameters + weight: (3*hidden, hidden) and (hidden, hidden) + bias: (3*hidden) and (hidden) + """ + raise NotImplementedError + + +class TransformerPolicy(DSPolicy): + # a static class variable containing the HuggingFace model configuration. 
+ # see e.g., transformers.models.opt.configuration_opt.OPTConfig + hf_model_config = None + + def __init__( + self, + inference=True, + linear_layer=True, + scale_attention=True, + megatron_v2=False, + use_mup=False, + # the type of activation function used in MLP + mlp_act_func_type=ActivationFuncType.GELU, + # applies layer norm before attention if `pre_attn_norm` is set to True + pre_attn_norm=True, + # this flag shows whether or not using prefix in loading the checkpoint + use_load_prefix=False, + # whether or not the qkv is stored in the split-format + split_qkv=True, + # Type of normalization to perform + norm_type=NormType.LayerNorm): + super().__init__() + self.cuda_graph_supported = False + self.inference = inference + self.linear_layer = linear_layer + self.scale_attention = scale_attention + self.is_megatron_v2 = megatron_v2 + self.use_mup = use_mup + self.mlp_act_func_type = mlp_act_func_type + self.pre_attn_norm = pre_attn_norm + self.use_load_prefix = use_load_prefix + self.split_qkv = split_qkv + self.norm_type = norm_type + + @abstractmethod + def attention(self): + """ + Returns attention qkv and dense parameters + weight: (3*hidden, hidden) and (hidden, hidden) + bias: (3*hidden) and (hidden) + """ + raise NotImplementedError + + @abstractmethod + def get_hidden_heads(self): + """ + return hidden_size and number of heads + """ + raise NotImplementedError + + @abstractmethod + def mlp(self): + """ + Returns mlp intermediate and output + weight: (intermediate, hidden) and (hidden, intermediate) + bias: (intermediate) and (hidden) + """ + raise NotImplementedError + + @abstractmethod + def layernorm(self): + """ + Returns LayerNorms used in transformer layer + Post-Attention and pre/post layer norm + gamma and beta with shape: (hidden) + """ + raise NotImplementedError + + +# TODO (lekurile): This function exists in base container as well, consolidate as some point +def transpose(data): + with torch.no_grad(): + data = data.contiguous() + data1 = data.transpose(-1, -2).reshape(-1) + data.reshape(-1).copy_(data1) + data1 = None + return data.reshape(data.shape[-1], data.shape[-2]) + + +# TODO (lekurile): This function exists in megatron feature container as well, consolidate as some point +def _transpose(x, heads=1, mp_replace=None): + heads = heads // mp_replace.mp_size # type: ignore + outer_dim = -1 + attention_head_size = x.shape[outer_dim] // heads + new_x_shape = x.size()[:outer_dim] + (heads, attention_head_size) + x_1 = x.view(*new_x_shape) + (q, k, v) = torch.split(x_1, (x_1.shape[-1] // 3), dim=-1) + if len(q.shape) > 2: + new_shape = (q.shape[0], ) + (-1, ) + return torch.cat((q.reshape(new_shape), k.reshape(new_shape), v.reshape(new_shape)), + dim=outer_dim).reshape(x.shape) + else: + return torch.cat((q.reshape(-1), k.reshape(-1), v.reshape(-1)), dim=-1).reshape(x.shape) + + +# This checks if the parameter exits in the checkpoint file and maybe copies it into the corresponding destination tensor. +# Note that not all parameters are saved in one checkpoint, that's why we always need to check if they exist! 
+def maybe_copy(module, + sd, + weight_quantizer, + mp_replace, + dst_name, + src_name, + qkv=False, + megatron_v2=False, + split_qkv=False, + heads=1): + if src_name in sd: + dst = getattr(module, dst_name) + tmp = sd[src_name] + if len(dst.shape) == 1: + if split_qkv: + dst = mp_replace.strided_copy(dst, tmp, num_splits=3) + else: + dst = mp_replace.copy(dst, tmp) + if qkv and megatron_v2: + dst = torch.nn.parameter.Parameter(_transpose(dst, heads=heads, mp_replace=mp_replace).contiguous()) + else: + if split_qkv: + dst = mp_replace.strided_copy(dst, weight_quantizer.quantize(tmp if weight_quantizer.q_int8 else \ + (transpose(tmp).contiguous())), num_splits=3, int8=weight_quantizer.q_int8) + else: + if qkv and megatron_v2: + tmp = _transpose(transpose(tmp), heads=heads, mp_replace=mp_replace).contiguous() + if weight_quantizer.q_int8: + tmp = transpose(tmp) + dst = mp_replace.copy(dst, weight_quantizer.quantize(tmp if weight_quantizer.q_int8 else \ + transpose(tmp)), int8=weight_quantizer.q_int8) + setattr(module, dst_name, dst) + + +# Extending the maybe_copy function for when the q, k, and v are in separate parameters! +def maybe_copy_qkv(module, sd, weight_quantizer, mp_replace, dst_name, src_names, split_qkv=False): + if src_names[0] in sd: + q = sd[src_names[0]] + k = sd[src_names[1]] + v = sd[src_names[2]] + qkv_data = torch.cat((q, k, v), dim=0) + dst = getattr(module, dst_name) + if len(dst.shape) == 1: + if split_qkv: + dst = mp_replace.strided_copy(dst, qkv_data.contiguous(), num_splits=3) + else: + dst = mp_replace.copy(dst, qkv_data) + else: + if split_qkv: + dst = mp_replace.strided_copy(dst, weight_quantizer.quantize(qkv_data.to(get_accelerator().device_name()) if weight_quantizer.q_int8 else \ + ((transpose(qkv_data)).contiguous())), num_splits=3, int8=weight_quantizer.q_int8) + else: + dst = mp_replace.copy(dst, weight_quantizer.quantize(qkv_data.to(get_accelerator().device_name()) if weight_quantizer.q_int8 else \ + transpose(qkv_data)), int8=weight_quantizer.q_int8) + setattr(module, dst_name, dst) + + +# Extending the `maybe_copy` function for when mlp1 is in separate parameters for GeGLU +def maybe_copy_geglu(module, sd, weight_quantizer, mp_replace, dst_name, src_names): + if src_names[0] in sd: + reg_proj = sd[src_names[0]] + gate_proj = sd[src_names[1]] + + mlp1_data = torch.cat((reg_proj, gate_proj), dim=0) + dst = getattr(module, dst_name) + + dst = mp_replace.strided_copy(dst, weight_quantizer.quantize(mlp1_data.to(get_accelerator().device_name()) if weight_quantizer.q_int8 else \ + transpose(mlp1_data)), num_splits=2, int8=weight_quantizer.q_int8) + setattr(module, dst_name, dst) + + +def pack_lora_weights(p): + return [ + p.lora_right_weight, \ + p.lora_left_weight, \ + p.lora_scaling + ] + + +def maybe_get_lora(p): + if hasattr(p, 'lora_right_weight'): + lora_param = pack_lora_weights(p) + else: + lora_param = [] + return lora_param diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/replace_module.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/replace_module.py new file mode 100644 index 0000000000000000000000000000000000000000..e1703562d1808922b91c687d35370793d9a9a096 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/replace_module.py @@ -0,0 +1,673 @@ +# Copyright (c) Microsoft Corporation. 
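An aside on the LoRA helpers at the end of policy.py above: maybe_get_lora returns the packed [lora_right_weight, lora_left_weight, lora_scaling] triplet only when a parameter actually carries those attributes, so callers can collect LoRA state uniformly across a module. A minimal sketch of that contract, using a hypothetical LoRA-annotated nn.Linear (only the attribute names are taken from the helper; nothing else here is DeepSpeed API):

import torch

layer = torch.nn.Linear(16, 16)
# hypothetical annotation: attach LoRA side weights the way a wrapped layer might
layer.weight.lora_right_weight = torch.zeros(16, 4)
layer.weight.lora_left_weight = torch.zeros(4, 16)
layer.weight.lora_scaling = 0.5

lora_params = []
for p in layer.parameters():
    # same check as maybe_get_lora: parameters without LoRA attributes contribute nothing
    if hasattr(p, 'lora_right_weight'):
        lora_params.append([p.lora_right_weight, p.lora_left_weight, p.lora_scaling])

print(len(lora_params))  # 1 -- the bias carries no LoRA attributes and is skipped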
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import torch +import tqdm +import deepspeed +import deepspeed.ops.transformer as transformer_inference +from deepspeed.ops.transformer.inference.diffusers_attention import DeepSpeedDiffusersAttention +from deepspeed.ops.transformer.inference.diffusers_transformer_block import DeepSpeedDiffusersTransformerBlock +from deepspeed.ops.transformer.inference.diffusers_2d_transformer import Diffusers2DTransformerConfig +from deepspeed.accelerator import get_accelerator +from .replace_policy import replace_policies, generic_policies +from .auto_tp import AutoTP, ReplaceWithTensorSlicing, Loading + +from deepspeed import comm as dist +from deepspeed.module_inject.tp_shard import set_num_kv_heads, set_n_embd + +from .load_checkpoint import load_model_with_checkpoint +import time + +from .utils import policy_to_ds_container +import gc + + +def get_transformer_name(replaced_module): + from .containers import supported_models + from torch.nn import ModuleList + transformer_name = '' + for n, c in replaced_module.named_children(): + if c.__class__ in supported_models: + transformer_name += n + '.' + for name, child in c.named_children(): + if child.__class__ is ModuleList: + transformer_name += name + break + break + return transformer_name + + +class GroupQuantizer: + + def __init__(self, q_int8=True, group_size=1, num_bits=8, num_groups=0): + self.group_size = group_size + self.num_bits = num_bits + self.q_int8 = q_int8 + + self.num_groups = num_groups + + def quantize(self, inputs, qkv=True, count=1, parallel_dim=0): + if not self.q_int8 or not qkv: + inputs = torch.nn.Parameter(inputs, requires_grad=False) + inputs.scale = torch.empty(1) + return inputs + q_range = 2**self.num_bits + num_groups = self.num_groups if self.num_groups > 0 else inputs.shape[0] // self.group_size + inputs = inputs.to(get_accelerator().current_device_name()) + input_flat = inputs.reshape(num_groups, -1).contiguous() + input_min = torch.min(input_flat, dim=1, keepdim=True)[0].float() + input_max = torch.max(input_flat, dim=1, keepdim=True)[0].float() + scale = torch.max(input_min.abs(), input_max.abs()) * 2.0 / (q_range) + input_flat = (input_flat / scale).round().clamp(-q_range // 2, q_range // 2 - 1) + inputs_q = input_flat.reshape(inputs.shape).to(torch.int8).contiguous() + out = torch.nn.Parameter(inputs_q, requires_grad=False) + inputs_split = inputs.split(inputs.shape[parallel_dim] // 2, dim=parallel_dim) + input_flat = [inputs_split[i].reshape(num_groups, -1).contiguous() for i in range(2)] + input_min = [torch.min(input_flat[i], dim=1, keepdim=True)[0].float() for i in range(2)] + input_max = [torch.max(input_flat[i], dim=1, keepdim=True)[0].float() for i in range(2)] + scale1 = [(torch.max(input_min[i].abs(), input_max[i].abs()) * 2.0 / (q_range)).squeeze().unsqueeze(0) + for i in range(2)] + + out.scale = torch.cat([scale.squeeze().unsqueeze(0), scale1[0], scale1[1]], dim=0).reshape(num_groups, + -1).contiguous() + return out + + +def _module_match(module): + for policy in generic_policies: + policy = policy() + if policy.match(module): + return policy + return None + + +def generic_injection(module, dtype=None, enable_cuda_graph=True): + + def replace_attn(child, policy): + policy_attn = policy.attention(child) + if policy_attn is None: + return child + if len(policy_attn) == 5: + qkvw, attn_ow, attn_ob, hidden_size, heads = policy_attn + else: + qw, kw, vw, attn_ow, attn_ob, hidden_size, heads = policy_attn + + config = 
transformer_inference.DeepSpeedInferenceConfig( + hidden_size=hidden_size, + heads=heads, + dtype=dtype, + triangular_masking=False, + max_out_tokens=4096, + ) + attn_module = DeepSpeedDiffusersAttention(config) + + def transpose(data): + data = data.contiguous() + data.reshape(-1).copy_(data.transpose(-1, -2).contiguous().reshape(-1)) + data = data.reshape(data.shape[-1], data.shape[-2]) + data.to(get_accelerator().current_device_name()) + return data + + if len(policy_attn) == 5: + attn_module.attn_qkvw.data = transpose(qkvw.data) + else: + attn_module.attn_qkvw = None + attn_module.attn_qw.data = transpose(qw.data) + attn_module.attn_kw.data = transpose(kw.data) + attn_module.attn_vw.data = transpose(vw.data) + + attn_module.attn_qkvb = None + attn_module.attn_ow.data = transpose(attn_ow.data) + attn_module.attn_ob.data.copy_(attn_ob.data.to(get_accelerator().current_device_name())) + return attn_module + + def replace_attn_block(child, policy): + config = Diffusers2DTransformerConfig() + return DeepSpeedDiffusersTransformerBlock(child, config) + + if isinstance(module, torch.nn.Module): + pass + else: + if dtype not in [torch.float16, torch.half]: + raise ValueError("Generic injection only supported with FP16") + + try: + import diffusers + if hasattr(diffusers.models.attention, 'CrossAttention'): + cross_attention = diffusers.models.attention.CrossAttention + else: + cross_attention = diffusers.models.attention_processor.Attention + attention_block = diffusers.models.attention.BasicTransformerBlock + new_policies = { + cross_attention: replace_attn, + attention_block: replace_attn_block, + } + except ImportError: + new_policies = {} + + #replace_transformer_layer(None, + # module.text_encoder, + # training=False, + # replace_with_kernel_inject=True, + # triangular_masking=True, + # max_out_tokens=8192) + from ..model_implementations.transformers.clip_encoder import DSClipEncoder + cg_encoder = DSClipEncoder(module.text_encoder, enable_cuda_graph=enable_cuda_graph) + setattr(module, 'text_encoder', cg_encoder) + for name in module.__dict__.keys(): + sub_module = getattr(module, name) + policy = _module_match(sub_module) + + if policy is not None: + + def _replace_module(module, policy): + for name, child in module.named_children(): + _replace_module(child, policy) + if child.__class__ in new_policies: + replaced_module = new_policies[child.__class__](child, policy) + setattr(module, name, replaced_module) + + _replace_module(sub_module, policy) + new_module = policy.apply(sub_module, enable_cuda_graph=enable_cuda_graph) + print(f"**** found and replaced {name} w. 
{type(new_module)}") + setattr(module, name, new_module) + + +container_g = None + + +def replace_transformer_layer(orig_layer_impl, model, checkpoint_dict, config, model_config): + """ Replace bert-style transformer layers with DeepSpeed's transformer layer + Arguments: + orig_layer_impl (torch.nn.Module): the original transformer layer implementation to look for, + e.g., transformers.models.bert.modeling_bert.BertLayer or transformers.BertLayer + model (torch.nn.Module): user's nn.module representing their model + checkpoint_dict: Dictionary for checkpoint passed from the Inference Engine + config: top-level DS Inference config defined in inference/config.py + model_config: HuggingFace model config passed from the inference/engine.py + Returns: + Updated nn.module with replaced transformer layers + """ + # defining globals as internally defined functions inherit these everywhere + quantize = (config.dtype == torch.int8) + # todo: Refactor later. In future, let's minimize the style used above and use config.** instead + + linear_layer_setting = None + ''' + linear_layer_setting (tuple of modules) [Optional]: shows which two classes are used for linear layers and embedding layers + ''' + micro_batch_size = -1 + seed = -1 + local_rank = -1 + + mp_replace = ReplaceWithTensorSlicing(mp_group=config.tensor_parallel.tp_group, + mp_size=config.tensor_parallel.tp_size) #, out_dim=0, in_dim=1) + + def replace_with_policy(child, policy_cls, triangular_masking, inference=False, layer_id=0): + policy = policy_cls(child, inference=inference) + if not policy.cuda_graph_supported: + # policy says cuda graph is not supported raise an error if set + assert not config.enable_cuda_graph, "cuda graph is not supported with this model, please disable" + + from deepspeed.moe.layer import MoE + moe = False + if hasattr(child, 'mlp') and isinstance(child.mlp, MoE): + num_experts = child.mlp.num_experts + moe = True + + # 1. Create a model-specific container object using the policy object. + _container = policy_to_ds_container(policy=policy, + config=config, + model_config=model_config, + layer_id=layer_id, + child=child) + _container.set_moe(moe) + + # 2. Set the tensor parallelism config + _container.set_tensor_parallel_config(config.tensor_parallel.tp_size, config.tensor_parallel.tp_group) + + # 3. Initialize tensors + _container.initialize_tensors() + + # 4. deal with data types -- needs refactor to use dtype instead of fp16 + if config.dtype in [torch.float16, torch.bfloat16, torch.int8]: + _container.convert_to_required_dtype() + + # 5. Set the quantization config + quantizer = GroupQuantizer(q_int8=quantize) + _container.set_quantization_config(quantizer) + + # 6. create a DS Inference config object + _container.create_ds_model_config() + + # 7. use the config and create the module + _container.create_module() + + # 8. transpose the weights and bias if needed + _container.transpose() + + # 9. deal with tensor parallelism. + _container.apply_tensor_parallelism(mp_replace) + + # 10. copy the tensors from the model-specific container to the new module + _container.copy_data_to_new_module() + + # 11. set global for generic checkpoint loading + global container_g + + if container_g is None: + container_g = _container + + return _container.module + + def replace_wo_policy(module, all_reduce_linears, prefix="", state_dict=None): + #mp_replace = ReplaceWithTensorSlicing(mp_group=config.tensor_parallel.tp_group) + + # 1. 
Create AutoTP object + _autotp = AutoTP(module, all_reduce_linears, prefix, state_dict, linear_layer_setting, orig_layer_impl) + + # 2. Set the tensor parallelism config + _autotp.set_tensor_parallel_config(config.tensor_parallel.tp_size, config.tensor_parallel.tp_group) + + # 3. Try to get num_key_heads from model_config.num_key_value_heads + num_kv_heads = _autotp.get_model_num_kv_heads(model_config) + + # 4. When we have num_kv_heads defined, uneven division is possible, otherwise enforce even division + set_num_kv_heads(num_kv_heads) + + # 4.1 Get n_embd + n_embd = None + multi_query_n_embd_names = ['n_embd'] + for name in multi_query_n_embd_names: + if hasattr(model_config, name): + n_embd = getattr(model_config, name) + if n_embd != None: + break + + # 4.2 set n_embd + set_n_embd(n_embd) + + # 5. Set linear policies + _autotp.update_linear_policies() + + # 6. Replace modules + if "lm_head" in all_reduce_linears or "embed_out" in all_reduce_linears: + return _autotp._replace_last_linear_module(module) + return _autotp._replace_module(module) + + def replace_fn(child, _policy, layer_id=0, prefix="", state_dict=None): + training = False # todo: refactor this part to go in the config + if training: + # copy relevant state from child -> new module + new_module = replace_with_policy(child, _policy, config.triangular_masking) + + else: + # copy relevant state from child -> new module + if config.replace_with_kernel_inject: + new_module = replace_with_policy(child, + _policy, + config.triangular_masking, + inference=True, + layer_id=layer_id) + else: + new_module = replace_wo_policy(child, _policy, prefix=prefix, state_dict=state_dict) + + return new_module + + def set_lm_head(module): + embedding_weight = None + for n, p in module.named_parameters(): + if "word_embeddings." in n or "embed_tokens." in n or "wte." 
in n: + embedding_weight = p + if embedding_weight is not None and hasattr(module, "lm_head") and hasattr( + module.lm_head, "weight") and module.lm_head.weight.is_meta: + module.lm_head.weight = embedding_weight + # enable tensor parallel for the last linear + if hasattr(module, "lm_head") and hasattr(module.lm_head, + "weight") and not module.lm_head.weight.is_meta and isinstance( + module.lm_head, torch.nn.Linear): + module = replace_wo_policy(module, ("lm_head", ), 0, "lm_head") + elif hasattr(module, "embed_out") and hasattr(module.embed_out, + "weight") and not module.embed_out.weight.is_meta and isinstance( + module.embed_out, torch.nn.Linear): + module = replace_wo_policy(module, ("embed_out", ), 0, "embed_out") + return module + + if checkpoint_dict is not None and not config.replace_with_kernel_inject: + # AutoTP shard loading + checkpoint = checkpoint_dict["checkpoints"] + pbar = tqdm.tqdm(total=len(checkpoint), desc=f"Loading {len(checkpoint)} checkpoint shards") + for i in range(len(checkpoint)): + checkpoint_file = os.path.join(config.base_dir, checkpoint[i]) + replaced_module = replace_module(model=model, + orig_class=orig_layer_impl, + replace_fn=replace_fn, + _replace_policy=config.injection_policy_tuple, + checkpoint=checkpoint_file) + pbar.update(1) + gc.collect() + replaced_module = set_lm_head(replaced_module) + else: + replaced_module = replace_module(model=model, + orig_class=orig_layer_impl, + replace_fn=replace_fn, + _replace_policy=config.injection_policy_tuple) + + quantizer = GroupQuantizer(q_int8=quantize) + world_size = dist.get_world_size() if dist.is_initialized() else 1 + rank = dist.get_rank() if dist.is_initialized() else 0 + if checkpoint_dict is not None and config.replace_with_kernel_inject: + assert container_g.ckpt_load_enabled, \ + f"Meta Tensor checkpoint loading not supported in {container_g.__class__.__name__} container" + start_time = time.time() + checkpoint = checkpoint_dict['checkpoints'] + ckpt_list = checkpoint["tp"] if type(checkpoint) is dict else checkpoint + ckpt_type = checkpoint_dict.get('parallelization', 'pp') + ckpt_mp_size = checkpoint_dict.get('tp_size', len(ckpt_list)) + ckpt_mp_size = checkpoint_dict.get('mp_size', ckpt_mp_size) + base_dir1 = checkpoint_dict.get('base_dir', config.base_dir) + + if ckpt_type == 'pp' and type(checkpoint) is list: + pbar = tqdm.tqdm(total=len(checkpoint), desc=f"Loading {len(checkpoint)} checkpoint shards") + + for i in range(len(checkpoint)): + sd = [torch.load(os.path.join(base_dir1, checkpoint[i]), map_location='cpu')] + load_model_with_checkpoint(replaced_module, + sd, + mp_replace, + ckpt_type, + ckpt_mp_size, + quantizer, + container=container_g) + pbar.update(1) + else: + num_checkpoints = len(ckpt_list) // ckpt_mp_size + tp_split_size = (world_size / ckpt_mp_size) + sd_offset = int(rank / tp_split_size) + sd_count = int((rank + max(1, tp_split_size)) / tp_split_size) - sd_offset + pbar = tqdm.tqdm(total=num_checkpoints, desc=f"Loading {num_checkpoints} checkpoint shards") + for i in range(num_checkpoints): + pbar.update(1) + ckpt_index = i * ckpt_mp_size + sd_offset + ckpt_files = [ + os.path.join(base_dir1, ckpt_list[ckpt_index + j]) if base_dir1 else ckpt_list[ckpt_index + j] + for j in range(sd_count) + ] + sds = [torch.load(ckpt_file, map_location='cpu') for ckpt_file in ckpt_files] + load_model_with_checkpoint(replaced_module, + sds, + mp_replace, + ckpt_type, + ckpt_mp_size, + quantizer, + int(rank % tp_split_size), + container=container_g) + sds = [None for _ in sds] + gc.collect() 
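To make the shard indexing in the loop above concrete, here is a small standalone sketch of the same arithmetic with illustrative numbers (a checkpoint written with tp_size 2 being loaded across 4 ranks; plain Python, no DeepSpeed imports):

ckpt_list = [f"tp_{r:0>2d}_{m:0>2d}.pt" for m in range(2) for r in range(2)]  # 4 shards, ckpt_mp_size=2
world_size, ckpt_mp_size = 4, 2
num_checkpoints = len(ckpt_list) // ckpt_mp_size       # each rank walks 2 checkpoint groups
tp_split_size = world_size / ckpt_mp_size              # 2 ranks share each stored shard
for rank in range(world_size):
    sd_offset = int(rank / tp_split_size)              # which stored shard this rank starts from
    sd_count = int((rank + max(1, tp_split_size)) / tp_split_size) - sd_offset
    files = [ckpt_list[i * ckpt_mp_size + sd_offset + j] for i in range(num_checkpoints) for j in range(sd_count)]
    print(rank, files)  # ranks 0/1 read the offset-0 shards, ranks 2/3 the offset-1 shards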
+ + if "non_tp" in checkpoint: + pbar = tqdm.tqdm(total=len(checkpoint["non_tp"]), + desc=f"Loading {len(checkpoint['non_tp'])} checkpoint shards") + + for i in range(len(checkpoint["non_tp"])): + pbar.update(1) + ckpt_file = os.path.join(base_dir1, + checkpoint["non_tp"][i]) if base_dir1 else checkpoint["non_tp"][i] + sds = [torch.load(ckpt_file, map_location='cpu')] + load_model_with_checkpoint(replaced_module, + sds, + mp_replace, + ckpt_type, + ckpt_mp_size, + quantizer, + int(rank % tp_split_size), + container=container_g) + sds = [None for _ in sds] + gc.collect() + set_lm_head(replaced_module) + print(f"checkpoint loading time at rank {rank}: {time.time()-start_time} sec") + + if config.save_mp_checkpoint_path is not None: + from collections import OrderedDict + import json + num_partitions = 8 + + if checkpoint_dict is None: + ckpt_name = "ds_model" + try: + from transformers.models.bloom.modeling_bloom import BloomForCausalLM + if isinstance(model, BloomForCausalLM): + ckpt_name = "bloom" + except ImportError: + ckpt_name = "ds_model" + else: + ckpt_name = checkpoint_dict['type'] + if dist.is_initialized(): + dist.barrier() + transformer_name = get_transformer_name(replaced_module) + non_tp_ckpt_name = f'non-tp.pt' + ckpt_files = [non_tp_ckpt_name] + os.makedirs(config.save_mp_checkpoint_path, exist_ok=True) + + if not dist.is_initialized() or dist.get_rank() == 0: + print("Saving tp-sharded checkpoints") + torch.save( + OrderedDict({k: v + for k, v in dict(replaced_module.state_dict()).items() + if transformer_name not in k}), f'{config.save_mp_checkpoint_path}/{non_tp_ckpt_name}') + + dtype_reprs = { + torch.float32: 'float32', + torch.float16: 'float16', + torch.int8: 'int8', + torch.bfloat16: 'bfloat16' + } + + ckpt_config = json.dumps({ + 'type': ckpt_name, + 'base_dir': f'{config.save_mp_checkpoint_path}', + 'checkpoints': { + "non_tp": ckpt_files, + "tp": [f'tp_{r:0>2d}_{m:0>2d}.pt' for m in range(num_partitions) for r in range(world_size)] + }, + 'version': 1.0, + 'parallelization': 'tp', + 'tp_size': world_size, + 'dtype': dtype_reprs[config.dtype] + }) + with open(f"{config.save_mp_checkpoint_path}/ds_inference_config.json", "w") as cfg: + cfg.write(ckpt_config) + + rep_sd = replaced_module.state_dict() + for n, p in replaced_module.named_parameters(): + if hasattr(p, 'scale'): + rep_sd[n] = [p, p.scale] + keys = list(rep_sd.keys()) + partition_size = (len(keys) // num_partitions + 1) + for m in range(num_partitions): + torch.save( + OrderedDict({ + k: [rep_sd[k], rep_sd[k].scale] if hasattr(rep_sd[k], 'scale') else rep_sd[k] + for k in keys[m * partition_size:(m + 1) * partition_size] if transformer_name in k + }), f'{config.save_mp_checkpoint_path}/tp_{rank:0>2d}_{m:0>2d}.pt') + + return replaced_module + + +def revert_transformer_layer(orig_layer_impl, model, config, preln=False): + """ Revert DeepSpeed's transformer layer back to original bert-style transformer layer + Arguments: + orig_layer_impl (torch.nn.Module): the original transformer layer implementation that was replaced, + e.g., transformers.models.bert.modeling_bert.BertLayer or transformers.BertLayer + model (torch.nn.Module): user's nn.module representing their model + config (dict): model config containing hidden size, attention heads, etc. 
+ Returns: + Updated nn.module with original bert-style transformer layers + """ + + def replace_fn(child, _replace_policy, layer_id): + #from turing.nvidia_modelingpreln import BertLayer + orig_module = orig_layer_impl(config) + + # copy relevant state from child -> original module + qkvw = child.attn_qkvw.data + qkvb = child.attn_qkvb.data + + qw, kw, vw = torch.chunk(qkvw, 3, axis=0) + qb, kb, vb = torch.chunk(qkvb, 3, axis=0) + + orig_module.attention.self.query.weight.data = qw + orig_module.attention.self.query.bias.data = qb + orig_module.attention.self.key.weight.data = kw + orig_module.attention.self.key.bias.data = kb + orig_module.attention.self.value.weight.data = vw + orig_module.attention.self.value.bias.data = vb + + orig_module.attention.output.dense.weight.data = child.attn_ow.data + orig_module.attention.output.dense.bias.data = child.attn_ob.data + + attn_ln_w = child.attn_nw.data + attn_ln_b = child.attn_nb.data + if preln: + orig_module.PostAttentionLayerNorm.weight.data = attn_ln_w + orig_module.PostAttentionLayerNorm.bias.data = attn_ln_b + else: + orig_module.attention.output.LayerNorm.weight.data = attn_ln_w + orig_module.attention.output.LayerNorm.bias.data = attn_ln_b + + inter_ff_w = child.inter_w.data + inter_ff_b = child.inter_b.data + if preln: + orig_module.intermediate.dense_act.weight.data = inter_ff_w + orig_module.intermediate.dense_act.bias.data = inter_ff_b + else: + orig_module.intermediate.dense.weight.data = inter_ff_w + orig_module.intermediate.dense.bias.data = inter_ff_b + + orig_module.output.dense.weight.data = child.output_w.data + orig_module.output.dense.bias.data = child.output_b.data + + transformer_ln_w = child.norm_w.data + transformer_ln_b = child.norm_b.data + if preln: + orig_module.PreAttentionLayerNorm.weight.data = transformer_ln_w + orig_module.PreAttentionLayerNorm.bias.data = transformer_ln_b + else: + orig_module.output.LayerNorm.weight.data = transformer_ln_w + orig_module.output.LayerNorm.bias.data = transformer_ln_b + return orig_module + + return replace_module(model=model, + orig_class=deepspeed.DeepSpeedTransformerLayer, + replace_fn=replace_fn, + _replace_policy=None) + + +def replace_module(model, orig_class, replace_fn, _replace_policy, checkpoint=None): + """ Scan the model for instances of ``orig_class`` to replace using ``replace_fn``. + Arguments: + model (torch.nn.Module): the model to augment + orig_class (torch.nn.Module): the module to search for + replace_fn (method): a method to convert instances of ``orig_class`` to the + desired type and return a new instance. + Returns: + A modified ``model``. + """ + sd = None + if checkpoint is not None: + if checkpoint.endswith(".safetensors"): + from safetensors.torch import load_file + sd = load_file(checkpoint) + else: + sd = torch.load(checkpoint, map_location='cpu') + + policy = {} + if orig_class is not None: + policy.update({orig_class: (replace_fn, _replace_policy)}) + else: + for plcy in replace_policies: + # instantiate a throw-away policy in order to populate the _orig_layer_class + _ = plcy(None) + if isinstance(plcy._orig_layer_class, list): + for orig_layer_class in plcy._orig_layer_class: + policy.update({orig_layer_class: (replace_fn, plcy)}) + elif plcy._orig_layer_class is not None: + policy.update({plcy._orig_layer_class: (replace_fn, plcy)}) + assert len(policy.items()) > 0,\ + "No default policy found! Please specify your policy injection_policy (like {BertLayer:HFBertLayerPolicy})."
+\ + "You can find some samples here: https://github.com/microsoft/DeepSpeed/blob/master/deepspeed/module_inject/replace_policy.py" + + replaced_module, _ = _replace_module(model, policy, state_dict=sd) + return replaced_module + + +from ..pipe import PipelineModule + +import re + + +def skip_level_0_prefix(model, state_dict): + model = str(model) + key = re.search(r": (.*?)Model", model) + if key is None: + key = re.search(r": (.*?)Stack", model) + if key is None: + key = re.match(r"(.*?)Model", model) + # if keys start with 'model.', don't skip level 0 prefix + if state_dict is not None: + for item in state_dict.keys(): + if re.match("^model[.]", item): + return False + if key is not None and key.group(1).lower() in ["bloom", "opt"]: + return True + return False + + +def _replace_module(model, policies, prefix='', layer_id=0, level_id=0, state_dict=None): + """ Traverse model's children recursively and apply any transformations in ``policies``. + Arguments: + model (torch.nn.Module): model to augment + policies (dict): Mapping of source class to replacement function. + Returns: + Modified ``model``. + """ + for name, child in model.named_children(): + if child.__class__ in policies: + replaced_module = policies[child.__class__][0](child, + policies[child.__class__][-1], + layer_id, + prefix=prefix + name, + state_dict=state_dict) + setattr(model, name, replaced_module) + if isinstance(model, PipelineModule): + assert hasattr(model, 'forward_funcs'),\ + "we require pipe-module to have the list of fwd_functions" + model.forward_funcs[model.fwd_map[name]] = replaced_module + layer_id += 1 + else: + checking_key = prefix + name + '.' + if Loading.is_load_module(child) and state_dict is not None: + if any(checking_key in item for item in state_dict): + Loading.load( + child, + state_dict, + checking_key, + ) + else: + continue + if len(child._buffers) != 0 and state_dict is not None: + Loading.load_buffer(child, state_dict, checking_key) + _, layer_id = _replace_module(child, + policies, + prefix if level_id == 0 and skip_level_0_prefix(model, state_dict) else \ + prefix + name + '.', + layer_id=layer_id, + level_id=level_id + 1, + state_dict=state_dict) + + # Add the reset_cache func to the model, so that it can be called in the beginning of text-generation. + model.reset_cache = transformer_inference.DeepSpeedTransformerInference.reset_cache + return model, layer_id diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/replace_policy.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/replace_policy.py new file mode 100644 index 0000000000000000000000000000000000000000..2c06e31aaa41ae61b5091a7766b709fac2c534f5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/replace_policy.py @@ -0,0 +1,30 @@ +# Copyright (c) Microsoft Corporation. 
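The replace_module/_replace_module pair above is, at its core, a class-keyed dispatch: walk named_children recursively and swap any child whose class appears in the policy dict. A minimal standalone sketch of just that traversal pattern (toy policy, not DeepSpeed's actual injection policies):

import torch.nn as nn

def swap_modules(model, policies):
    # policies maps an original class to a function that returns its replacement
    for name, child in model.named_children():
        if child.__class__ in policies:
            setattr(model, name, policies[child.__class__](child))
        else:
            swap_modules(child, policies)
    return model

model = nn.Sequential(nn.Linear(4, 4), nn.ReLU(), nn.Linear(4, 2))
swap_modules(model, {nn.ReLU: lambda old: nn.GELU()})
print(model)  # the ReLU child is now a GELU, replaced in place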
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .containers import HFGPT2LayerPolicy +from .containers import HFBertLayerPolicy +from .containers import BLOOMLayerPolicy +from .containers import HFGPTJLayerPolicy +from .containers import HFGPTNEOLayerPolicy +from .containers import GPTNEOXLayerPolicy +from .containers import HFOPTLayerPolicy +from .containers import MegatronLayerPolicy +from .containers import HFDistilBertLayerPolicy +from .containers import HFCLIPLayerPolicy +from .containers import LLAMALayerPolicy +from .containers import UNetPolicy +from .containers import VAEPolicy +from .containers import LLAMA2LayerPolicy +from .containers import InternLMLayerPolicy + +# transformer-based policies +replace_policies = [ + HFBertLayerPolicy, HFGPTNEOLayerPolicy, GPTNEOXLayerPolicy, HFGPTJLayerPolicy, MegatronLayerPolicy, + HFGPT2LayerPolicy, BLOOMLayerPolicy, HFOPTLayerPolicy, HFCLIPLayerPolicy, HFDistilBertLayerPolicy, + LLAMALayerPolicy, LLAMA2LayerPolicy, InternLMLayerPolicy +] + +# non-transformer-based policies +generic_policies = [UNetPolicy, VAEPolicy] diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/tp_shard.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/tp_shard.py new file mode 100644 index 0000000000000000000000000000000000000000..79c19b5f12722784633e78c4a78834f0efaecad8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/tp_shard.py @@ -0,0 +1,52 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from deepspeed import comm as dist +global num_kv_heads + + +def set_num_kv_heads(num): + global num_kv_heads + num_kv_heads = num + + +def set_n_embd(num): + global n_embd + n_embd = num + + +def get_num_kv_heads(): + global num_kv_heads + return num_kv_heads + + +def get_shard_size(total_size, mp_size, name=None, rank=None): + global num_kv_heads + last_linear = ["lm_head", "embed_out"] + # When we have num_kv_heads defined, uneven division is possible, otherwise enforce near even division + if rank == None: + rank = dist.get_rank() + if num_kv_heads != None and total_size % num_kv_heads == 0 and "mlp" not in str(name) and str( + name) not in last_linear: + my_slices = (num_kv_heads // mp_size) + (1 if rank < (num_kv_heads % mp_size) else 0) + return total_size * my_slices // num_kv_heads + else: + if total_size >= 64: + grain_size = total_size // 64 + return (grain_size // mp_size + (1 if rank < (grain_size % mp_size) else 0)) * 64 + else: + return total_size // mp_size + (1 if rank < (total_size % mp_size) else 0) + + +def get_n_embd(): + global n_embd + return n_embd + + +def get_shard_size_list(total_size, mp_size, name=None): + shard_sizes = [] + for i in range(mp_size): + shard_sizes.append(get_shard_size(total_size, mp_size, name, i)) + return shard_sizes diff --git a/venv/lib/python3.10/site-packages/deepspeed/module_inject/utils.py b/venv/lib/python3.10/site-packages/deepspeed/module_inject/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..42822128f9e11c84660dcd57a2473753b3bc6642 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/module_inject/utils.py @@ -0,0 +1,49 @@ +# Copyright (c) Microsoft Corporation. 
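The kv-head aware sharding in tp_shard.py above is easiest to see with numbers. The sketch below re-implements the same arithmetic standalone (it does not import the module) for an assumed 8 KV heads split across 3 tensor-parallel ranks:

def shard_sizes(total_size, mp_size, num_kv_heads):
    # mirrors get_shard_size_list on the kv-head path: lower ranks absorb the
    # remainder heads, so the per-rank sizes can be uneven
    sizes = []
    for rank in range(mp_size):
        my_slices = num_kv_heads // mp_size + (1 if rank < num_kv_heads % mp_size else 0)
        sizes.append(total_size * my_slices // num_kv_heads)
    return sizes

print(shard_sizes(4096, 3, 8))  # [1536, 1536, 1024] -- 3, 3 and 2 heads' worth of columns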
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from deepspeed.utils import log_dist + + +# helper function to map between DS policies and DS containers +def policy_to_ds_container(**kwargs): + from .containers import HFGPT2LayerPolicy, DS_GPT2Container + from .containers import HFBertLayerPolicy, DS_BERTContainer + from .containers import BLOOMLayerPolicy, DS_BloomContainer + from .containers import HFGPTJLayerPolicy, DS_GPTJContainer + from .containers import HFGPTNEOLayerPolicy, DS_GPTNEOContainer + from .containers import GPTNEOXLayerPolicy, DS_GPTNEOXContainer + from .containers import HFOPTLayerPolicy, DS_OPTContainer + from .containers import MegatronLayerPolicy, DS_MegatronGPTContainer + from .containers import HFDistilBertLayerPolicy, DS_DistilBERTContainer + from .containers import LLAMALayerPolicy, DS_LLAMAContainer + from .containers import LLAMA2LayerPolicy, DS_LLAMA2Container + from .containers import InternLMLayerPolicy, DS_InternLMContainer + + policy_to_container = { + HFGPT2LayerPolicy: DS_GPT2Container, + HFBertLayerPolicy: DS_BERTContainer, + BLOOMLayerPolicy: DS_BloomContainer, + HFGPTJLayerPolicy: DS_GPTJContainer, + HFGPTNEOLayerPolicy: DS_GPTNEOContainer, + GPTNEOXLayerPolicy: DS_GPTNEOXContainer, + HFOPTLayerPolicy: DS_OPTContainer, + MegatronLayerPolicy: DS_MegatronGPTContainer, + HFDistilBertLayerPolicy: DS_DistilBERTContainer, + LLAMALayerPolicy: DS_LLAMAContainer, + LLAMA2LayerPolicy: DS_LLAMA2Container, + InternLMLayerPolicy: DS_InternLMContainer + } + + container = None + policy = kwargs['policy'] + assert policy is not None, "Policy cannot be None" + policy_type = type(policy) + + if policy_type not in policy_to_container: + log_dist(f"Policy type {policy_type} not supported", [0]) + else: + container = policy_to_container[policy_type](**kwargs) + + return container diff --git a/venv/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a34411e3665437d1e6e03b86976f87f7a685ddfd Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/config.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6eea91ac2e0960a197ef818920ac74bc1eb1c49 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/csv_monitor.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/csv_monitor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..093df18c0863a2abfe92f946073054aa390e0707 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/csv_monitor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/monitor.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/monitor.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..911db5db2311e46cac3f47e14ec3eef69446dbd7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/monitor.cpython-310.pyc differ 
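policy_to_ds_container above resolves the container purely from the concrete type of the policy object, which is why unknown policy types fall through to the log message and return None. The same lookup shape in miniature (the classes here are hypothetical; only the dispatch pattern is taken from the helper):

class PolicyA: pass
class PolicyB: pass

class ContainerA:
    def __init__(self, **kwargs):
        self.policy = kwargs['policy']

class ContainerB(ContainerA): pass

policy_to_container = {PolicyA: ContainerA, PolicyB: ContainerB}

def build_container(policy, **kwargs):
    ctor = policy_to_container.get(type(policy))
    if ctor is None:
        print(f"Policy type {type(policy)} not supported")
        return None
    return ctor(policy=policy, **kwargs)

print(type(build_container(PolicyB())).__name__)  # ContainerB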
diff --git a/venv/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/tensorboard.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/tensorboard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b4bccd36d1433ae3c3b559e7a13c7c54d02bb2ab Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/tensorboard.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c302bae4e820599c473811296b0828e953ad768 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/wandb.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/wandb.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c75133a6e12bb6d03134ff899d6ceb495f244610 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/monitor/__pycache__/wandb.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/monitor/monitor.py b/venv/lib/python3.10/site-packages/deepspeed/monitor/monitor.py new file mode 100644 index 0000000000000000000000000000000000000000..5a32b8bbcadd753ea592e1da28083e1a4c9c5f25 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/monitor/monitor.py @@ -0,0 +1,53 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Support different forms of monitoring such as wandb and tensorboard +""" + +from abc import ABC, abstractmethod +import deepspeed.comm as dist + + +class Monitor(ABC): + + @abstractmethod + def __init__(self, monitor_config): + self.monitor_config = monitor_config + + @abstractmethod + def write_events(self, event_list): + pass + + +from .wandb import WandbMonitor +from .tensorboard import TensorBoardMonitor +from .csv_monitor import csvMonitor + + +class MonitorMaster(Monitor): + + def __init__(self, monitor_config): + super().__init__(monitor_config) + self.tb_monitor = None + self.wandb_monitor = None + self.csv_monitor = None + self.enabled = monitor_config.enabled + + if dist.get_rank() == 0: + if monitor_config.tensorboard.enabled: + self.tb_monitor = TensorBoardMonitor(monitor_config.tensorboard) + if monitor_config.wandb.enabled: + self.wandb_monitor = WandbMonitor(monitor_config.wandb) + if monitor_config.csv_monitor.enabled: + self.csv_monitor = csvMonitor(monitor_config.csv_monitor) + + def write_events(self, event_list): + if dist.get_rank() == 0: + if self.tb_monitor is not None: + self.tb_monitor.write_events(event_list) + if self.wandb_monitor is not None: + self.wandb_monitor.write_events(event_list) + if self.csv_monitor is not None: + self.csv_monitor.write_events(event_list) diff --git a/venv/lib/python3.10/site-packages/deepspeed/monitor/tensorboard.py b/venv/lib/python3.10/site-packages/deepspeed/monitor/tensorboard.py new file mode 100644 index 0000000000000000000000000000000000000000..985c9ed44b6f5be28785699bca4a3638ed6063bd --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/monitor/tensorboard.py @@ -0,0 +1,56 @@ +# Copyright (c) Microsoft Corporation. 
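MonitorMaster.write_events above just fans an event list out to whichever backends are enabled on rank 0, and the TensorBoard backend unpacks each event straight into SummaryWriter.add_scalar, so an event is a (tag, value, global_step) tuple. A sketch of that event format written directly to a SummaryWriter (the log directory is an arbitrary example path; it assumes the tensorboard package is installed):

from torch.utils.tensorboard import SummaryWriter

events = [("Train/loss", 2.31, 100), ("Train/lr", 3e-4, 100)]  # (tag, value, global_step)
writer = SummaryWriter(log_dir="/tmp/ds_monitor_demo")  # hypothetical output path
for event in events:
    writer.add_scalar(*event)  # exactly how TensorBoardMonitor.write_events unpacks each event
writer.flush()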
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .utils import check_tb_availability +from .monitor import Monitor +import os + +import deepspeed.comm as dist + + +class TensorBoardMonitor(Monitor): + + def __init__(self, tensorboard_config): + super().__init__(tensorboard_config) + check_tb_availability() + + self.summary_writer = None + self.enabled = tensorboard_config.enabled + self.output_path = tensorboard_config.output_path + self.job_name = tensorboard_config.job_name + + if self.enabled and dist.get_rank() == 0: + self.get_summary_writer() + + def get_summary_writer(self, base=os.path.join(os.path.expanduser("~"), "tensorboard")): + if self.enabled and dist.get_rank() == 0: + from torch.utils.tensorboard import SummaryWriter + if self.output_path is not None: + log_dir = os.path.join(self.output_path, self.job_name) + # NOTE: This code path currently is never used since the default output_path is an empty string and not None. Saving it in case we want this functionality in the future. + else: + if "DLWS_JOB_ID" in os.environ: + infra_job_id = os.environ["DLWS_JOB_ID"] + elif "DLTS_JOB_ID" in os.environ: + infra_job_id = os.environ["DLTS_JOB_ID"] + else: + infra_job_id = "unknown-job-id" + + summary_writer_dir_name = os.path.join(infra_job_id, "logs") + log_dir = os.path.join(base, summary_writer_dir_name, self.output_path) + os.makedirs(log_dir, exist_ok=True) + self.summary_writer = SummaryWriter(log_dir=log_dir) + return self.summary_writer + + def write_events(self, event_list, flush=True): + if self.enabled and self.summary_writer is not None and dist.get_rank() == 0: + for event in event_list: + self.summary_writer.add_scalar(*event) + if flush: + self.summary_writer.flush() + + def flush(self): + if self.enabled and self.summary_writer is not None and dist.get_rank() == 0: + self.summary_writer.flush() diff --git a/venv/lib/python3.10/site-packages/deepspeed/nebula/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/nebula/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6c5067f71c8faf166bc78e88f9b62e8627dda7c7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/nebula/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +'''Copyright The Microsoft DeepSpeed Team''' diff --git a/venv/lib/python3.10/site-packages/deepspeed/nebula/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/nebula/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bfad1a8cc5334713e961ccc666f5489c35c78372 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/nebula/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/nebula/__pycache__/config.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/nebula/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..427051dfb7b0ecb7d3b81450569e19ee0dd1a4ac Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/nebula/__pycache__/config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/nebula/__pycache__/constants.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/nebula/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1eb43503201d2b75fb7cb8b9e5627387815c5b97 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/nebula/__pycache__/constants.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/nebula/config.py b/venv/lib/python3.10/site-packages/deepspeed/nebula/config.py new file mode 100644 index 0000000000000000000000000000000000000000..dc49185738c92a3173f2fd5c68f1d6ab5a32dd92 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/nebula/config.py @@ -0,0 +1,43 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from deepspeed.runtime.config_utils import get_scalar_param, DeepSpeedConfigObject +from deepspeed.nebula.constants import * + + +class DeepSpeedNebulaConfig(DeepSpeedConfigObject): + + def __init__(self, param_dict): + super(DeepSpeedNebulaConfig, self).__init__() + + self.enabled = None + self.persistent_storage_path = None + self.persistent_time_interval = None + self.num_of_version_in_retention = None + self.enable_nebula_load = None + + if NEBULA in param_dict.keys(): + nebula_dict = param_dict[NEBULA] + else: + nebula_dict = {} + + self._initialize(nebula_dict) + + def _initialize(self, nebula_dict): + self.enabled = get_scalar_param(nebula_dict, NEBULA_ENABLED, NEBULA_ENABLED_DEFAULT) + + self.load_path = get_scalar_param(nebula_dict, NEBULA_LOAD_PATH, NEBULA_LOAD_PATH_DEFAULT) + + self.enable_nebula_load = get_scalar_param(nebula_dict, NEBULA_ENABLE_NEBULA_LOAD, + NEBULA_ENABLE_NEBULA_LOAD_DEFAULT) + + self.persistent_storage_path = get_scalar_param(nebula_dict, NEBULA_PERSISTENT_STORAGE_PATH, + NEBULA_PERSISTENT_STORAGE_PATH_DEFAULT) + + self.persistent_time_interval = get_scalar_param(nebula_dict, NEBULA_PERSISTENT_TIME_INTERVAL, + NEBULA_PERSISTENT_TIME_INTERVAL_DEFAULT) + + self.num_of_version_in_retention = get_scalar_param(nebula_dict, NEBULA_NUM_OF_VERSION_IN_RETENTION, + NEBULA_NUM_OF_VERSION_IN_RETENTION_DEFAULT) diff --git a/venv/lib/python3.10/site-packages/deepspeed/nebula/constants.py b/venv/lib/python3.10/site-packages/deepspeed/nebula/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..9fa5769b55979e4dcd5c80ead06d2117dcc2ec40 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/nebula/constants.py @@ -0,0 +1,73 @@ +# Copyright (c) 
Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +######################################### +# nebula +######################################### +# Nebula. By default, this feature is not enabled. +# Users can configure it in ds_config.json as in the example below: +NEBULA_FORMAT = ''' +nebula should be enabled as: +"session_params": { + "nebula": { + "enabled": true, + "persistent_storage_path": "/foo/bar", + "persistent_time_interval": 100, + "num_of_version_in_retention": 2, + "enable_nebula_load": true + } +} +''' + +NEBULA = "nebula" + +NEBULA_ENABLED = "enabled" +NEBULA_ENABLED_DEFAULT = False + +# There is a case where a customer wants to load a checkpoint saved +# by raw torch. Nebula cannot load a torch checkpoint directly because +# the two use different folder structures, even though the saved data +# are byte-for-byte identical for torch and nebula. +# In this case, we must disable nebula load to use raw torch load. +# The customer can simply set NEBULA_ENABLE_NEBULA_LOAD to False and then +# use DeepSpeed's original loading path, i.e. set the value of "--load". +NEBULA_ENABLE_NEBULA_LOAD = "enable_nebula_load" +NEBULA_ENABLE_NEBULA_LOAD_DEFAULT = True + +# When you want to resume the previous checkpoint saved by nebula, +# you can set NEBULA_LOAD_PATH to the parent folder of the checkpoint. +# If NEBULA_LOAD_PATH is None, the NEBULA_PERSISTENT_STORAGE_PATH +# will be the default path to load. +NEBULA_LOAD_PATH = "nebula_load_path" +NEBULA_LOAD_PATH_DEFAULT = None + +# Nebula will save the checkpoint under NEBULA_LOAD_PATH +# asynchronously. +NEBULA_PERSISTENT_STORAGE_PATH = "persistent_storage_path" +NEBULA_PERSISTENT_STORAGE_PATH_DEFAULT = None + +# Time interval to trigger the nebula persistence. +NEBULA_PERSISTENT_TIME_INTERVAL = "persistent_time_interval" +NEBULA_PERSISTENT_TIME_INTERVAL_DEFAULT = 100 + +# Number of checkpoint versions kept in memory. For example, if the +# value is 2 and checkpoints 1 and 2 are ready, then when checkpoint 3 +# arrives, checkpoint 1 will be removed once it has been persisted to disk.
+NEBULA_NUM_OF_VERSION_IN_RETENTION = "num_of_version_in_retention" +NEBULA_NUM_OF_VERSION_IN_RETENTION_DEFAULT = 2 + +# Nebula envs +NEBULA_EXPORT_ENVS = [ + 'DLTS_JOB_ID', 'DLTS_NUM_WORKER', 'NEBULA_PERSISTENT_STORAGE_PATH', 'NEBULA_PERSISTENT_TIME_INTERVAL', + 'AML_RUN_ID', 'AZUREML_RUN_TOKEN', 'AZUREML_WORKSPACE_SCOPE', 'AZUREML_EXPERIMENT_SCOPE', + 'AZUREML_RUN_HISTORY_SERVICE_ENDPOINT', 'AZUREML_RUN_ID', 'NEBULA_MEMORY_BUFFER_SIZE', + 'AZUREML_PARAMETER_ITPJOB_NAME', 'FC_TASKROLE_NAME', 'FC_TASK_INDEX', 'MASTER_HOST', 'LOCAL_HOST', + 'AZUREML_BLOB_ACCOUNT_NAME', 'AZUREML_BLOB_ACCOUNT_KEY' +] + +# ITP env files +DLTS_POD_ENV_PATH = '/dlts-runtime/env/pod.env' diff --git a/venv/lib/python3.10/site-packages/scipy/ndimage/tests/dots.png b/venv/lib/python3.10/site-packages/scipy/ndimage/tests/dots.png new file mode 100644 index 0000000000000000000000000000000000000000..2cb593b8e1cf68e429cc8402838c31f70be59afc --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/ndimage/tests/dots.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b20b56fadc7471c0694d3e8148d9e28a83d7967bac16bf8852094afea3950414 +size 2114 diff --git a/venv/lib/python3.10/site-packages/scipy/special/tests/data/boost.npz b/venv/lib/python3.10/site-packages/scipy/special/tests/data/boost.npz new file mode 100644 index 0000000000000000000000000000000000000000..a3cba7656ee5445c3c94b8695f526de05973cadf --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/tests/data/boost.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d73ecbbb51654522342ba0470a6263a9684e617c2b8374565fe3a79593f4b231 +size 1270643 diff --git a/venv/lib/python3.10/site-packages/scipy/special/tests/data/local.npz b/venv/lib/python3.10/site-packages/scipy/special/tests/data/local.npz new file mode 100644 index 0000000000000000000000000000000000000000..7a1d159f5fa6dc3c5521bda8cf3049ee24945857 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/tests/data/local.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:102b876c27ec4d2f8041d5ab2fb6dfefc8147021335160f515455e53e06871ff +size 203438 diff --git a/venv/lib/python3.10/site-packages/scipy/stats/_sobol_direction_numbers.npz b/venv/lib/python3.10/site-packages/scipy/stats/_sobol_direction_numbers.npz new file mode 100644 index 0000000000000000000000000000000000000000..44f1f1e9ebd1eb188289ca9adb8027855c1a23b6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/_sobol_direction_numbers.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4859931147d42ce465b8605cb277f957d98b839d03194fdf06579357906d193b +size 589334 diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/data/jf_skew_t_gamlss_pdf_data.npy b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/jf_skew_t_gamlss_pdf_data.npy new file mode 100644 index 0000000000000000000000000000000000000000..721749bcd853fa5c5efe5a1f5ba6e105658395dc --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/jf_skew_t_gamlss_pdf_data.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:254d2dee4a4d547b9331c60243c6fcfcaffd26c8b104d08d4f6045a7645b3bba +size 4064 diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-loc-scale-sample-data.npy b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-loc-scale-sample-data.npy new file mode 100644 index 
0000000000000000000000000000000000000000..0a1460e407521836a9b73a081609af4ccdb6deae --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/levy_stable/stable-loc-scale-sample-data.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3c719edd5431fb9e7b9ecb6d19e3ca7a9095298bd19f226685b0fca40f0c073 +size 9328 diff --git a/venv/lib/python3.10/site-packages/scipy/stats/tests/data/rel_breitwigner_pdf_sample_data_ROOT.npy b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/rel_breitwigner_pdf_sample_data_ROOT.npy new file mode 100644 index 0000000000000000000000000000000000000000..80dde74dcda9a23dcdbf9a2f677eb9c98337b0a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/stats/tests/data/rel_breitwigner_pdf_sample_data_ROOT.npy @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eef4dc702dd8c6e31c18c74e1f81284c3e9ca2ab50282de39c9ad30b7bb8e76d +size 38624