applied-ai-018 committed (verified) · commit 19a3898 · 1 parent: 0cc6a28

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/backend.cpython-310.pyc +0 -0
  2. venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/ccl.cpython-310.pyc +0 -0
  3. venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/config.cpython-310.pyc +0 -0
  4. venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/torch.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/deepspeed/linear/__init__.py +7 -0
  6. venv/lib/python3.10/site-packages/deepspeed/linear/__pycache__/__init__.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/deepspeed/linear/__pycache__/config.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/deepspeed/linear/__pycache__/optimized_linear.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/deepspeed/linear/__pycache__/quantization.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/deepspeed/linear/config.py +39 -0
  11. venv/lib/python3.10/site-packages/deepspeed/linear/optimized_linear.py +150 -0
  12. venv/lib/python3.10/site-packages/deepspeed/linear/quantization.py +137 -0
  13. venv/lib/python3.10/site-packages/deepspeed/model_implementations/__init__.py +7 -0
  14. venv/lib/python3.10/site-packages/deepspeed/model_implementations/__pycache__/__init__.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/__init__.py +5 -0
  16. venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/__pycache__/__init__.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/__pycache__/unet.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/__pycache__/vae.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/unet.py +81 -0
  20. venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/vae.py +151 -0
  21. venv/lib/python3.10/site-packages/deepspeed/model_implementations/features/__init__.py +5 -0
  22. venv/lib/python3.10/site-packages/deepspeed/model_implementations/features/__pycache__/__init__.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/deepspeed/model_implementations/features/__pycache__/cuda_graph.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/deepspeed/model_implementations/features/cuda_graph.py +27 -0
  25. venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__init__.py +5 -0
  26. venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/__init__.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/clip_encoder.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_base.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_bert.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_bloom.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_gpt.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_llama2.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_megatron_gpt.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_opt.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_transformer.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/clip_encoder.py +77 -0
  37. venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_base.py +15 -0
  38. venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_bert.py +20 -0
  39. venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_bloom.py +20 -0
  40. venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_gpt.py +20 -0
  41. venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_llama2.py +69 -0
  42. venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_megatron_gpt.py +20 -0
  43. venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_opt.py +20 -0
  44. venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_transformer.py +199 -0
  45. venv/lib/python3.10/site-packages/deepspeed/module_inject/__init__.py +10 -0
  46. venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/auto_tp_model_utils.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/fusedqkv_utils.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/module_quantize.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/deepspeed/module_inject/auto_tp.py +491 -0
  50. venv/lib/python3.10/site-packages/deepspeed/module_inject/auto_tp_model_utils.py +104 -0
venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/backend.cpython-310.pyc ADDED
Binary file (1.61 kB).
 
venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/ccl.cpython-310.pyc ADDED
Binary file (6.81 kB).
 
venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/config.cpython-310.pyc ADDED
Binary file (1.5 kB).
 
venv/lib/python3.10/site-packages/deepspeed/comm/__pycache__/torch.cpython-310.pyc ADDED
Binary file (13 kB).
 
venv/lib/python3.10/site-packages/deepspeed/linear/__init__.py ADDED
@@ -0,0 +1,7 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from .optimized_linear import OptimizedLinear
+from .config import LoRAConfig, QuantizationConfig
venv/lib/python3.10/site-packages/deepspeed/linear/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (310 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/linear/__pycache__/config.cpython-310.pyc ADDED
Binary file (1.75 kB).
 
venv/lib/python3.10/site-packages/deepspeed/linear/__pycache__/optimized_linear.cpython-310.pyc ADDED
Binary file (4.97 kB).
 
venv/lib/python3.10/site-packages/deepspeed/linear/__pycache__/quantization.cpython-310.pyc ADDED
Binary file (5.72 kB).
 
venv/lib/python3.10/site-packages/deepspeed/linear/config.py ADDED
@@ -0,0 +1,39 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from dataclasses import dataclass
+
+
+@dataclass
+class LoRAConfig:
+    """
+    Configuration settings for LoRAOptimizedLinear.
+
+    Attributes:
+        lora_r (int): LoRA attention dimension, also know as the rank. Defaults is 64.
+        lora_alpha (float): LoRA scaling factor, default is 16.
+        base_weight_sharding (int): The degree to which the base weights are sharded,
+            should typically be set to the data-parallel world size to maximize the memory
+            reduction benefits. Defaults to 1, which means this feature is disabled.
+    """
+    lora_r: int = 64
+    lora_alpha: float = 16.
+    base_weight_sharding: int = 1
+
+
+@dataclass
+class QuantizationConfig:
+    """
+    Configuration settings for quantization for LoRAOptimizedLinear, QuantizedLinear,
+    and QuantizedParameter
+
+    Attributes:
+        q_bits (int): The number of bits used for quantization. Default is 8.
+        mantissa_bits (int): The number of bits reserved for the mantissa in fixed-point quantization. Default is 3.
+        group_size (int): The size of the group used for quantization. Default is 512.
+    """
+    q_bits: int = 8
+    mantissa_bits: int = 3
+    group_size: int = 512
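
As a quick illustration of the two dataclasses above, a minimal sketch of constructing them (the values here are arbitrary examples, not recommended settings):

from deepspeed.linear import LoRAConfig, QuantizationConfig

# Illustrative values only; the shipped defaults are lora_r=64, lora_alpha=16., q_bits=8.
lora_cfg = LoRAConfig(lora_r=16, lora_alpha=32., base_weight_sharding=1)
quant_cfg = QuantizationConfig(q_bits=8, mantissa_bits=3, group_size=512)
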
venv/lib/python3.10/site-packages/deepspeed/linear/optimized_linear.py ADDED
@@ -0,0 +1,150 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import torch
+import math
+import torch.nn as nn
+import torch.nn.functional as F
+from dataclasses import is_dataclass
+from deepspeed.accelerator import get_accelerator
+import deepspeed.comm as dist
+
+from .config import LoRAConfig, QuantizationConfig
+from .quantization import QuantizedParameter, QuantizedLinear
+
+
+class OptimizedLinear(nn.Module):
+    """
+    Optimized version of nn.Linear that adds features such as:
+      * LoRA w. base weight sharding
+      * FP [6,8,12] quantization
+
+    Arguments:
+        input_dim: Required: size of each input sample
+        output_dim: Required: size of each output sample
+        bias: Optional: If set to False, the layer will not learn an additive bias. Default: False
+        lora_config: Optional: LoRAConfig defining lora features and base-weight-sharding degree
+        quantization_config: Optional: QuantizationConfig defining quantization features
+        dtype: Optional: parameter dtype, only supports bfloat16 currently
+
+    Returns:
+        Returns a new nn.Module depending on the input config. Either native
+        torch.nn.Linear, QuantizedLinear, or the full-featured DSOptimizedLinear.
+    """
+
+    def __new__(self,
+                input_dim: int,
+                output_dim: int,
+                bias: bool = False,
+                lora_config: LoRAConfig = None,
+                quantization_config: QuantizationConfig = None,
+                dtype=torch.bfloat16):
+
+        if quantization_config is not None and not is_dataclass(quantization_config):
+            raise ValueError(f"Expecting QuantizationConfig but received {type(quantization_config)}")
+        if lora_config is not None and not is_dataclass(lora_config):
+            raise ValueError(f"Expecting LoRAConfig but received {type(lora_config)}")
+        if lora_config is None and quantization_config is None:
+            # Everything disabled, fall back to normal nn.Linear
+            self = nn.Linear(input_dim, output_dim, bias=bias, dtype=dtype)
+
+        elif lora_config:
+            # lora enabled, quantization may or may not be
+            self = LoRAOptimizedLinear(input_dim=input_dim,
+                                       output_dim=output_dim,
+                                       bias=bias,
+                                       lora_config=lora_config,
+                                       quantization_config=quantization_config,
+                                       dtype=dtype)
+
+        elif quantization_config:
+            # only quantization enabled, no lora
+            self = QuantizedLinear(input_dim=input_dim,
+                                   output_dim=output_dim,
+                                   bias=bias,
+                                   quantization_config=quantization_config,
+                                   dtype=dtype)
+        return self
+
+
+class LoRAOptimizedLinear(nn.Module):
+
+    def __init__(self,
+                 input_dim: int,
+                 output_dim: int,
+                 bias: bool = False,
+                 lora_config: LoRAConfig = None,
+                 quantization_config: QuantizationConfig = None,
+                 device=None,
+                 dtype=torch.bfloat16):
+        super().__init__()
+        self.input_dim = input_dim
+        self.output_dim = output_dim
+        self.bias = bias
+        self.lora_config = lora_config
+        self.quantization_config = quantization_config
+        device = get_accelerator().current_device() if device is None else device
+        assert self.lora_config is not None, "DSOptimizedLinear requires a LoRA config"
+
+        self.zero_shards = self.lora_config.base_weight_sharding
+        self.sharded_weight_size = int(float(self.input_dim) // self.zero_shards)
+        w = torch.nn.Parameter(torch.empty((self.output_dim, self.sharded_weight_size), dtype=dtype))
+        torch.nn.init.xavier_uniform_(w)
+
+        if self.quantization_config is not None:
+            assert dtype == torch.bfloat16, "only bfloat16 is supported when using quantization"
+            self.base_weight = QuantizedParameter(w, quantization_config=quantization_config)
+        else:
+            self.base_weight = w
+
+        self.base_weight.requires_grad = False
+
+        # Use RS lora for now.
+        self.lora_scaling_factor = self.lora_config.lora_alpha / math.sqrt(self.lora_config.lora_r)
+        # Keeping lora weights in bf16 precision for ease of training.
+        self.lora_weight_1 = nn.Linear(self.input_dim,
+                                       self.lora_config.lora_r,
+                                       bias=self.bias,
+                                       device=device,
+                                       dtype=dtype)
+        self.lora_weight_2 = nn.Linear(self.lora_config.lora_r,
+                                       self.output_dim,
+                                       bias=self.bias,
+                                       device=device,
+                                       dtype=dtype)
+        self.lora_weight_1.weight.requires_grad = True
+        self.lora_weight_2.weight.requires_grad = True
+
+    def full_weight(self):
+        # This assumes weights are evenly sharded across gpus. which might not be correct.
+        # in that case, we should flatten before all_gather.
+        local_weight = self.base_weight.dequantized() if isinstance(self.base_weight,
+                                                                    QuantizedParameter) else self.base_weight
+        tensor_list = [
+            torch.zeros_like(local_weight, device=local_weight.device, dtype=local_weight.dtype)
+            for _ in range(self.zero_shards)
+        ]
+        dist.all_gather(tensor_list, local_weight)
+        weight = nn.Parameter(torch.cat([tensor for tensor in tensor_list], dim=1))
+        return weight
+
+    def linear_without_F_linear(self, input, weight):
+        output = torch.mm(input.reshape(-1, input.shape[-1]), weight)
+        output = output.view(*input.shape[:-1], weight.shape[1])
+        return output
+
+    def forward(self, input_tensor):
+        # Gather the sharded base weight
+        if self.zero_shards > 1:
+            with torch.no_grad():
+                base_weight = self.full_weight()
+        elif self.quantization_config:
+            base_weight = self.base_weight.dequantized()
+        else:
+            base_weight = self.base_weight
+
+        base_weight_output = F.linear(input_tensor, base_weight)
+        lora_output = self.lora_weight_2(self.lora_weight_1(input_tensor))
+        return base_weight_output + self.lora_scaling_factor * lora_output
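
For context, a minimal usage sketch of the factory above, assuming a bfloat16-capable accelerator and the default base_weight_sharding of 1 (no sharding); the shapes and config values are illustrative:

import torch
from deepspeed.accelerator import get_accelerator
from deepspeed.linear import OptimizedLinear, LoRAConfig

# With no configs, OptimizedLinear.__new__ simply returns a plain torch.nn.Linear.
plain = OptimizedLinear(input_dim=1024, output_dim=4096)

# With a LoRAConfig it returns a LoRAOptimizedLinear: a frozen base weight plus two
# trainable low-rank projections scaled by lora_alpha / sqrt(lora_r).
device = get_accelerator().current_device_name()
layer = OptimizedLinear(input_dim=1024, output_dim=4096,
                        lora_config=LoRAConfig(lora_r=16, lora_alpha=32.)).to(device)
x = torch.randn(2, 1024, dtype=torch.bfloat16, device=device)
y = layer(x)  # shape (2, 4096): base projection + scaled LoRA delta
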
venv/lib/python3.10/site-packages/deepspeed/linear/quantization.py ADDED
@@ -0,0 +1,137 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import copy
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from typing import Optional
+
+from deepspeed.accelerator import get_accelerator
+from deepspeed.ops.fp_quantizer import Quantizer, FP_Quantize
+from .config import QuantizationConfig
+
+
+class QuantizedParameter(nn.Parameter):
+    """
+    Quantized parameter class that implements weight quantization. Weights
+    are stored in quantized form on GPUs, and can be dequantized on-the-fly when
+    needed by the model. The weights are actually quantized during any `.to(device)`.
+
+    Arguments:
+        data (Tensor): parameter tensor.
+        requires_grad (bool, optional): if the parameter requires gradient. Defaults
+            to False and is not supported to be True. Argument provided only for interface
+            compatibility with torch.nn.Parameter.
+        quantization_config (QuantizationConfig, optional):
+        quantizer (Quantizer, optional): Defaults to FP_Quantize but can be any quantizer
+            that implements deepspeed.ops.fp_quantizer.Quantizer. This argument is also
+            required since the quantizer is stashed in the Parameter itself, some models
+            may clone the Parameter by passing an attribute __dict__. For an example, see
+            tests/unit/linear/test_quant_param.py::TestQuantParam::test_hf_clone
+    """
+
+    def __new__(
+        cls,
+        data: Optional[torch.Tensor] = None,
+        requires_grad: bool = False,  # quantized weights must be frozen
+        quantization_config: QuantizationConfig = None,
+        quantizer: Quantizer = None,
+    ):
+        if requires_grad:
+            raise ValueError(f"requires_grad=True is not supported with QuantizedParameter")
+        if data is None:
+            data = torch.empty(0)
+        self = torch.Tensor._make_subclass(cls, data, requires_grad)
+        self.quantization_config = QuantizationConfig() if quantization_config is None else quantization_config
+        if quantizer is not None:
+            self.quantizer = quantizer
+        else:
+            # if FPQuantizerBuilder is not compatible in this env this init will fail
+            self.quantizer = FP_Quantize(group_size=self.quantization_config.group_size)
+        self._ensure_quantized(self)
+        return self
+
+    def _ensure_quantized(self, tensor: torch.Tensor):
+        # If the tensor is on the accelerator and is not quantized, then quantize it in-place.
+        if get_accelerator().on_accelerator(tensor) and tensor.dtype != torch.int8:
+            with get_accelerator().stream(get_accelerator().current_stream(tensor.device)):
+                tensor.data = self.quantizer.quantize(tensor.data,
+                                                      q_bits=self.quantization_config.q_bits,
+                                                      q_mantisa_bits=self.quantization_config.mantissa_bits)
+            assert tensor.dtype == torch.int8
+
+    def dequantized(self) -> torch.Tensor:
+        """
+        Return a tensor containing the dequantized weights of this parameter.
+        """
+        if get_accelerator().on_accelerator(self.data) and self.data.dtype == torch.int8:
+            with get_accelerator().stream(get_accelerator().current_stream(self.data.device)):
+                return self.quantizer.dequantize(self.data,
+                                                 q_bits=self.quantization_config.q_bits,
+                                                 q_mantisa_bits=self.quantization_config.mantissa_bits)
+        return self.data
+
+    def __getstate__(self):
+        state = self.__dict__
+        state["data"] = self.data
+        state["quantization_config"] = self.quantization_config
+        state["requires_grad"] = self.requires_grad
+        return state
+
+    def __setstate__(self, state):
+        self.quantizer = state["quantizer"]
+        self.quantization_config = state["quantization_config"]
+        self.data = state["data"]
+        self.requires_grad = state["requires_grad"]
+
+    def __deepcopy__(self, memo):
+        new_instance = type(self).__new__(type(self))
+        state = self.__getstate__()
+        new_instance.__setstate__(state)
+        new_instance.quantizer = copy.deepcopy(state["quantizer"])
+        new_instance.quantization_config = copy.deepcopy(state["quantization_config"])
+        new_instance.data = copy.deepcopy(state["data"])
+        return new_instance
+
+    def __copy__(self):
+        new_instance = type(self).__new__(type(self))
+        state = self.__getstate__()
+        new_instance.__setstate__(state)
+        return new_instance
+
+    def cuda(self, device=None, non_blocking=False):
+        return self.to(device="cuda" if device is None else device, non_blocking=non_blocking)
+
+    def to(self, *args, **kwargs):
+        """
+        Move the parameter to the given device. Then, if the device is a cuda device,
+        quantize it.
+        """
+        tensor = super().to(*args, **kwargs)
+        self._ensure_quantized(tensor)
+        return tensor
+
+
+class QuantizedLinear(nn.Linear):
+    """
+    Linear layer that implements weight quantization. Parameters
+    are stored via `QuantizedParameter` and are dequantized on-the-fly during any
+    forward pass.
+    """
+
+    def __init__(self,
+                 input_dim: int,
+                 output_dim: int,
+                 bias: bool = False,
+                 quantization_config: QuantizationConfig = None,
+                 dtype=torch.bfloat16):
+        super().__init__(input_dim, output_dim, bias=bias, dtype=dtype)
+        assert dtype == torch.bfloat16, "currently only supports bfloat16 dtype"
+        self.weight = QuantizedParameter(self.weight.data, quantization_config=quantization_config)
+
+    def forward(self, input: torch.Tensor) -> torch.Tensor:
+        return F.linear(input, self.weight.dequantized(), self.bias)
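
A small sketch of QuantizedParameter, assuming the FP quantizer op (FPQuantizerBuilder) builds in the environment. The payload stays bfloat16 on the host; once the parameter reaches the accelerator, _ensure_quantized() stores it as int8 and dequantized() reconstructs bfloat16 weights for compute:

import torch
from deepspeed.linear.quantization import QuantizedParameter
from deepspeed.linear.config import QuantizationConfig

w = torch.randn(4096, 4096, dtype=torch.bfloat16)
qp = QuantizedParameter(w, quantization_config=QuantizationConfig(q_bits=8))

# Still bfloat16 here; a later qp.cuda() (or any .to(<accelerator>)) triggers the
# in-place quantization described in the docstring above.
print(qp.dtype, qp.dequantized().dtype)
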
venv/lib/python3.10/site-packages/deepspeed/model_implementations/__init__.py ADDED
@@ -0,0 +1,7 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from .transformers.ds_transformer import DeepSpeedTransformerInference
+from .transformers.clip_encoder import DSClipEncoder
venv/lib/python3.10/site-packages/deepspeed/model_implementations/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (343 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/__init__.py ADDED
@@ -0,0 +1,5 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+'''Copyright The Microsoft DeepSpeed Team'''
venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (259 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/__pycache__/unet.cpython-310.pyc ADDED
Binary file (2.48 kB).
 
venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/__pycache__/vae.cpython-310.pyc ADDED
Binary file (3.84 kB).
 
venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/unet.py ADDED
@@ -0,0 +1,81 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import torch
+from deepspeed.accelerator import get_accelerator
+from ..features.cuda_graph import CUDAGraph
+
+
+class DSUNet(CUDAGraph, torch.nn.Module):
+
+    def __init__(self, unet, enable_cuda_graph=True):
+        super().__init__(enable_cuda_graph=enable_cuda_graph)
+        self.unet = unet
+        # SD pipeline accesses this attribute
+        self.in_channels = unet.in_channels
+        self.device = self.unet.device
+        self.dtype = self.unet.dtype
+        self.config = self.unet.config
+        self.fwd_count = 0
+        self.unet.requires_grad_(requires_grad=False)
+        self.unet.to(memory_format=torch.channels_last)
+        self.cuda_graph_created = False
+
+    def _graph_replay(self, *inputs, **kwargs):
+        for i in range(len(inputs)):
+            if torch.is_tensor(inputs[i]):
+                self.static_inputs[i].copy_(inputs[i])
+        for k in kwargs:
+            if torch.is_tensor(kwargs[k]):
+                self.static_kwargs[k].copy_(kwargs[k])
+        get_accelerator().replay_graph(self._cuda_graphs)
+        return self.static_output
+
+    def forward(self, *inputs, **kwargs):
+        if self.enable_cuda_graph:
+            if self.cuda_graph_created:
+                outputs = self._graph_replay(*inputs, **kwargs)
+            else:
+                self._create_cuda_graph(*inputs, **kwargs)
+                outputs = self._graph_replay(*inputs, **kwargs)
+            return outputs
+        else:
+            return self._forward(*inputs, **kwargs)
+
+    def _create_cuda_graph(self, *inputs, **kwargs):
+        # warmup to create the workspace and cublas handle
+        cuda_stream = torch.cuda.Stream()
+        cuda_stream.wait_stream(torch.cuda.current_stream())
+        with torch.cuda.stream(cuda_stream):
+            for i in range(3):
+                ret = self._forward(*inputs, **kwargs)
+        torch.cuda.current_stream().wait_stream(cuda_stream)
+
+        # create cuda_graph and assign static_inputs and static_outputs
+        self._cuda_graphs = get_accelerator().create_graph()
+        self.static_inputs = inputs
+        self.static_kwargs = kwargs
+
+        with get_accelerator().capture_to_graph(self._cuda_graphs):
+            self.static_output = self._forward(*self.static_inputs, **self.static_kwargs)
+
+        self.cuda_graph_created = True
+
+    def _forward(self,
+                 sample,
+                 timestamp,
+                 encoder_hidden_states,
+                 return_dict=True,
+                 cross_attention_kwargs=None,
+                 timestep_cond=None,
+                 added_cond_kwargs=None):
+        if cross_attention_kwargs:
+            return self.unet(sample,
+                             timestamp,
+                             encoder_hidden_states,
+                             return_dict,
+                             cross_attention_kwargs=cross_attention_kwargs)
+        else:
+            return self.unet(sample, timestamp, encoder_hidden_states, return_dict)
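
A rough wiring sketch for the wrapper above; the checkpoint name, latent shapes, and dtypes are illustrative assumptions (any diffusers UNet2DConditionModel on a CUDA device should fit the same pattern):

import torch
from diffusers import UNet2DConditionModel
from deepspeed.model_implementations.diffusers.unet import DSUNet

unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4",
                                            subfolder="unet",
                                            torch_dtype=torch.float16).to("cuda")
ds_unet = DSUNet(unet, enable_cuda_graph=True)

latents = torch.randn(2, 4, 64, 64, dtype=torch.float16, device="cuda")
timestep = torch.tensor([10], device="cuda")
text_emb = torch.randn(2, 77, 768, dtype=torch.float16, device="cuda")

# The first call warms up and captures a CUDA graph; later calls with the same
# shapes copy into the static buffers and replay the captured graph.
out = ds_unet(latents, timestep, text_emb)
out = ds_unet(latents, timestep, text_emb)
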
venv/lib/python3.10/site-packages/deepspeed/model_implementations/diffusers/vae.py ADDED
@@ -0,0 +1,151 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import torch
+from deepspeed.accelerator import get_accelerator
+from ..features.cuda_graph import CUDAGraph
+
+
+class DSVAE(CUDAGraph, torch.nn.Module):
+
+    def __init__(self, vae, enable_cuda_graph=True):
+        super().__init__(enable_cuda_graph=enable_cuda_graph)
+        self.vae = vae
+        self.config = vae.config
+        self.device = self.vae.device
+        self.dtype = self.vae.dtype
+        self.vae.requires_grad_(requires_grad=False)
+        self.decoder_cuda_graph_created = False
+        self.encoder_cuda_graph_created = False
+        self.all_cuda_graph_created = False
+
+    def _graph_replay_decoder(self, *inputs, **kwargs):
+        for i in range(len(inputs)):
+            if torch.is_tensor(inputs[i]):
+                self.static_decoder_inputs[i].copy_(inputs[i])
+        for k in kwargs:
+            if torch.is_tensor(kwargs[k]):
+                self.static_decoder_kwargs[k].copy_(kwargs[k])
+        get_accelerator().replay_graph(self._decoder_cuda_graph)
+        return self.static_decoder_output
+
+    def _decode(self, x, return_dict=True, generator=None):
+        return self.vae.decode(x, return_dict=return_dict)
+
+    def _create_cuda_graph_decoder(self, *inputs, **kwargs):
+        # warmup to create the workspace and cublas handle
+        cuda_stream = torch.cuda.Stream()
+        cuda_stream.wait_stream(torch.cuda.current_stream())
+        with torch.cuda.stream(cuda_stream):
+            for i in range(3):
+                ret = self._decode(*inputs, **kwargs)
+        torch.cuda.current_stream().wait_stream(cuda_stream)
+
+        # create cuda_graph and assign static_inputs and static_outputs
+        self._decoder_cuda_graph = get_accelerator().create_graph()
+        self.static_decoder_inputs = inputs
+        self.static_decoder_kwargs = kwargs
+
+        with get_accelerator().capture_to_graph(self._decoder_cuda_graph):
+            self.static_decoder_output = self._decode(*self.static_decoder_inputs, **self.static_decoder_kwargs)
+
+        self.decoder_cuda_graph_created = True
+
+    def decode(self, *inputs, **kwargs):
+        if self.enable_cuda_graph:
+            if self.decoder_cuda_graph_created:
+                outputs = self._graph_replay_decoder(*inputs, **kwargs)
+            else:
+                self._create_cuda_graph_decoder(*inputs, **kwargs)
+                outputs = self._graph_replay_decoder(*inputs, **kwargs)
+            return outputs
+        else:
+            return self._decode(*inputs, **kwargs)
+
+    def _graph_replay_encoder(self, *inputs, **kwargs):
+        for i in range(len(inputs)):
+            if torch.is_tensor(inputs[i]):
+                self.static_encoder_inputs[i].copy_(inputs[i])
+        for k in kwargs:
+            if torch.is_tensor(kwargs[k]):
+                self.static_encoder_kwargs[k].copy_(kwargs[k])
+        get_accelerator().replay_graph(self._encoder_cuda_graph)
+        return self.static_encoder_output
+
+    def _encode(self, x, return_dict=True):
+        return self.vae.encode(x, return_dict=return_dict)
+
+    def _create_cuda_graph_encoder(self, *inputs, **kwargs):
+        # warmup to create the workspace and cublas handle
+        cuda_stream = torch.cuda.Stream()
+        cuda_stream.wait_stream(torch.cuda.current_stream())
+        with torch.cuda.stream(cuda_stream):
+            for i in range(3):
+                ret = self._encode(*inputs, **kwargs)
+        torch.cuda.current_stream().wait_stream(cuda_stream)
+
+        # create cuda_graph and assign static_inputs and static_outputs
+        self._encoder_cuda_graph = get_accelerator().create_graph()
+        self.static_encoder_inputs = inputs
+        self.static_encoder_kwargs = kwargs
+
+        with get_accelerator().capture_to_graph(self._encoder_cuda_graph):
+            self.static_encoder_output = self._encode(*self.static_encoder_inputs, **self.static_encoder_kwargs)
+
+        self.encoder_cuda_graph_created = True
+
+    def encode(self, *inputs, **kwargs):
+        if self.enable_cuda_graph:
+            if self.encoder_cuda_graph_created:
+                outputs = self._graph_replay_encoder(*inputs, **kwargs)
+            else:
+                self._create_cuda_graph_encoder(*inputs, **kwargs)
+                outputs = self._graph_replay_encoder(*inputs, **kwargs)
+            return outputs
+        else:
+            return self._encode(*inputs, **kwargs)
+
+    def _graph_replay(self, *inputs, **kwargs):
+        for i in range(len(inputs)):
+            if torch.is_tensor(inputs[i]):
+                self.static_inputs[i].copy_(inputs[i])
+        for k in kwargs:
+            if torch.is_tensor(kwargs[k]):
+                self.static_kwargs[k].copy_(kwargs[k])
+        get_accelerator().replay_graph(self._all_cuda_graph)
+        return self.static_output
+
+    def forward(self, *inputs, **kwargs):
+        if self.enable_cuda_graph:
+            if self.cuda_graph_created:
+                outputs = self._graph_replay(*inputs, **kwargs)
+            else:
+                self._create_cuda_graph(*inputs, **kwargs)
+                outputs = self._graph_replay(*inputs, **kwargs)
+            return outputs
+        else:
+            return self._forward(*inputs, **kwargs)
+
+    def _create_cuda_graph(self, *inputs, **kwargs):
+        # warmup to create the workspace and cublas handle
+        cuda_stream = torch.cuda.Stream()
+        cuda_stream.wait_stream(torch.cuda.current_stream())
+        with torch.cuda.stream(cuda_stream):
+            for i in range(3):
+                ret = self._forward(*inputs, **kwargs)
+        torch.cuda.current_stream().wait_stream(cuda_stream)
+
+        # create cuda_graph and assign static_inputs and static_outputs
+        self._all_cuda_graph = get_accelerator().create_graph()
+        self.static_inputs = inputs
+        self.static_kwargs = kwargs
+
+        with get_accelerator().capture_to_graph(self._all_cuda_graph):
+            self.static_output = self._forward(*self.static_inputs, **self.static_kwargs)
+
+        self.all_cuda_graph_created = True
+
+    def _forward(self, sample, timestamp, encoder_hidden_states, return_dict=True):
+        return self.vae(sample, timestamp, encoder_hidden_states, return_dict)
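
DSVAE keeps a separate CUDA graph for encode(), decode(), and forward(). A brief decode sketch under the same assumptions as the UNet example (an fp16 diffusers AutoencoderKL, referred to here as the hypothetical variable `vae`, already on the GPU):

import torch
from deepspeed.model_implementations.diffusers.vae import DSVAE

ds_vae = DSVAE(vae, enable_cuda_graph=True)   # `vae` is assumed to exist, see lead-in
latents = torch.randn(1, 4, 64, 64, dtype=torch.float16, device="cuda")
image = ds_vae.decode(latents)                # first call captures the decoder graph, later calls replay it
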
venv/lib/python3.10/site-packages/deepspeed/model_implementations/features/__init__.py ADDED
@@ -0,0 +1,5 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+'''Copyright The Microsoft DeepSpeed Team'''
venv/lib/python3.10/site-packages/deepspeed/model_implementations/features/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (258 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/model_implementations/features/__pycache__/cuda_graph.cpython-310.pyc ADDED
Binary file (1.01 kB).
 
venv/lib/python3.10/site-packages/deepspeed/model_implementations/features/cuda_graph.py ADDED
@@ -0,0 +1,27 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from abc import ABC, abstractmethod
+
+
+class CUDAGraph(ABC):
+
+    def __init__(self, enable_cuda_graph=False):
+        super().__init__()
+        self.enable_cuda_graph = enable_cuda_graph
+
+    @abstractmethod
+    def _create_cuda_graph(self):
+        """
+        Create CUDA graph(s)
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def _graph_replay(self):
+        """
+        Replay CUDA graph(s)
+        """
+        raise NotImplementedError
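
To make the contract concrete, a toy subclass sketch (not part of DeepSpeed) that follows the same warmup-capture-replay pattern as DSUNet/DSVAE above; `GraphedModule` and its attribute names are hypothetical:

import torch
from deepspeed.accelerator import get_accelerator
from deepspeed.model_implementations.features.cuda_graph import CUDAGraph


class GraphedModule(CUDAGraph, torch.nn.Module):
    """Hypothetical example: replay a wrapped module's forward via a captured graph."""

    def __init__(self, module, enable_cuda_graph=True):
        super().__init__(enable_cuda_graph=enable_cuda_graph)
        self.module = module
        self.cuda_graph_created = False

    def _create_cuda_graph(self, *inputs, **kwargs):
        # Warm up on a side stream (as DSUNet/DSVAE do) so lazy allocations such as
        # cuBLAS workspaces happen before capture, not inside it.
        stream = torch.cuda.Stream()
        stream.wait_stream(torch.cuda.current_stream())
        with torch.cuda.stream(stream):
            for _ in range(3):
                self.module(*inputs, **kwargs)
        torch.cuda.current_stream().wait_stream(stream)

        self._graph = get_accelerator().create_graph()
        self.static_inputs = inputs
        with get_accelerator().capture_to_graph(self._graph):
            self.static_output = self.module(*self.static_inputs)
        self.cuda_graph_created = True

    def _graph_replay(self, *inputs, **kwargs):
        # Copy fresh inputs into the captured static buffers, then replay the graph.
        for i, inp in enumerate(inputs):
            if torch.is_tensor(inp):
                self.static_inputs[i].copy_(inp)
        get_accelerator().replay_graph(self._graph)
        return self.static_output

    def forward(self, *inputs, **kwargs):
        if not self.enable_cuda_graph:
            return self.module(*inputs, **kwargs)
        if not self.cuda_graph_created:
            self._create_cuda_graph(*inputs, **kwargs)
        return self._graph_replay(*inputs, **kwargs)
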
venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__init__.py ADDED
@@ -0,0 +1,5 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+'''Copyright The Microsoft DeepSpeed Team'''
venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (262 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/clip_encoder.cpython-310.pyc ADDED
Binary file (2.81 kB).
 
venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_base.cpython-310.pyc ADDED
Binary file (558 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_bert.cpython-310.pyc ADDED
Binary file (889 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_bloom.cpython-310.pyc ADDED
Binary file (893 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_gpt.cpython-310.pyc ADDED
Binary file (885 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_llama2.cpython-310.pyc ADDED
Binary file (2 kB).
 
venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_megatron_gpt.cpython-310.pyc ADDED
Binary file (919 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_opt.cpython-310.pyc ADDED
Binary file (885 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/__pycache__/ds_transformer.cpython-310.pyc ADDED
Binary file (5.47 kB).
 
venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/clip_encoder.py ADDED
@@ -0,0 +1,77 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import torch
+from deepspeed.accelerator import get_accelerator
+from ..features.cuda_graph import CUDAGraph
+
+
+class DSClipEncoder(CUDAGraph, torch.nn.Module):
+
+    def __init__(self, enc, enable_cuda_graph=False):
+        super().__init__(enable_cuda_graph=enable_cuda_graph)
+        enc.text_model._build_causal_attention_mask = self._build_causal_attention_mask
+        self.enc = enc
+        self.device = self.enc.device
+        self.dtype = self.enc.dtype
+        self.cuda_graph_created = [False, False]
+        self.static_inputs = [None, None]
+        self.static_kwargs = [None, None]
+        self.static_output = [None, None]
+        self._cuda_graphs = [None, None]
+        self.iter = 0
+        self.config = self.enc.config
+
+    def _build_causal_attention_mask(self, bsz, seq_len, dtype):
+        mask = torch.empty(bsz, seq_len, seq_len, dtype=dtype, device=get_accelerator().current_device_name())
+        mask.fill_(torch.tensor(torch.finfo(dtype).min))
+        mask.triu_(1)
+        mask = mask.unsqueeze(1)
+        return mask
+
+    def _graph_replay(self, *inputs, **kwargs):
+        for i in range(len(inputs)):
+            if torch.is_tensor(inputs[i]):
+                self.static_inputs[self.iter][i].copy_(inputs[i])
+        for k in kwargs:
+            if torch.is_tensor(kwargs[k]):
+                self.static_kwargs[self.iter][k].copy_(kwargs[k])
+        get_accelerator().replay_graph(self._cuda_graphs[self.iter])
+        return self.static_output[self.iter]
+
+    def forward(self, *inputs, **kwargs):
+        if self.enable_cuda_graph:
+            if self.cuda_graph_created[self.iter]:
+                outputs = self._graph_replay(*inputs, **kwargs)
+            else:
+                self._create_cuda_graph(*inputs, **kwargs)
+                outputs = self._graph_replay(*inputs, **kwargs)
+            self.iter = (self.iter + 1) % 2
+            return outputs
+        else:
+            return self.enc(*inputs, **kwargs)
+
+    def _create_cuda_graph(self, *inputs, **kwargs):
+        # warmup to create the workspace and cublas handle
+        cuda_stream = torch.cuda.Stream()
+        cuda_stream.wait_stream(torch.cuda.current_stream())
+        with torch.cuda.stream(cuda_stream):
+            for i in range(3):
+                ret = self._forward(*inputs, **kwargs)
+        torch.cuda.current_stream().wait_stream(cuda_stream)
+
+        # create cuda_graph and assign static_inputs and static_outputs
+        self._cuda_graphs[self.iter] = get_accelerator().create_graph()
+        self.static_inputs[self.iter] = inputs
+        self.static_kwargs[self.iter] = kwargs
+
+        with get_accelerator().capture_to_graph(self._cuda_graphs[self.iter]):
+            self.static_output[self.iter] = self._forward(*self.static_inputs[self.iter],
+                                                          **self.static_kwargs[self.iter])
+
+        self.cuda_graph_created[self.iter] = True
+
+    def _forward(self, *inputs, **kwargs):
+        return self.enc(*inputs, **kwargs)
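
A usage sketch, assuming a hypothetical `text_encoder` variable holding a Hugging Face CLIPTextModel in fp16 on the GPU. Two graphs are kept because self.iter alternates between 0 and 1, which presumably corresponds to the two text-encoder calls (prompt and negative prompt) in a Stable Diffusion step:

import torch
from deepspeed.model_implementations.transformers.clip_encoder import DSClipEncoder

ds_enc = DSClipEncoder(text_encoder, enable_cuda_graph=True)   # `text_encoder` assumed to exist

tokens = torch.randint(0, 49408, (2, 77), device="cuda")
cond = ds_enc(tokens)     # captures / replays graph 0
uncond = ds_enc(tokens)   # captures / replays graph 1
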
venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_base.py ADDED
@@ -0,0 +1,15 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import torch.nn as nn
+
+
+class DeepSpeedTransformerBase(nn.module):
+
+    def __init__(self):
+        pass
+
+    # this would be the new clean base class that will replace DeepSpeedTransformerInference.
+    # we currently don't know how this will look like but keeping it here as a placeholder.
venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_bert.py ADDED
@@ -0,0 +1,20 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
+
+
+class DeepSpeedBERTInference(DeepSpeedTransformerInference):
+    """Initialize the DeepSpeed BERT Transformer Layer.
+    """
+
+    def __init__(self,
+                 config,
+                 mp_group=None,
+                 quantize_scales=None,
+                 quantize_groups=1,
+                 merge_count=1,
+                 mlp_extra_grouping=False):
+        super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping)
venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_bloom.py ADDED
@@ -0,0 +1,20 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
+
+
+class DeepSpeedBloomInference(DeepSpeedTransformerInference):
+    """Initialize the DeepSpeed Bloom Transformer Layer.
+    """
+
+    def __init__(self,
+                 config,
+                 mp_group=None,
+                 quantize_scales=None,
+                 quantize_groups=1,
+                 merge_count=1,
+                 mlp_extra_grouping=False):
+        super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping)
venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_gpt.py ADDED
@@ -0,0 +1,20 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
+
+
+class DeepSpeedGPTInference(DeepSpeedTransformerInference):
+    """Initialize the DeepSpeed GPT Transformer Layer.
+    """
+
+    def __init__(self,
+                 config,
+                 mp_group=None,
+                 quantize_scales=None,
+                 quantize_groups=1,
+                 merge_count=1,
+                 mlp_extra_grouping=False):
+        super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping)
venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_llama2.py ADDED
@@ -0,0 +1,69 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import torch
+from deepspeed import comm as dist
+from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
+
+inference_module = None
+
+
+class DeepSpeedLlama2Inference(DeepSpeedTransformerInference):
+    """Initialize the DeepSpeed OPT Transformer Layer.
+    """
+
+    def __init__(self,
+                 config,
+                 mp_group=None,
+                 quantize_scales=None,
+                 quantize_groups=1,
+                 merge_count=1,
+                 mlp_extra_grouping=False):
+        super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping)
+
+    def forward(self, *args, **kwargs):
+
+        input = args[0]
+        input_mask = None
+        # Allocate memory only on first layer forward
+        if self.config.layer_id == 0 and self._alloc_workspace:
+            self.allocate_workspace(self.config.hidden_size, self.config.heads,
+                                    input.size()[1],
+                                    input.size()[0], DeepSpeedTransformerInference.layer_id, self.config.mp_size,
+                                    self.config.bigscience_bloom,
+                                    dist.get_rank() if dist.is_initialized() else 0, self.config.max_out_tokens,
+                                    self.config.min_out_tokens)
+            self._alloc_workspace = False
+
+        get_present = True
+
+        # We set the prev key/value to None when there is a prompt
+        if input.shape[1] > 1:
+            self.layer_past = None
+        layer_past = self.layer_past
+
+        input_type = input.dtype
+
+        if (self.config.dtype in [torch.float16, torch.bfloat16, torch.int8]) \
+            and input.dtype == torch.float:
+            target_dtype = torch.half if self.dtype == torch.int8 else self.dtype
+            input = input.to(target_dtype)
+
+        with torch.no_grad():
+            attention_output, key, value, context_outputtn_ctx, inp_norm = \
+                self.attention(input,
+                               input_mask,
+                               None,
+                               layer_past,
+                               get_present,
+                               None, None, None,
+                               self.norm_w,
+                               self.norm_b,
+                               None)
+            self.layer_past = (key, value)
+            output = self.mlp(attention_output, input, inp_norm, self.attention.attn_ob)
+
+            output = output.to(input_type)
+        return output
venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_megatron_gpt.py ADDED
@@ -0,0 +1,20 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
+
+
+class DeepSpeedMegatronGPTInference(DeepSpeedTransformerInference):
+    """Initialize the DeepSpeed Megatron GPT Transformer Layer.
+    """
+
+    def __init__(self,
+                 config,
+                 mp_group=None,
+                 quantize_scales=None,
+                 quantize_groups=1,
+                 merge_count=1,
+                 mlp_extra_grouping=False):
+        super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping)
venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_opt.py ADDED
@@ -0,0 +1,20 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from deepspeed.model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
+
+
+class DeepSpeedOPTInference(DeepSpeedTransformerInference):
+    """Initialize the DeepSpeed OPT Transformer Layer.
+    """
+
+    def __init__(self,
+                 config,
+                 mp_group=None,
+                 quantize_scales=None,
+                 quantize_groups=1,
+                 merge_count=1,
+                 mlp_extra_grouping=False):
+        super().__init__(config, mp_group, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping)
venv/lib/python3.10/site-packages/deepspeed/model_implementations/transformers/ds_transformer.py ADDED
@@ -0,0 +1,199 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+import torch
+import torch.nn as nn
+from deepspeed import comm as dist
+from deepspeed.utils.logging import log_dist
+
+from deepspeed.ops.transformer.inference.ds_mlp import DeepSpeedMLP
+from deepspeed.ops.transformer.inference.ds_attention import DeepSpeedSelfAttention, BloomSelfAttention
+from deepspeed.accelerator import get_accelerator
+from deepspeed.ops.op_builder import InferenceBuilder
+import deepspeed
+if deepspeed.HAS_TRITON:
+    from deepspeed.ops.transformer.inference.triton.mlp import TritonMLP
+    from deepspeed.ops.transformer.inference.triton.attention import TritonSelfAttention
+
+inference_module = None
+
+
+class DeepSpeedTransformerInference(nn.Module):
+    """Initialize the DeepSpeed Transformer Layer.
+        Arguments:
+            layer_id: The layer index starting from 0, e.g. if model has 24 transformer layers,
+                layer_id will be 0,1,2...23 when each layer object is instantiated
+            config: An object of DeepSpeedInferenceConfig
+            mp_group: Model parallelism group initialized on the modeling side.
+            quantize_scales: This argument groups all the layers' scales used for quantization
+            quantize_groups: Number of groups used for quantizing the model
+            merge_count: Shows the number of model-parallel checkpoints merged before running inference.
+                We use this argument to control the quantization scale for the model parameters if a bigger
+                quantize-grouping than 1 is used.
+            mlp_extra_grouping: This flag is used to show a 2x higher number of groups used for the MLP part
+                of a Transformer layer. We use this feature for quantization to reduce the convergence impact
+                for specific downstream tasks.
+    """
+    layer_id = 0
+
+    def __init__(self,
+                 config,
+                 mp_group=None,
+                 quantize_scales=None,
+                 quantize_groups=1,
+                 merge_count=1,
+                 mlp_extra_grouping=False):
+        super(DeepSpeedTransformerInference, self).__init__()
+
+        self.config = config
+        self.config.layer_id = DeepSpeedTransformerInference.layer_id
+        DeepSpeedTransformerInference.layer_id += 1
+
+        data_type = torch.half if self.config.dtype == torch.int8 else self.config.dtype
+        global inference_module
+        if inference_module is None:
+            builder = InferenceBuilder()
+            inference_module = builder.load()
+
+        if DeepSpeedTransformerInference.layer_id == 1:
+            log_dist(f"DeepSpeed-Inference config: {self.config.__dict__}", [0])
+            if deepspeed.HAS_TRITON and self.config.use_triton:
+                log_dist(f"Injecting Triton kernels ...", [0])
+
+        if self.config.bigscience_bloom:
+            self.attention = BloomSelfAttention(self.config, mp_group, quantize_scales, quantize_groups, merge_count)
+            assert not self.config.use_triton
+        else:
+            if deepspeed.HAS_TRITON and self.config.use_triton:
+                self.attention = TritonSelfAttention(self.config)
+            else:
+                self.attention = DeepSpeedSelfAttention(self.config, mp_group, quantize_scales, quantize_groups,
+                                                        merge_count)
+
+        if deepspeed.HAS_TRITON and self.config.use_triton:
+            self.mlp = TritonMLP(self.config)
+        else:
+            self.mlp = DeepSpeedMLP(self.config, mp_group, quantize_scales, quantize_groups, merge_count,
+                                    mlp_extra_grouping)
+
+        device = get_accelerator().current_device_name()  # if config.bigscience_bloom else 'cpu'
+        if self.config.set_empty_params:
+            self.norm_w = None
+            self.norm_b = None
+        else:
+            self.norm_w = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type, device=device),
+                                       requires_grad=False)
+            self.norm_b = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type, device=device),
+                                       requires_grad=False)
+        self.layer_past = None
+        try:
+            if config.dtype == torch.float32:
+                self.allocate_workspace = inference_module.allocate_workspace_fp32
+            elif config.dtype == torch.bfloat16:
+                self.allocate_workspace = inference_module.allocate_workspace_bf16
+            else:
+                self.allocate_workspace = inference_module.allocate_workspace_fp32
+            self._alloc_workspace = True
+        except AttributeError:
+            self.allocate_workspace = None
+            self._alloc_workspace = False
+
+    @classmethod
+    def reset_cache(cls):
+        if inference_module is not None:
+            inference_module.reset_cache()
+
+    def forward(
+            self,
+            input=None,
+            input_mask=None,
+            attention_mask=None,
+            attn_mask=None,
+            head_mask=None,
+            layer_past=None,
+            get_key_value=False,
+            get_present=False,
+            encoder_output=None,
+            enc_dec_attn_mask=None,
+            x=None,
+            encoder_hidden_states=None,
+            encoder_attention_mask=None,
+            use_cache=False,
+            alibi=None,
+            output_attentions=False,
+            # TODO(arashb): 'layer_head_mask' and 'past_key_value' are only added to satisfy the OPT models API.
+            # This needs to be redesigned later!
+            layer_head_mask=None,
+            past_key_value=None,
+            **kwargs):
+
+        if x is not None:
+            input = x
+        if "hidden_states" in kwargs:
+            input = kwargs["hidden_states"]
+
+        input_mask = (input_mask if attn_mask is None else attn_mask) if attention_mask is None else attention_mask
+
+        # Allocate memory only on first layer forward
+        if self.config.layer_id == 0 and self._alloc_workspace:
+            self.allocate_workspace(self.config.hidden_size, self.config.heads,
+                                    input.size()[1],
+                                    input.size()[0], DeepSpeedTransformerInference.layer_id, self.config.mp_size,
+                                    self.config.bigscience_bloom,
+                                    dist.get_rank() if dist.is_initialized() else 0, self.config.max_out_tokens,
+                                    self.config.min_out_tokens)
+            self._alloc_workspace = False
+
+        get_present = (get_present or get_key_value or use_cache)
+        input_mask = input_mask if attention_mask is None else attention_mask
+
+        # We set the prev key/value to None when there is a prompt
+        if input.shape[1] > 1:
+            self.layer_past = None
+        layer_past = layer_past if layer_past is not None else self.layer_past
+        head_mask = layer_head_mask if layer_head_mask is not None else head_mask
+
+        attn_mask = None
+        if isinstance(input, tuple):
+            attn_mask = input[1]
+            input = input[0]
+        input_type = input.dtype
+
+        if (self.config.dtype in [torch.float16, torch.bfloat16, torch.int8]) \
+            and input.dtype == torch.float:
+            target_dtype = torch.half if self.config.dtype == torch.int8 else self.config.dtype
+            input = input.to(target_dtype)
+
+        with torch.no_grad():
+            attention_output, key, value, context_outputtn_ctx, inp_norm = \
+                self.attention(input,
+                               input_mask,
+                               head_mask,
+                               layer_past,
+                               get_present,
+                               encoder_hidden_states,
+                               encoder_attention_mask,
+                               output_attentions,
+                               self.norm_w,
+                               self.norm_b,
+                               alibi)
+
+            presents = (key, value)
+            self.layer_past = presents if layer_past is None else None
+            output = self.mlp(attention_output, input, inp_norm, self.attention.attn_ob)
+
+            if not self.config.pre_layer_norm:
+                output = inference_module.layer_norm(output, self.norm_w, self.norm_b, self.config.epsilon)
+
+            output = output.to(input_type)
+        if get_present:
+            output = (output, presents)
+
+        if self.config.return_single_tuple:
+            return (output, )
+        elif self.config.return_tuple:
+            return output if type(output) is tuple else (output, attn_mask)
+        else:
+            return output
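
These layers are normally not constructed by hand; a hedged sketch of how they typically come into being, via deepspeed.init_inference with kernel injection (the model name is illustrative, and exact kwargs vary across DeepSpeed versions):

import torch
import deepspeed
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained("gpt2", torch_dtype=torch.float16)
engine = deepspeed.init_inference(model,
                                  dtype=torch.float16,
                                  replace_with_kernel_inject=True)

# After injection, the model's transformer blocks are DeepSpeedTransformerInference modules.
tok = AutoTokenizer.from_pretrained("gpt2")
ids = tok("DeepSpeed inference", return_tensors="pt").input_ids.to(model.device)
out = engine.module.generate(ids, max_new_tokens=8)
print(tok.decode(out[0]))
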
venv/lib/python3.10/site-packages/deepspeed/module_inject/__init__.py ADDED
@@ -0,0 +1,10 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from .replace_module import replace_transformer_layer, revert_transformer_layer, ReplaceWithTensorSlicing, GroupQuantizer, generic_injection
+from .module_quantize import quantize_transformer_layer
+from .replace_policy import HFBertLayerPolicy
+from .layers import LinearAllreduce, LinearLayer, EmbeddingLayer, Normalize
+from .policy import DSPolicy
venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/auto_tp_model_utils.cpython-310.pyc ADDED
Binary file (4.04 kB).
 
venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/fusedqkv_utils.cpython-310.pyc ADDED
Binary file (4.82 kB).
 
venv/lib/python3.10/site-packages/deepspeed/module_inject/__pycache__/module_quantize.cpython-310.pyc ADDED
Binary file (2.84 kB).
 
venv/lib/python3.10/site-packages/deepspeed/module_inject/auto_tp.py ADDED
@@ -0,0 +1,491 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+# Automatic Tensor Parallelism
+import re
+
+from torch import nn
+from .replace_policy import replace_policies
+from typing import Optional
+import torch
+from deepspeed import comm as dist
+from .layers import LinearAllreduce, LinearLayer, LmHeadLinearAllreduce
+from deepspeed.accelerator import get_accelerator
+from .fusedqkv_utils import require_tp_fused_qkvw, prepare_tp_fused_qkvw
+from deepspeed.module_inject.tp_shard import get_shard_size, get_shard_size_list
+
+
+def move(tensor, device):
+    if tensor.is_meta:
+        return torch.empty_like(tensor, device=device)
+    else:
+        # Using new tensors help in freeing memory (after split for example) was done before by calling clone().
+        # Using copy=True instead of clone() will help in case of cpu --> cpu.
+        # Otherwise to() will not create a new copy for the view of the full tensor, and it will not be de-referenced.
+        return tensor.to(device, copy=True)
+
+
+class ReplaceWithTensorSlicing:
+
+    def __init__(self, mp_group=None, mp_size=1, out_dim=1, in_dim=0):
+        if mp_group is not None:
+            self.gpu_index = dist.get_rank(group=mp_group)
+        else:
+            self.gpu_index = 0
+        self.out_dim = out_dim
+        self.in_dim = in_dim
+        self.mp_size = mp_size
+
+    def merge_assert(self, dim1, dim2):
+        assert dim1 > dim2, \
+            'Merging tensors is not allowed here! Please use deepspeed load_checkpoint\
+            for merging your checkpoints before replacing the transformer layer with\
+            inference-kernels'
+
+    def strided_copy(self,
+                     dst: Optional[torch.Tensor],
+                     src: Optional[torch.Tensor],
+                     num_splits: int,
+                     int8: bool = False,
+                     allocate_tensor: bool = False):
+        if src is None:
+            return src
+        src_shape = src.shape
+        dst_shape = dst.shape
+
+        outer_dim = 0 if int8 else -1
+
+        if allocate_tensor:
+            dst = torch.empty_like(dst)
+
+        src_split = torch.split(src.data, src.shape[outer_dim] // num_splits, dim=outer_dim)
+        if (len(src_shape) == 2 and len(dst_shape) == 2):
+            if src_shape[outer_dim] == dst_shape[self.out_dim]:
+                try:
+                    dst = dst.reshape(-1).data.copy_(src.data.reshape(-1)).reshape(src.shape)
+                except:
+                    print(dst.shape, src.shape)
+                    exit()
+                dst = torch.nn.parameter.Parameter(dst, requires_grad=False)
+                if hasattr(src, 'scale'):
+                    dst.scale = src.scale
+                return dst
+            self.merge_assert(src_shape[outer_dim], dst_shape[self.out_dim])
+            qkv_size = dst_shape[self.out_dim] // num_splits
+            qkv_split = [torch.split(src_s, qkv_size, dim=outer_dim) for src_s in src_split]
+            weight_split = [
+                torch.cat([qkv_s[i] for qkv_s in qkv_split], axis=outer_dim) for i in range(len(qkv_split[0]))
+            ]
+            dst = dst.reshape(-1).data.copy_(weight_split[self.gpu_index].contiguous().reshape(-1)).reshape(
+                weight_split[self.gpu_index].shape)
+        else:
+            if src_shape[0] == dst_shape[0]:
+                return torch.nn.parameter.Parameter(src)
+            qkv_size = dst_shape[0] // num_splits
+            qkv_split = [torch.split(src_s, qkv_size, dim=0) for src_s in src_split]
+            bias_split = [torch.cat([qkv_s[i] for qkv_s in qkv_split], axis=0) for i in range(len(qkv_split[0]))]
+            dst.data.copy_(bias_split[self.gpu_index].contiguous())
+
+        dst = torch.nn.parameter.Parameter(dst, requires_grad=False)
+        if hasattr(src, 'scale'):
+            dst.scale = src.scale
+        return dst
+
+    def copy(self, dst, src, int8=False, allocate_tensor=False):
+        if src is None:
+            return src
+        assert not dst.data.is_meta  # the torch.Tensor.copy_ method used below will silently fail on meta tensors
+        if allocate_tensor:
+            dst = torch.empty_like(dst)
+        outer_dim = 0 if int8 else 1
+        inner_dim = 1 if int8 else 0
+        src_shape = src.shape
+        dst_shape = dst.shape
+        if (len(src_shape) == 2 and len(dst_shape) == 2):
+
+            if src_shape[inner_dim] == dst_shape[self.in_dim] and src_shape[outer_dim] == dst_shape[self.out_dim]:
+                dst = dst.reshape(-1).data.copy_(src.data.reshape(-1)).reshape(src.shape)
+            else:
+                if src_shape[inner_dim] != dst_shape[self.in_dim]:
+                    self.merge_assert(src_shape[inner_dim], dst_shape[self.in_dim])
+                    dst.data.copy_(src[:, self.gpu_index * dst_shape[self.in_dim]: (self.gpu_index + 1) * dst_shape[self.in_dim]] if inner_dim == 1 else \
+                        src[self.gpu_index * dst_shape[self.in_dim]: (self.gpu_index + 1) * dst_shape[self.in_dim], :])
+                else:
+                    self.merge_assert(src_shape[outer_dim], dst_shape[self.out_dim])
+                    dst.data.copy_(src[:, self.gpu_index * dst_shape[self.out_dim]: (self.gpu_index + 1) * dst_shape[self.out_dim]] if outer_dim == 1 else \
+                        src[self.gpu_index * dst_shape[self.out_dim]: (self.gpu_index + 1) * dst_shape[self.out_dim], :])
+        else:
+            if src_shape[0] == dst_shape[0]:
+                dst = src if src.dtype == dst.dtype else dst.data.copy_(src)
+            else:
+                dst.data.copy_(src[self.gpu_index * dst_shape[-1]:(self.gpu_index + 1) * dst_shape[-1]])
+        dst = torch.nn.parameter.Parameter(dst, requires_grad=False)
+        if hasattr(src, 'scale'):
+            dst.scale = src.scale
+        return dst
+
+
+class Loading():
+
+    def is_load_module(module):
+        load_layers = [nn.Linear, nn.Embedding, nn.LayerNorm]
+        load_layer_names = [
+            "LPLayerNorm", "SharedEmbedding", "OPTLearnedPositionalEmbedding", "LlamaRMSNorm", "FalconLinear",
+            "MistralRMSNorm", "T5LayerNorm", "MixtralRMSNorm"
+        ]
+        return module.__class__ in load_layers or module._get_name() in load_layer_names
+
+    def load_buffer(module, state_dict, prefix):
+        for name in module._buffers.keys():
+            if module._buffers[name].data.is_meta:
+                module._buffers[name] = torch.nn.parameter.Parameter(
+                    data=torch.empty_like(module._buffers[name].data, device="cpu"),
+                    requires_grad=module._buffers[name].data.requires_grad)
+            if prefix + name in state_dict.keys():
+                module._buffers[name].data.copy_(state_dict[prefix + name])
+
+    def load(module, state_dict, prefix, mp_group=None):
+        mp_replace = ReplaceWithTensorSlicing(mp_group=mp_group)
+        if hasattr(module, 'weight'):
+            if module.weight.data.is_meta:
+                # meta tensor cannot be casted or copied to, so we need to replace it with a normal tensor here
+                module.weight = torch.nn.parameter.Parameter(data=torch.empty_like(module.weight.data, device="cpu"),
+                                                             requires_grad=module.weight.data.requires_grad)
+            if 'query_key_value' in prefix:
+                module.weight = mp_replace.strided_copy(module.weight.data,
+                                                        state_dict[prefix + 'weight'],
+                                                        num_splits=3)
+            else:
+                module.weight = mp_replace.copy(module.weight.data, state_dict[prefix + 'weight'])
+        else:
+            if hasattr(module, 'norm') and hasattr(module.norm, 'weight'):
+                if module.norm.weight.data.is_meta:
+                    # meta tensor cannot be casted or copied to, so we need to replace it with a normal tensor here
+                    module.norm.weight = torch.nn.parameter.Parameter(
+                        data=torch.empty_like(module.norm.weight.data, device="cpu"),
+                        requires_grad=module.norm.weight.data.requires_grad)
+                module.norm.weight = mp_replace.copy(module.norm.weight.data, state_dict[prefix + 'weight'])
+
+        if prefix + 'bias' in state_dict.keys():
+            if hasattr(module, 'bias'):
+                if module.bias.data.is_meta:
+                    # meta tensor cannot be casted or copied to, so we need to replace it with a normal tensor here
+                    module.bias = torch.nn.parameter.Parameter(data=torch.empty_like(module.bias.data, device="cpu"),
+                                                               requires_grad=module.bias.data.requires_grad)
+                module.bias = mp_replace.copy(module.bias, state_dict[prefix + 'bias'])
+            else:
+                if hasattr(module, 'norm') and hasattr(module.norm, 'bias'):
+                    if module.norm.bias.data.is_meta:
+                        # meta tensor cannot be casted or copied to, so we need to replace it with a normal tensor here
+                        module.norm.bias = torch.nn.parameter.Parameter(
+                            data=torch.empty_like(module.norm.bias.data, device="cpu"),
+                            requires_grad=module.norm.bias.data.requires_grad)
+                    module.norm.bias = mp_replace.copy(module.norm.bias, state_dict[prefix + 'bias'])
+
+
+class AutoTP():
+
+    def __init__(self, module, all_reduce_linears, prefix, state_dict, linear_layer_setting, orig_layer_impl):
+        self.module = module
+        self.all_reduce_linears = all_reduce_linears
+        self.prefix = prefix
+        self.state_dict = state_dict
+
+        self.mp_size = None
+        self.mp_group = None
+        self.linear_layer_setting = linear_layer_setting
+        self.orig_layer_impl = orig_layer_impl
+        self.linear_policies = None
+        self.conv_linear_layer = False
+
+    def in_module_list(module, module_list):
+        for item in module_list:
+            if type(item).__name__ == type(module).__name__:
+                return True
+        return False
+
+    def get_module_list(model):
+        mlist = []
+        for child in model.children():
+            if isinstance(child, nn.ModuleList):
+                for module in child.children():
+                    if not mlist:
+                        mlist = [module]
+                    elif not AutoTP.in_module_list(module, mlist):
+                        mlist = mlist + [module]
+            else:
+                mlist = mlist + AutoTP.get_module_list(child)
+        return mlist
+
+    def supported(model):
+        unsupported = ['deberta', 'flaubert', 'fsmt', 'gpt2', 'led', 'longformer', 'xlm', 'xlnet']
+        model = str(model)
+        key = re.search(r": (.*?)Model", model)
+        if key is None:
+            key = re.search(r": (.*?)Stack", model)
+        if key is None:
+            key = re.match(r"(.*?)Model", model)
+        assert key is not None, "Not able to determine model policy automatically. Please provide policy."
+        if key.group(1).lower() in unsupported:
+            return False
+        return True
+
+    def get_layers(parent, module):
+        layer_list = []
+        for key, submodule in module._modules.items():
+            if isinstance(submodule, nn.Linear):
+                layer_list = layer_list + [parent + "." + key]
+            elif isinstance(submodule, nn.LayerNorm) or key == 'LayerNorm' or key == 'layer_norm':
+                layer_list = layer_list + ["ln"]
+            else:
+                layer_list = layer_list + AutoTP.get_layers(key, submodule)
+        return layer_list
+
+    def update_policy_list(policy_list, new_module, new_gems):
+        if len(policy_list):
+            for i, policy in enumerate(policy_list):
+                # if module already exists in policy, combine gems and remove duplicates
+                if policy[0] == type(new_module):
+                    new_gems = set(new_gems + policy[1])
+                    policy_list[i] = tuple([type(new_module), new_gems])
+                    return policy_list
+        policy_list.append(tuple([type(new_module), new_gems]))
+        return policy_list
+
+    def kernel_supported(module_list):
+        policy = []
+        for plcy in replace_policies:
+            # instantiate a throw-away policy in order to populate the _orig_layer_class
+            _ = plcy(None)
+            if isinstance(plcy._orig_layer_class, list):
+                for orig_layer_class in plcy._orig_layer_class:
+                    policy.append(orig_layer_class)
+            elif plcy._orig_layer_class is not None:
+                policy.append(plcy._orig_layer_class)
+        for child in module_list:
+            if child.__class__ in policy:
+                return True
+        return False
+
+    def tp_parser(model):
+        policy_list = []
+        module_list = []
+        layer_list = []
+        gem_list = []
+
+        module_list = AutoTP.get_module_list(model)
+        assert AutoTP.supported(model), "AutoTP not supported for model. Please use kernel injection since container policy for model exists." \
+            if AutoTP.kernel_supported(module_list) else "AutoTP not supported for model. Please provide policy."
+        norm_layer_name_list = ['LayerNorm', 'layer_norm', 'ln_1', 'ln_2']
+        #ln_1 , ln_2 for Qwen
+        for module in module_list:
+            for key, submodule in module._modules.items():
+                if isinstance(submodule, nn.Linear):
+                    layer_list = layer_list + ["." + key]
+                elif isinstance(submodule, nn.LayerNorm) or key in norm_layer_name_list:
+                    layer_list = layer_list + ["ln"]
+                else:
+                    layer_list = layer_list + AutoTP.get_layers(key, submodule)
+            for i, layer in enumerate(layer_list):
+                if layer == 'ln':
+                    if layer_list[i - 1] != 'ln':
+                        gem_list = gem_list + [layer_list[i - 1]]
+                elif 'out_proj' in layer:
+                    gem_list = gem_list + [layer]
+                elif 'o_proj' in layer:
+                    gem_list = gem_list + [layer]
+                elif 'down_proj' in layer:
+                    gem_list = gem_list + [layer]
+                elif 'attention.dense' in layer and 'GPTNeoX' in str(model):
+                    gem_list = gem_list + [layer]
+                elif 'self_attention.dense' in layer and 'falcon' in str(
+                        type(module)):  # this is a hack to get the right linear layer for this model!
+                    gem_list = gem_list + [layer]
+                # Mixtral-7x8b used w2*act(w1*w3) linear. need to replace w2 to linearallreduce.
+                elif 'w2' in layer and 'Mixtral' in str(type(module)):
+                    gem_list = gem_list + [layer]
+
+            layer_list = []
+            if gem_list != []:
+                gem_list = list(set(gem_list))
+                policy_list = AutoTP.update_policy_list(policy_list, module, gem_list)
+                gem_list = []
+        assert len(policy_list), "AutoTP not supported for model. Please use kernel injection since container policy for model exists." \
+            if AutoTP.kernel_supported(module_list) else "Not able to determine model policy automatically. Please provide policy."
+        return policy_list
+
+    def set_tensor_parallel_config(self, mp_size, mp_group):
+        self.mp_size = mp_size
+        self.mp_group = mp_group
+
+    def _replace(self, child, name, conv_linear_layer):
+        if getattr(child, "replaced", False) == True:
+            return
+        weight_shape = child.weight.shape
+        mp_replace = ReplaceWithTensorSlicing(mp_group=self.mp_group)
+        # For mixtral-7x8b, need to skip MoE gate linear replace.
+        if name == "block_sparse_moe.gate":
+            return child
+        if name in self.all_reduce_linears:
+            # if conv_linear_layer [weight_shape[1], weight_shape[0] // mp_size]
+            # else [weight_shape[0], weight_shape[1] // mp_size]
+
+            if self.conv_linear_layer:
+                child.weight.data = child.weight.data.transpose(-1, -2).contiguous()
+            data = child.weight.data.split(get_shard_size_list(
+                weight_shape[0] if self.conv_linear_layer else weight_shape[1], self.mp_size, name),
+                                           dim=1)
+            data_dc = move(data[mp_replace.gpu_index], get_accelerator().current_device_name()).detach()
+            del data
+
+            setattr(child, "replaced", True)
+            if name == "lm_head" or name == 'embed_out':
+                return LmHeadLinearAllreduce(
+                    torch.nn.parameter.Parameter(data_dc, requires_grad=False), dist.get_rank(), dist.get_world_size(),
+                    child.bias if child.bias is None else torch.nn.parameter.Parameter(
+                        move(child.bias,
+                             get_accelerator().current_device_name())), self.mp_group)
+            return LinearAllreduce(torch.nn.parameter.Parameter(data_dc, requires_grad=False), child.bias if child.bias is None else \
+                torch.nn.parameter.Parameter(move(child.bias, get_accelerator().current_device_name())), self.mp_group)
+        else:
+
+            # if conv_linear_layer [weight_shape[1], weight_shape[0] // mp_size]
+            # else [weight_shape[0] // mp_size, weight_shape[1]]
+            if self.conv_linear_layer:
+                child.weight.data = child.weight.data.transpose(-1, -2).contiguous()
+
+            if require_tp_fused_qkvw(name, self.mp_size):
+                #Check and handle fused qkv for TP
+                #The copy is a regular copy, The shape of dst and src is the same
+                data_dc = move(
+                    prepare_tp_fused_qkvw(self.module, child.weight.data, self.mp_size, mp_replace.gpu_index),
+                    get_accelerator().current_device_name())
+
+                bias_data_dc = None if child.bias is None else move(
+                    prepare_tp_fused_qkvw(self.module, child.bias.data, self.mp_size, mp_replace.gpu_index),
+                    get_accelerator().current_device_name())
+            else:
+                data = child.weight.data.split(get_shard_size_list(weight_shape[0], self.mp_size, name),
+                                               dim=1 if self.conv_linear_layer else 0)
+                data_dc = move(data[mp_replace.gpu_index], get_accelerator().current_device_name()).detach()
+                del data
+
+                if child.bias is not None:
+                    bias_data = child.bias.data.split(get_shard_size_list(
+                        weight_shape[1] if self.conv_linear_layer else weight_shape[0], self.mp_size, name),
+                                                      dim=0)
+                    bias_data = move(bias_data[mp_replace.gpu_index], get_accelerator().current_device_name())
+                    bias_data_dc = torch.nn.parameter.Parameter(bias_data, requires_grad=False)
+                    del bias_data
+                else:
+                    bias_data_dc = None
+
+            setattr(child, "replaced", True)
+            return LinearLayer(weight=torch.nn.parameter.Parameter(data_dc, requires_grad=False), bias=bias_data_dc)
+
+    def _slice_embedding(self, child, name, conv_linear_layer):
+        if getattr(child, "replaced", False) == True:
+            return
+        mp_replace = ReplaceWithTensorSlicing(mp_group=self.mp_group)
+
+        if hasattr(child.weight, 'ds_tensor'):
+            data = child.weight.ds_tensor.data.split(get_shard_size_list(child.weight.shape[1], self.mp_size), dim=1)
+        else:
+            data = child.weight.data.split(get_shard_size_list(child.weight.shape[1], self.mp_size, name), dim=1)
+        data = data[mp_replace.gpu_index].to(get_accelerator().current_device_name())
+        data = torch.nn.parameter.Parameter(data, requires_grad=False)
+
+        new_embedding = nn.Embedding(child.weight.shape[0], get_shard_size(child.weight.shape[1], self.mp_size, name))
+        new_embedding.weight.data.copy_(data)
+        setattr(child, "replaced", True)
+        return new_embedding
+
+    def update_mp_params(self, child):
+        if getattr(child, "replaced", False) == True:
+            return
+        for param in [
+                "n_heads", "inner_dim", "num_heads", "num_kv", "num_attention_heads", "num_attn_heads",
+                "all_head_size", "embed_dim", "hidden_size", "num_key_value_heads", "num_kv_heads", "kv_n_heads",
+                "d_model"
+        ]:
+            if hasattr(child, param):
+                param_val = getattr(child, param)
+                setattr(child, param, get_shard_size(param_val, self.mp_size))
+        setattr(child, "replaced", True)
+
+    def update_linear_policies(self):
+        self.conv_linear_layer = False
+        if self.linear_layer_setting is not None:
+            self.linear_policies = {self.linear_layer_setting[0]: self._replace}
+            if len(self.linear_layer_setting) == 2:
+                self.linear_policies.update({self.linear_layer_setting[1]: self._slice_embedding})
+        else:
+            import transformers
+            if self.orig_layer_impl is transformers.models.gpt2.modeling_gpt2.GPT2Block:
+                try:
+                    self.conv_linear_layer = True
+                    self.linear_policies = {transformers.pytorch_utils.Conv1D: self._replace}
+                except ImportError:
+                    self.linear_policies = {nn.Linear: self._replace}
+            else:
+                self.linear_policies = {nn.Linear: self._replace, nn.Embedding: self._slice_embedding}
+
+    def _replace_module(self, r_module, prev_name='', prev_class_name=''):
+        for name, child in r_module.named_children():
+            if prev_class_name == "":
+                class_name = prev_name
+            elif prev_name == "":
+                class_name = prev_class_name
+            else:
+                class_name = prev_class_name + '.' + prev_name
+            checking_key = self.prefix + '.' + class_name + '.' + name + '.' if class_name != "" else self.prefix + '.' + name + '.'
+            if Loading.is_load_module(child) and self.state_dict is not None:
+                if any(checking_key in item for item in self.state_dict):
+                    Loading.load(child, self.state_dict, checking_key, self.mp_group)
+                else:
+                    continue
+            if len(child._buffers) != 0 and self.state_dict is not None:
+                Loading.load_buffer(child, self.state_dict, checking_key)
+            if child.__class__ in self.linear_policies:
+                setattr(r_module, name, self.linear_policies[child.__class__](child, prev_name + '.' + name,
+                                                                              self.conv_linear_layer))
+            elif any(isinstance(child, lp) for lp in self.linear_policies):
+                # Added for falcon model support
+                # Note: isinstance will account for class inheritance, child.__class__ does not
+                key = None
+                for lp in self.linear_policies:
+                    if isinstance(child, lp):
+                        key = lp
+                        break
+                assert key is not None
+                setattr(r_module, name, self.linear_policies[key](child, prev_name + '.' + name,
+                                                                  self.conv_linear_layer))
+            else:
+                self.update_mp_params(child)
+                self._replace_module(child, name, class_name)
+        return r_module
+
+    def get_model_num_kv_heads(self, config):
+        num_kv_heads = None
+        kv_head_names = ['num_kv_heads', 'num_key_value_heads', 'num_attention_heads', 'n_heads']
+        for name in kv_head_names:
+            if hasattr(config, name):
+                num_kv_heads = getattr(config, name)
+                if num_kv_heads is not None:
+                    break
+        return num_kv_heads
+
+    def _replace_last_linear_module(self, r_module):
+        if hasattr(r_module, "lm_head"):
+            name = "lm_head"
+            child = r_module.lm_head
+        elif hasattr(r_module, "embed_out"):
+            name = "embed_out"
+            child = r_module.embed_out
+        else:
+            return r_module
+        if child.__class__ in self.linear_policies:
+            setattr(r_module, name, self.linear_policies[child.__class__](child, name, self.conv_linear_layer))
+        return r_module
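
For reference, a minimal sketch (not part of the diff) of the sharding rule that AutoTP._replace applies above: linears listed in all_reduce_linears are split along the input dimension (dim=1, becoming LinearAllreduce), while other linears are split along the output dimension (dim=0, becoming LinearLayer). The helper name below is hypothetical and it assumes even splits; the real code uses get_shard_size_list to handle uneven head counts.

# Illustrative sketch only (hypothetical helper, not from the file above).
import torch

def shard_linear_weight(weight: torch.Tensor, mp_size: int, rank: int, all_reduce: bool) -> torch.Tensor:
    # dim=1 for all-reduce linears (e.g. out_proj/o_proj/down_proj), dim=0 otherwise,
    # mirroring the split(...) calls in AutoTP._replace.
    split_dim = 1 if all_reduce else 0
    return torch.chunk(weight, mp_size, dim=split_dim)[rank].contiguous()

w = torch.randn(8, 6)
print(shard_linear_weight(w, mp_size=2, rank=0, all_reduce=True).shape)   # torch.Size([8, 3])
print(shard_linear_weight(w, mp_size=2, rank=1, all_reduce=False).shape)  # torch.Size([4, 6])
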
venv/lib/python3.10/site-packages/deepspeed/module_inject/auto_tp_model_utils.py ADDED
@@ -0,0 +1,104 @@
+# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+# DeepSpeed Team
+
+from deepspeed import comm as dist
+import torch
+from typing import Optional
+from deepspeed.module_inject.tp_shard import get_shard_size, get_shard_size_list
+
+
+def build_bloom_alibi_tensor(attention_mask: torch.Tensor, num_heads: int, dtype: torch.dtype) -> torch.Tensor:
+    """
+    Link to paper: https://arxiv.org/abs/2108.12409 Alibi tensor is not causal as the original paper mentions, it
+    relies on a translation invariance of softmax for quick implementation: with l being a tensor, and a fixed value
+    `softmax(l+a) = softmax(l)`. Based on
+    https://github.com/ofirpress/attention_with_linear_biases/blob/a35aaca144e0eb6b789dfcb46784c4b8e31b7983/fairseq/models/transformer.py#L742
+    TODO @thomasw21 this doesn't work as nicely due to the masking strategy, and so masking varies slightly.
+
+    Args:
+    Returns tensor shaped (batch_size * num_heads, 1, max_seq_len)
+        attention_mask (`torch.Tensor`):
+            Token-wise attention mask, this should be of shape (batch_size, max_seq_len).
+        num_heads (`int`, *required*):
+            number of heads
+        dtype (`torch.dtype`, *optional*, default=`torch.bfloat16`):
+            dtype of the output tensor
+    """
+    import math
+    batch_size, seq_length = attention_mask.shape
+    closest_power_of_2 = 2**math.floor(math.log2(num_heads))
+    base = torch.tensor(2**(-(2**-(math.log2(closest_power_of_2) - 3))),
+                        device=attention_mask.device,
+                        dtype=torch.float32)
+    powers = torch.arange(1, 1 + closest_power_of_2, device=attention_mask.device, dtype=torch.int32)
+    slopes = torch.pow(base, powers)
+
+    if closest_power_of_2 != num_heads:
+        extra_base = torch.tensor(2**(-(2**-(math.log2(2 * closest_power_of_2) - 3))),
+                                  device=attention_mask.device,
+                                  dtype=torch.float32)
+        num_remaining_heads = min(closest_power_of_2, num_heads - closest_power_of_2)
+        extra_powers = torch.arange(1, 1 + 2 * num_remaining_heads, 2, device=attention_mask.device, dtype=torch.int32)
+        slopes = torch.cat([slopes, torch.pow(extra_base, extra_powers)], dim=0)
+
+    # Note: alibi will added to the attention bias that will be applied to the query, key product of attention
+    # => therefore alibi will have to be of shape (batch_size, num_heads, query_length, key_length)
+    # => here we set (batch_size=1, num_heads=num_heads, query_length=1, key_length=max_length)
+    # => the query_length dimension will then be broadcasted correctly
+    # This is more or less identical to T5's relative position bias:
+    # https://github.com/huggingface/transformers/blob/f681437203baa7671de3174b0fa583c349d9d5e1/src/transformers/models/t5/modeling_t5.py#L527
+    arange_tensor = ((attention_mask.cumsum(dim=-1) - 1) * attention_mask)[:, None, :]
+    alibi = slopes[..., None] * arange_tensor
+    if dist.is_initialized():
+        num_heads_per_rank = get_shard_size(num_heads, dist.get_world_size())
+        offset = sum(get_shard_size_list(num_heads, dist.get_world_size())[0:dist.get_rank()])
+        alibi = alibi.view(batch_size, num_heads, 1, seq_length)
+        alibi = alibi[:, offset:num_heads_per_rank + offset, :, :]
+        return alibi.reshape(batch_size * num_heads_per_rank, 1, seq_length).to(dtype)
+    else:
+        return alibi.reshape(batch_size * num_heads, 1, seq_length).to(dtype)
+
+
+def get_alibi_mask(self, tensor, seq_length_with_past):
+    mask = self.get_alibi_mask_orig(tensor, seq_length_with_past)
+    if not self.training and dist.is_initialized():
+        num_heads_per_rank = get_shard_size(self.n_head, dist.get_world_size())
+        offset = sum(get_shard_size_list(self.n_head, dist.get_world_size())[0:dist.get_rank()])
+        mask = mask[offset:num_heads_per_rank + offset, :seq_length_with_past, :seq_length_with_past]
+
+    return mask
+
+
+def build_mpt_atten_bias_tensor(self,
+                                device,
+                                dtype,
+                                attention_mask: Optional[torch.ByteTensor] = None,
+                                prefix_mask: Optional[torch.ByteTensor] = None,
+                                sequence_id: Optional[torch.LongTensor] = None):
+    (attn_bias, attention_mask) = self._attn_bias_orig(device,
+                                                       dtype,
+                                                       attention_mask=attention_mask,
+                                                       prefix_mask=prefix_mask,
+                                                       sequence_id=sequence_id)
+    if dist.is_initialized():
+        num_heads_per_rank = get_shard_size(self.config.n_heads, dist.get_world_size())
+        offset = sum(get_shard_size_list(self.config.n_heads, dist.get_world_size())[0:dist.get_rank()])
+        attn_bias = attn_bias[:, offset:num_heads_per_rank + offset, :, :]
+    return attn_bias, attention_mask
+
+
+def build_mpt_alibi_tensor(self, num_heads, sequence_length, alibi_bias_max=8, device=None) -> torch.Tensor:
+    r"""
+    Link to paper: https://arxiv.org/abs/2108.12409 - Alibi tensor is not causal as the original paper mentions, it
+    relies on a translation invariance of softmax for quick implementation. This implementation has been copied from
+    the alibi implementation of MPT source code that led to slightly different results than the Bloom alibi:
+    https://huggingface.co/mosaicml/mpt-7b/blob/main/attention.py#L292
+    """
+    alibi = self.build_mpt_alibi_tensor_orig(num_heads, sequence_length, alibi_bias_max, device)
+    if dist.is_initialized():
+        num_heads_per_rank = int(num_heads / dist.get_world_size())
+        offset = dist.get_rank() * num_heads_per_rank
+        alibi = alibi[offset:num_heads_per_rank + offset, :, :]
+    return alibi
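
For reference, a minimal sketch (not part of the diff) of the per-rank head slicing used by the bias/alibi helpers above; it assumes num_heads divides evenly across the world size, whereas build_bloom_alibi_tensor uses get_shard_size / get_shard_size_list to handle uneven splits.

# Illustrative sketch only (hypothetical helper, not from the file above).
import torch

def slice_heads_for_rank(alibi: torch.Tensor, num_heads: int, world_size: int, rank: int) -> torch.Tensor:
    # Each rank keeps only its contiguous block of attention heads.
    heads_per_rank = num_heads // world_size      # even split assumed here
    offset = rank * heads_per_rank
    return alibi[offset:offset + heads_per_rank]

alibi = torch.randn(16, 1, 128)                   # (num_heads, 1, seq_len)
print(slice_heads_for_rank(alibi, num_heads=16, world_size=4, rank=2).shape)  # torch.Size([4, 1, 128])
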