applied-ai-018 commited on
Commit
11ed373
·
verified ·
1 Parent(s): dd3126b

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/__init__.cpython-310.pyc +0 -0
  2. venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/abstract_accelerator.cpython-310.pyc +0 -0
  3. venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/cuda_accelerator.cpython-310.pyc +0 -0
  4. venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/real_accelerator.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/deepspeed/ops/fp_quantizer/__pycache__/__init__.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/deepspeed/ops/fp_quantizer/__pycache__/quantize.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/deepspeed/ops/fp_quantizer/quantize.py +141 -0
  8. venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__init__.py +9 -0
  9. venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/__init__.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/bert_sparse_self_attention.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/matmul.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/softmax.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/sparse_attention_utils.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/sparse_self_attention.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/sparsity_config.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/bert_sparse_self_attention.py +77 -0
  17. venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/matmul.py +819 -0
  18. venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/softmax.py +296 -0
  19. venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/sparse_attention_utils.py +208 -0
  20. venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/sparse_self_attention.py +149 -0
  21. venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/sparsity_config.py +727 -0
  22. venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/trsrc/__init__.py +37 -0
  23. venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/trsrc/__pycache__/__init__.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/trsrc/matmul.tr +208 -0
  25. venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/trsrc/softmax_bwd.tr +61 -0
  26. venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/trsrc/softmax_fwd.tr +143 -0
  27. venv/lib/python3.10/site-packages/deepspeed/ops/transformer/__init__.py +9 -0
  28. venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__init__.py +8 -0
  29. venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/__init__.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/config.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/diffusers_2d_transformer.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/diffusers_transformer_block.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/ds_mlp.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/triton_ops.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/bias_add.py +26 -0
  36. venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/config.py +134 -0
  37. venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/diffusers_2d_transformer.py +10 -0
  38. venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/diffusers_attention.py +196 -0
  39. venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/diffusers_transformer_block.py +104 -0
  40. venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/ds_attention.py +290 -0
  41. venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/ds_mlp.py +124 -0
  42. venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/moe_inference.py +365 -0
  43. venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__init__.py +13 -0
  44. venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/__init__.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/base.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/gelu_gemm.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/linear.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/mlp_gemm.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/qkv_gemm.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/residual_add.cpython-310.pyc +0 -0
venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (379 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/abstract_accelerator.cpython-310.pyc ADDED
Binary file (10.5 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/cuda_accelerator.cpython-310.pyc ADDED
Binary file (13.7 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/accelerator/__pycache__/real_accelerator.cpython-310.pyc ADDED
Binary file (4.29 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/fp_quantizer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (258 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/fp_quantizer/__pycache__/quantize.cpython-310.pyc ADDED
Binary file (4.16 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/fp_quantizer/quantize.py ADDED
@@ -0,0 +1,141 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+ import abc
8
+ from abc import ABC
9
+
10
+ from deepspeed.ops.op_builder import FPQuantizerBuilder
11
+
12
+ fp_quant_module = None
13
+
14
+
15
+ class Quantizer(ABC):
16
+ """
17
+ Abstract Quantizer class that implmenents quantize/dequantize methods.
18
+
19
+ Arguments:
20
+ group_size (int, optional): number of values or elements that are grouped
21
+ together for the quantization process.
22
+ """
23
+
24
+ def __init__(self, group_size=512) -> None:
25
+ self.group_size = group_size
26
+
27
+ @abc.abstractmethod
28
+ def quantize(self,
29
+ input,
30
+ q_bits=8,
31
+ q_mantisa_bits=3,
32
+ stochastic_mode=False,
33
+ return_meta_tensor=False) -> torch.Tensor:
34
+ ...
35
+
36
+ @abc.abstractmethod
37
+ def dequantize(self, input_q, fp_out=None, q_bits=8, q_mantisa_bits=3, scale=None) -> torch.Tensor:
38
+ ...
39
+
40
+
41
+ class FP_Quantize(Quantizer):
42
+
43
+ def __init__(self, group_size=512) -> None:
44
+ global fp_quant_module
45
+ super().__init__(group_size=group_size)
46
+ if fp_quant_module is None:
47
+ fp_quant_module = FPQuantizerBuilder().load()
48
+ self.orig_dtype = None
49
+
50
+ def quantize(self,
51
+ input,
52
+ q_bits=8,
53
+ q_mantisa_bits=3,
54
+ stochastic_mode=False,
55
+ return_meta_tensor=False) -> torch.Tensor:
56
+ assert input.dtype == torch.bfloat16, "only support bf16 for now"
57
+ if return_meta_tensor:
58
+ assert q_bits == 8, "meta tensor is only supported with q_bit=8"
59
+
60
+ self.orig_dtype = input.dtype
61
+ self.orig_shape = input.shape
62
+
63
+ if q_bits == 8:
64
+ pass
65
+ elif q_bits == 12:
66
+ q_mantisa_bits = 4
67
+ elif q_bits == 6:
68
+ q_mantisa_bits = 2
69
+ elif q_bits == 4:
70
+ q_mantisa_bits = 1
71
+ else:
72
+ assert (0), \
73
+ f"Missing {q_bits}-quantization, please add the template arguments for the kernel to support this precision!"
74
+
75
+ out = fp_quant_module.quantize(input, self.group_size, stochastic_mode, q_bits, q_mantisa_bits)
76
+
77
+ if return_meta_tensor:
78
+ data, scale = out.split(self.group_size, dim=-1)
79
+ return data.contiguous().reshape(input.shape), scale.contiguous()
80
+
81
+ return out
82
+
83
+ def dequantize(self, input_q, fp_out=None, q_bits=8, q_mantisa_bits=3, scale=None) -> torch.Tensor:
84
+ assert (self.orig_dtype is not None), \
85
+ "[De-quantization Error]: you need to call quantize before dequantizing!"
86
+ fp_out = torch.empty(self.orig_shape, dtype=self.orig_dtype,
87
+ device=input_q.device) if fp_out is None else fp_out
88
+ if q_bits == 8:
89
+ pass
90
+ elif q_bits == 12:
91
+ q_mantisa_bits = 4
92
+ elif q_bits == 6:
93
+ q_mantisa_bits = 2
94
+ elif q_bits == 4:
95
+ q_mantisa_bits = 1
96
+ else:
97
+ assert (0), \
98
+ f"Missing {q_bits}-dequantization, please add the template arguments for the kernel to support this precision!"
99
+
100
+ if scale is not None:
101
+ assert input_q.numel() == fp_out.numel(), \
102
+ f'[De-quantization Error]: quantized data should have the same size as original tensor when scale is not None!'
103
+ input_q = torch.cat([input_q.reshape(-1, self.group_size), scale], dim=-1).contiguous()
104
+
105
+ fp_quant_module.dequantize(fp_out, input_q, self.group_size, q_mantisa_bits, q_bits - q_mantisa_bits - 1)
106
+ return fp_out
107
+
108
+ def selective_dequantize(self,
109
+ input_q,
110
+ indexes,
111
+ fp_out=None,
112
+ q_bits=8,
113
+ q_mantisa_bits=3,
114
+ scale=None) -> torch.Tensor:
115
+ assert (not hasattr(self, 'orig_shape') or len(self.orig_shape) == 3), \
116
+ "Selective-Dequantization works on 3d tensor only! Please reshape the tensor before calling dequantize function."
117
+ assert (self.orig_dtype is not None), \
118
+ "[De-quantization Error]: you need to call quantize before dequantizing!"
119
+ fp_out = torch.empty(
120
+ (indexes.shape[0],
121
+ *self.orig_shape[1:]), dtype=self.orig_dtype, device=input_q.device) if fp_out is None else fp_out
122
+ if q_bits == 8:
123
+ pass
124
+ elif q_bits == 12:
125
+ q_mantisa_bits = 4
126
+ elif q_bits == 6:
127
+ q_mantisa_bits = 2
128
+ elif q_bits == 4:
129
+ q_mantisa_bits = 1
130
+ else:
131
+ assert (0), \
132
+ f"Missing {q_bits}-dequantization, please add the template arguments for the kernel to support this precision!"
133
+
134
+ if scale is not None:
135
+ assert input_q.numel() == fp_out.numel(), \
136
+ f'[De-quantization Error]: quantized data should have the same size as original tensor when scale is not None!'
137
+ input_q = torch.cat([input_q.reshape(-1, self.group_size), scale], dim=-1).contiguous()
138
+
139
+ fp_quant_module.selective_dequantize(fp_out, input_q, indexes, self.group_size, q_mantisa_bits,
140
+ q_bits - q_mantisa_bits - 1)
141
+ return fp_out
venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__init__.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .sparsity_config import SparsityConfig, DenseSparsityConfig, FixedSparsityConfig, VariableSparsityConfig, BigBirdSparsityConfig, BSLongformerSparsityConfig, LocalSlidingWindowSparsityConfig
7
+ from .sparse_self_attention import SparseSelfAttention
8
+ from .bert_sparse_self_attention import BertSparseSelfAttention
9
+ from .sparse_attention_utils import SparseAttentionUtils
venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (663 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/bert_sparse_self_attention.cpython-310.pyc ADDED
Binary file (3.08 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/matmul.cpython-310.pyc ADDED
Binary file (18.7 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/softmax.cpython-310.pyc ADDED
Binary file (9.1 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/sparse_attention_utils.cpython-310.pyc ADDED
Binary file (9.99 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/sparse_self_attention.cpython-310.pyc ADDED
Binary file (5.2 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/__pycache__/sparsity_config.cpython-310.pyc ADDED
Binary file (32.8 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/bert_sparse_self_attention.py ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from torch import nn
7
+ from deepspeed.ops.sparse_attention import SparseSelfAttention, FixedSparsityConfig
8
+
9
+
10
+ class BertSparseSelfAttention(nn.Module):
11
+ """Implements Sparse Self Attention layer of Bert model based on https://github.com/microsoft/DeepSpeedExamples/blob/master/bing_bert/nvidia/modelingpreln.py#L373
12
+
13
+ For more information please see, TODO DeepSpeed Sparse Transformer.
14
+
15
+ For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial.
16
+ """
17
+
18
+ def __init__(
19
+ self,
20
+ config,
21
+ # SparsityConfig parameters needs to be set accordingly
22
+ sparsity_config=FixedSparsityConfig(num_heads=4)):
23
+ """Initialize the bert sparse self attention layer.
24
+
25
+ Note) you can use any of the provided sparsity configs or simply add yours!
26
+
27
+ Arguments:
28
+ config: required: Bert model config
29
+ sparsity_config: optional: this parameter determines sparsity pattern configuration; it is based on FixedSparsityConfig class.
30
+ """
31
+
32
+ super(BertSparseSelfAttention, self).__init__()
33
+ if config.hidden_size % config.num_attention_heads != 0:
34
+ raise ValueError("The hidden size (%d) is not a multiple of the number of attention "
35
+ "heads (%d)" % (config.hidden_size, config.num_attention_heads))
36
+ self.num_attention_heads = config.num_attention_heads
37
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
38
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
39
+
40
+ self.query = nn.Linear(config.hidden_size, self.all_head_size)
41
+ self.key = nn.Linear(config.hidden_size, self.all_head_size)
42
+ self.value = nn.Linear(config.hidden_size, self.all_head_size)
43
+
44
+ self.sparse_self_attention = SparseSelfAttention(sparsity_config)
45
+
46
+ def transpose_for_scores(self, x):
47
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
48
+ x = x.view(*new_x_shape)
49
+ return x.permute(0, 2, 1, 3)
50
+
51
+ def forward(self, hidden_states, attention_mask):
52
+ """Applies forward phase of bert sparse self attention
53
+
54
+ Arguments:
55
+ hidden_states: required: hidden_states tensor of the bert model
56
+ attn_mask: required: a mask tensor of size (SequenceLength X SequenceLength); currently only 2D is supported
57
+
58
+ Return:
59
+ context_layer: a dense tensor containing attention context
60
+ """
61
+ mixed_query_layer = self.query(hidden_states)
62
+ mixed_key_layer = self.key(hidden_states)
63
+ mixed_value_layer = self.value(hidden_states)
64
+
65
+ query_layer = self.transpose_for_scores(mixed_query_layer)
66
+ key_layer = self.transpose_for_scores(mixed_key_layer)
67
+ value_layer = self.transpose_for_scores(mixed_value_layer)
68
+
69
+ context_layer = self.sparse_self_attention(query_layer,
70
+ key_layer,
71
+ value_layer,
72
+ key_padding_mask=attention_mask)
73
+
74
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
75
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size, )
76
+ context_layer = context_layer.view(*new_context_layer_shape)
77
+ return context_layer
venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/matmul.py ADDED
@@ -0,0 +1,819 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ # DeepSpeed note, code taken & adapted from commit 9aa94789f13ada713af36cfd8cca2fc9a7f6b79a
7
+ # https://github.com/ptillet/torch-blocksparse/blob/master/torch_blocksparse/matmul.py
8
+ import importlib
9
+ import torch
10
+
11
+ import triton
12
+ import triton.language as tl
13
+ import triton._C.libtriton as libtriton
14
+ from deepspeed.accelerator import get_accelerator
15
+
16
+
17
+ @triton.jit
18
+ def _kernel(A, B, C, stride_za, stride_ha, stride_ma, stride_ka, stride_zb, stride_hb, stride_kb, stride_nb, stride_zc,
19
+ stride_hc, stride_mc, stride_nc, DS0, DS1, SDD_K, SDD_off_width, lut, locks, nlocks, **meta):
20
+ TM = meta['TM']
21
+ TN = meta['TN']
22
+ TK = meta['TK']
23
+ TZ = meta['TZ']
24
+ BLOCK = meta['BLOCK']
25
+ #------------#
26
+ #- Prologue -#
27
+ #------------#
28
+ pid0 = tl.program_id(0)
29
+ pid1 = tl.program_id(1)
30
+ pidz = tl.program_id(2)
31
+ if meta['SDD']:
32
+ pid1 = pid1 + SDD_off_width
33
+ blockidm = tl.arange(0, TM) // BLOCK
34
+ blockidn = tl.arange(0, TN) // BLOCK
35
+ offlutm = blockidm * (TN // BLOCK) * 4
36
+ offlutn = blockidn * 4
37
+ header = lut + pid1 * (TM // BLOCK) * (TN // BLOCK) * 4
38
+ z = tl.load(header + 0)
39
+ i = tl.load(header + 1 + offlutm)
40
+ j = tl.load(header + 2 + offlutn)
41
+ AS1 = SDD_K // TZ
42
+ lockid = tl.where(TZ > 1, 1, 0)
43
+ offka = pid0 * AS1
44
+ offkb = pid0 * AS1
45
+ offmc = 0
46
+ offnc = 0
47
+ offpa = 0
48
+ offpb = 0
49
+ maxid = TZ
50
+ offhc = 0
51
+ offha = z
52
+ offhb = z
53
+ ram = i * BLOCK + (tl.arange(0, TM) % BLOCK)
54
+ rbn = j * BLOCK + (tl.arange(0, TN) % BLOCK)
55
+ else:
56
+ header = lut + pid0 * 6
57
+ offset = tl.load(header + 0)
58
+ AS1 = tl.load(header + 1)
59
+ column = tl.load(header + 2)
60
+ depth = tl.load(header + 3)
61
+ lockid = tl.load(header + 4)
62
+ maxid = tl.load(header + 5)
63
+ pinc = lut + offset
64
+ offhc = depth
65
+ if meta['DSD']:
66
+ # output offset
67
+ offnc = pid1 * TN
68
+ offmc = column * TM
69
+ offpc = 0
70
+ # dense input offset
71
+ offnb = pid1 * TN
72
+ offkb = tl.load(pinc)
73
+ offkb = tl.multiple_of(offkb, 8) # compiler hint
74
+ offpb = 0
75
+ # sparse input offset
76
+ offma = 0
77
+ offka = 0
78
+ offpa = tl.load(pinc + 1)
79
+ offpa = tl.multiple_of(offpa, 8) # compiler hint
80
+ offpa = offpa * BLOCK * BLOCK
81
+ offha = 0
82
+ offhb = depth
83
+ else:
84
+ # output offset
85
+ offmc = pid1 * TM
86
+ offnc = column * TN
87
+ offpc = 0
88
+ # dense input offset
89
+ offma = pid1 * TM
90
+ offka = tl.load(pinc)
91
+ offka = tl.multiple_of(offka, 8) # compiler hint
92
+ offpa = 0
93
+ # sparse input offset
94
+ offnb = 0
95
+ offkb = 0
96
+ offpb = tl.load(pinc + 1)
97
+ offpb = tl.multiple_of(offpb, 8) # compiler hint
98
+ offpb = offpb * BLOCK * BLOCK
99
+ offha = depth
100
+ offhb = 0
101
+ ram = offma + tl.arange(0, TM)
102
+ rbn = offnb + tl.arange(0, TN)
103
+
104
+ # initialize a, b pointers
105
+ rka = offka + tl.arange(0, TK)
106
+ rkb = offkb + tl.arange(0, TK)
107
+ pa = A + pidz * stride_za + offha * stride_ha + offpa + ram[:, None] * stride_ma + rka[None, :] * stride_ka
108
+ pb = B + pidz * stride_zb + offhb * stride_hb + offpb + rbn[None, :] * stride_nb + rkb[:, None] * stride_kb
109
+ if meta['DDS']:
110
+ checkam = ram[:, None] < DS0
111
+ else:
112
+ checkam = AS1 > 0
113
+ if meta['DSD']:
114
+ checkbn = rbn[None, :] < DS0
115
+ else:
116
+ checkbn = AS1 > 0
117
+ a = tl.load(pa, mask=checkam, other=0.)
118
+ b = tl.load(pb, mask=checkbn, other=0.)
119
+
120
+ ## ---------------- ##
121
+ ## Inner Loop ##
122
+ ## ---------------- ##
123
+ acc = tl.zeros((TM, TN), dtype=tl.float32)
124
+ for k in range(AS1, 0, -TK):
125
+ acc += tl.dot(a, b)
126
+ if meta['SDD']:
127
+ inc_a = TK * stride_ka
128
+ inc_b = TK * stride_kb
129
+ else:
130
+ pinc += 2
131
+ if meta['DSD']:
132
+ inc_b = tl.load(pinc)
133
+ inc_a = tl.load(pinc + 1)
134
+ inc_b = tl.multiple_of(inc_b, 8)
135
+ inc_a = tl.multiple_of(inc_a, 8)
136
+ inc_b = inc_b * stride_kb
137
+ if meta['DDS']:
138
+ inc_a = tl.load(pinc)
139
+ inc_b = tl.load(pinc + 1)
140
+ inc_a = tl.multiple_of(inc_a, 8)
141
+ inc_b = tl.multiple_of(inc_b, 8)
142
+ inc_a = inc_a * stride_ka
143
+ pa += inc_a
144
+ pb += inc_b
145
+ # pre-fetch
146
+ checkak = k > TK
147
+ checkbk = k > TK
148
+ checka = checkam & checkak
149
+ checkb = checkbn & checkbk
150
+ a = tl.load(pa, mask=checka)
151
+ b = tl.load(pb, mask=checkb)
152
+ c = acc.to(C.dtype.element_ty)
153
+
154
+ if meta['SDD']:
155
+ checkc = True
156
+ rr_blockidm = tl.arange(0, TM) // BLOCK
157
+ rr_blockidn = tl.arange(0, TN) // BLOCK
158
+ rr_offlutm = rr_blockidm * (TN // BLOCK) * 4
159
+ rr_offlutn = rr_blockidn * 4
160
+ off_bkid = 3 + rr_offlutm[:, None] + rr_offlutn[None, :]
161
+ bkid = tl.load(header + off_bkid)
162
+ offpc = bkid * BLOCK * BLOCK
163
+ rcm = tl.arange(0, TM) % BLOCK
164
+ rcn = tl.arange(0, TN) % BLOCK
165
+ else:
166
+ rcm = offmc + tl.arange(0, TM)
167
+ rcn = offnc + tl.arange(0, TN)
168
+ if meta['DSD']:
169
+ checkc = rcn[None, :] < DS0
170
+ if meta['DDS']:
171
+ checkc = rcm[:, None] < DS0
172
+
173
+ pc = C + offpc + offhc * stride_hc + pidz * stride_zc + rcm[:, None] * stride_mc + rcn[None, :] * stride_nc
174
+ # write-back directly
175
+ if lockid == 0:
176
+ tl.store(pc, c, mask=checkc)
177
+ # accumulate partial results using spin-locks
178
+ else:
179
+ plock = locks + tl.program_id(2) * nlocks * tl.num_programs(1) + tl.program_id(1) * nlocks + lockid - 1
180
+ pcount = plock + tl.num_programs(2) * tl.num_programs(1) * nlocks
181
+ while tl.atomic_cas(plock, 0, 1) == 1:
182
+ pass
183
+ count = tl.load(pcount)
184
+ if count == 0:
185
+ tl.store(pc, c, mask=checkc)
186
+ else:
187
+ d = tl.load(pc, mask=checkc)
188
+ tl.store(pc, d + c, mask=checkc)
189
+ tl.atomic_xchg(pcount, (count + 1) % maxid)
190
+ tl.atomic_xchg(plock, 0)
191
+
192
+
193
+ ##############
194
+ # MAIN API #
195
+ ##############
196
+ class _sparse_matmul(torch.autograd.Function):
197
+
198
+ sdd_cache = dict()
199
+ dsd_cache = dict()
200
+ dds_cache = dict()
201
+ locks = dict()
202
+
203
+ # Given an array sizes representing reduction size for each
204
+ # column of a block-mode matrix multiplication,
205
+ # performs load-balancing to achieve more smaller reductions
206
+ # between `seg_size` elements
207
+ @staticmethod
208
+ def load_balance(sizes, block):
209
+ #global triton
210
+ #if triton is None:
211
+ # triton = importlib.import_module('triton')
212
+ # segment size
213
+ # heuristics taken from OpenAI blocksparse code
214
+ # https://github.com/openai/blocksparse/blob/master/blocksparse/matmul.py#L95
215
+ max_size = sizes.max()
216
+ min_size = sizes[sizes != 0].min()
217
+ #if max_size > min_size * 2.0:
218
+ # seg_max = max(triton.cdiv(max_size, 4), min_size*2)
219
+ #else:
220
+ # seg_max = max_size
221
+ seg_max = max_size
222
+ seg_min = max(triton.cdiv(seg_max, 4), 4)
223
+ # split reduction into segments
224
+ div = sizes // seg_max
225
+ rem = sizes % seg_max
226
+ packs = div + (sizes < seg_min).long() + (rem >= seg_min).long()
227
+ width = packs.sum()
228
+ segments = torch.empty(width, dtype=sizes.dtype)
229
+ column = torch.empty_like(segments)
230
+ lockid = torch.zeros_like(segments)
231
+ maxid = torch.zeros_like(segments)
232
+ nlocks = 0
233
+ current = 0
234
+ col_idx = 0
235
+ for i in range(len(sizes)):
236
+ d, r = div[i], rem[i]
237
+ isempty = sizes[i] < seg_min
238
+ last = current + d + (r >= seg_min) + isempty
239
+ # column id
240
+ column[current:last] = col_idx
241
+ # lock id
242
+ if d > 1 or (d == 1 and r >= seg_min):
243
+ nlocks += 1
244
+ lockid[current:last] = nlocks
245
+ maxid[current:last] = last - current
246
+ # segment size
247
+ segments[current:current + d] = seg_max
248
+ if r < seg_min and not isempty:
249
+ segments[current + d - 1] += r
250
+ if r >= seg_min or isempty:
251
+ segments[current + d] = r
252
+ current = last
253
+ col_idx += 1
254
+ offsets = torch.zeros_like(segments)
255
+ offsets[1:] = torch.cumsum(segments[:-1], dim=0)
256
+ return segments, column, lockid, maxid, offsets
257
+
258
+ @staticmethod
259
+ def get_locks(size, dev):
260
+ if dev not in _sparse_matmul.locks or \
261
+ size > _sparse_matmul.locks[dev].size(0):
262
+ _sparse_matmul.locks[dev] = torch.zeros(size, dtype=torch.int32, device=dev)
263
+ return _sparse_matmul.locks[dev]
264
+
265
+ ##########################
266
+ # SPARSE = DENSE x DENSE #
267
+ ##########################
268
+
269
+ @staticmethod
270
+ def make_sdd_lut(layout, block, dtype, device):
271
+ #_sparse_matmul._load_utils()
272
+ #start_width = 64 // block
273
+ #segmented = _sparse_matmul.sdd_segment(layout.type(torch.int32), start_width)
274
+ start_width = (128 if block > 16 else 32) // block
275
+ layout = layout.type(torch.int32)
276
+ segmented = libtriton.superblock(layout.data_ptr(), layout.shape[0], layout.shape[1], layout.shape[2],
277
+ start_width)
278
+ luts, widths, packs = [], [], []
279
+ for size, nnz in segmented:
280
+ """ width = nnz.shape[0] // (size * size)
281
+ h = nnz[:, 0]
282
+ i = nnz[:, 1]
283
+ j = nnz[:, 2]
284
+ b = nnz[:, 3]
285
+ lut = torch.stack((h, i, j, b), dim=1).view(-1).contiguous()
286
+ luts.append(lut.type(torch.int32).to(device))
287
+ widths.append(width)
288
+ packs.append(size) """
289
+ nnz = nnz.reshape(-1, 4)
290
+ width = nnz.shape[0] // (size * size)
291
+ luts.append(torch.from_numpy(nnz).type(torch.int32).to(device))
292
+ widths.append(width)
293
+ packs.append(size)
294
+ # create locks
295
+ return luts, None, widths, packs
296
+
297
+ @staticmethod
298
+ def _sdd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, luts, num_locks, widths, packs, bench, time):
299
+ if trans_c:
300
+ a, b = b, a
301
+ trans_a, trans_b = not trans_b, not trans_a
302
+ AS0 = a.size(0)
303
+ # Shape check
304
+ a_dim = -2 if trans_a else -1
305
+ b_dim = -1 if trans_b else -2
306
+ a_inner, b_inner = a.shape[a_dim], b.shape[b_dim]
307
+ if a_inner != b_inner:
308
+ raise ValueError(f"Size of tensor A along the {a_dim} dim ({a_inner}) must match size "
309
+ f"of tensor B along the {b_dim} dim ({b_inner})")
310
+ if a_inner % 16 != 0:
311
+ raise ValueError('Reduction size for SDD must be a multiple of 16')
312
+
313
+ batch_size = a.size(0)
314
+ a_outer = a.size(3 if trans_a else 2)
315
+ dtype = a.dtype
316
+ is_16_multiple = a_inner % 16 == 0
317
+ is_32_multiple = a_inner % 32 == 0
318
+ is_64_multiple = a_inner % 64 == 0
319
+ if not is_16_multiple:
320
+ raise ValueError('Reduction size for SDD must be a multiple of 16')
321
+ device = a.device
322
+ # create kernel
323
+ total_width = sum([width * pack * pack for width, pack in zip(widths, packs)])
324
+ c = torch.empty((batch_size, total_width, block, block), dtype=dtype, device=a.device)
325
+ for lut, width, pack in zip(luts, widths, packs):
326
+ F32TK = [8, 16]
327
+ F16TK = [16]
328
+ F16TK += [32] if is_32_multiple else []
329
+ F16TK += [64] if is_64_multiple else []
330
+ TK = {torch.float32: F32TK, torch.float16: F16TK}[dtype]
331
+ num_lock = 1
332
+ meta = {
333
+ 'TM': block * pack,
334
+ 'TN': block * pack,
335
+ 'BLOCK': block,
336
+ 'TK': TK[0],
337
+ 'TZ': 1,
338
+ 'SDD': True,
339
+ 'DSD': False,
340
+ 'DDS': False
341
+ }
342
+ # create output
343
+ locks = _sparse_matmul.get_locks(2 * width * AS0 * num_lock, a.device)
344
+ # maximum grid size is 65535
345
+ # so operation might be decomposed into multiple
346
+ # kernel calls
347
+ max_width = 49152
348
+ total = 0 if bench else None
349
+ for off_width in range(0, width, max_width):
350
+ grid = lambda meta: [meta['TZ'], min(max_width, width - off_width), batch_size]
351
+ _kernel[grid](a,
352
+ b,
353
+ c,
354
+ a.stride(0),
355
+ a.stride(1),
356
+ a.stride(3 if trans_a else 2),
357
+ a.stride(2 if trans_a else 3),
358
+ b.stride(0),
359
+ b.stride(1),
360
+ b.stride(3 if trans_b else 2),
361
+ b.stride(2 if trans_b else 3),
362
+ c.stride(0),
363
+ c.stride(0),
364
+ c.stride(2),
365
+ c.stride(3),
366
+ a_outer,
367
+ a_outer,
368
+ a_inner,
369
+ off_width,
370
+ lut,
371
+ locks,
372
+ num_lock,
373
+ num_warps=4,
374
+ **meta)
375
+ # save for backward pass
376
+ return c
377
+
378
+ ##########################
379
+ # DENSE = DENSE x SPARSE #
380
+ ##########################
381
+
382
+ # Given a binary layout of 0s and 1s,
383
+ # Construct look-up table for efficient execution on GPUs
384
+ @staticmethod
385
+ def make_dxx_lut(layout, block, step, trans, device, transform=lambda idx: idx):
386
+ # load-balancing
387
+ _empty = torch.tensor([], dtype=torch.int64, device=layout.device)
388
+ segments = _empty.clone()
389
+ column = _empty.clone()
390
+ depth = _empty.clone()
391
+ lockid = _empty.clone()
392
+ maxid = _empty.clone()
393
+ offsets = _empty.clone()
394
+ current_offset = 0
395
+ current_maxid = 0
396
+ for z in range(layout.size(0)):
397
+ if trans:
398
+ sizes = torch.sum(layout[z, :, :], 1)
399
+ else:
400
+ sizes = torch.sum(layout[z, :, :], 0)
401
+ z_segments, z_column, z_lockid, z_maxid, z_offsets = _sparse_matmul.load_balance(sizes, block)
402
+ z_depth = z * torch.ones_like(z_segments)
403
+ z_lockid[z_lockid > 0] += current_maxid
404
+ current_maxid = z_lockid.max()
405
+ # concatenate depth
406
+ segments = torch.cat((segments, z_segments))
407
+ column = torch.cat((column, z_column))
408
+ depth = torch.cat((depth, z_depth))
409
+ maxid = torch.cat((maxid, z_maxid))
410
+ offsets = torch.cat((offsets, current_offset + z_offsets))
411
+ lockid = torch.cat((lockid, z_lockid))
412
+ current_offset += layout[z, :, :].sum()
413
+ segments *= step
414
+ # pointer increments
415
+ if trans:
416
+ nnz = layout.nonzero()
417
+ else:
418
+ nnz = layout.transpose(1, 2).nonzero()
419
+ num_blocks = nnz.size(0)
420
+ offsets = torch.min(offsets, (num_blocks - 1) * torch.ones_like(offsets))
421
+ idx = transform(nnz[:, 2] * block)
422
+ xincs = idx.clone()
423
+ xincs[1:] -= idx[:-1]
424
+ # divide block into multiple steps
425
+ div = block // step
426
+ xincs = xincs.view(-1, 1).repeat(1, div)
427
+ xincs[:, 1:] = step
428
+ xincs[:, 0] -= (div - 1) * step
429
+ # first increment for each reduction is actually the offset
430
+ xincs[offsets[segments > 0], 0] = idx[offsets[segments > 0]]
431
+ xincs = xincs.view(-1)
432
+ # block-mode input increments
433
+ if trans:
434
+ widx = torch.arange(num_blocks)
435
+ else:
436
+ widx = _empty.clone()
437
+ current_offset = 0
438
+ for z in range(layout.size(0)):
439
+ layoutw = layout[z, :, :].clone()
440
+ msum = layoutw.sum()
441
+ layoutw[layoutw > 0] = 1 + torch.arange(msum)
442
+ widx = torch.cat((widx, current_offset + layoutw.T[layoutw.T > 0] - 1))
443
+ current_offset += msum
444
+ widx = widx
445
+ wincs = widx * block * block
446
+ wincs[1:] -= widx[:-1] * block * block
447
+ wincs = wincs.view(-1, 1).repeat(1, div)
448
+ if trans:
449
+ wincs[:, 1:] = step
450
+ wincs[:, 0] -= (div - 1) * step
451
+ else:
452
+ wincs[:, 1:] = step * block
453
+ wincs[:, 0] -= (div - 1) * step * block
454
+ wincs[offsets[segments > 0], 0] = widx[offsets[segments > 0]]
455
+ wincs = wincs.view(-1)
456
+ # adjust offset and segment size
457
+ offsets *= 2 * div
458
+ segments *= div
459
+ # create header
460
+ width = column.size(0)
461
+ offsets += 6 * width
462
+ header = torch.stack((offsets, segments, column, depth, lockid, maxid), dim=1).view(-1).contiguous()
463
+ incs = torch.stack((xincs, wincs), dim=1).view(-1).contiguous()
464
+ incs = torch.cat((incs, torch.zeros(2, device=incs.device, dtype=incs.dtype)))
465
+ # create lut
466
+ lut = torch.cat((header, incs))
467
+ lut = lut.type(torch.int32).to(device)
468
+ # create locks
469
+ num_locks = max(1, lockid.max())
470
+ return lut, num_locks, width, None
471
+
472
+ @staticmethod
473
+ def _dds_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, num_locks, width, packs, bench, time):
474
+ global triton
475
+ if triton is None:
476
+ triton = importlib.import_module('triton')
477
+
478
+ # shapes / dtypes
479
+ AS0 = a.size(0)
480
+ AS1 = a.size(1)
481
+ AS2 = a.size(3 if trans_a else 2)
482
+ AS3 = a.size(2 if trans_a else 3)
483
+ BS0 = spdims[0]
484
+ BS1 = block * spdims[2 if trans_b else 1]
485
+ BS2 = block * spdims[1 if trans_b else 2]
486
+ dtype = a.dtype
487
+ # kernel
488
+ meta = {'TN': block, 'TM': 128, 'TK': 16, 'BLOCK': block, 'TZ': 1, 'SDD': False, 'DSD': False, 'DDS': True}
489
+ # output
490
+ CS0 = AS0
491
+ CS1 = AS1
492
+ CS2 = BS2 if trans_c else AS2
493
+ CS3 = AS2 if trans_c else BS2
494
+ locks = _sparse_matmul.get_locks(2 * AS0 * AS2 // 32 * num_locks, a.device)
495
+ c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)
496
+ grid = lambda meta: [width, triton.cdiv(AS2, meta['TM']), AS0]
497
+ _kernel[grid](a,
498
+ b,
499
+ c,
500
+ a.stride(0),
501
+ a.stride(1),
502
+ a.stride(3 if trans_a else 2),
503
+ a.stride(2 if trans_a else 3),
504
+ b.stride(0),
505
+ b.stride(1),
506
+ b.stride(3 if trans_b else 2),
507
+ b.stride(2 if trans_b else 3),
508
+ c.stride(0),
509
+ c.stride(1),
510
+ c.stride(3 if trans_c else 2),
511
+ c.stride(2 if trans_c else 3),
512
+ AS2,
513
+ BS2,
514
+ 0,
515
+ 0,
516
+ lut,
517
+ locks,
518
+ num_locks,
519
+ num_warps=4,
520
+ **meta)
521
+ return c
522
+
523
+ @staticmethod
524
+ def _dsd_matmul(a, b, trans_a, trans_b, trans_c, spdims, block, lut, num_locks, width, packs, bench, time):
525
+ global triton
526
+ if triton is None:
527
+ triton = importlib.import_module('triton')
528
+
529
+ # shapes / dtypes
530
+ AS0 = spdims[0]
531
+ AS1 = block * spdims[2 if trans_a else 1]
532
+ AS2 = block * spdims[1 if trans_a else 2]
533
+ BS0 = b.size(0)
534
+ BS1 = b.size(1)
535
+ BS2 = b.size(3 if trans_b else 2)
536
+ BS3 = b.size(2 if trans_b else 3)
537
+ dtype = a.dtype
538
+ # kernel
539
+
540
+ meta = {'TM': block, 'TN': 128, 'TK': 16, 'BLOCK': block, 'TZ': 1, 'SDD': False, 'DSD': True, 'DDS': False}
541
+ # output
542
+ CS0 = BS0
543
+ CS1 = BS1
544
+ CS2 = BS3 if trans_c else AS1
545
+ CS3 = AS1 if trans_c else BS3
546
+ locks = _sparse_matmul.get_locks(2 * BS0 * BS3 // 32 * num_locks, a.device)
547
+ c = torch.empty((CS0, CS1, CS2, CS3), dtype=dtype, device=a.device)
548
+ grid = lambda meta: [width, triton.cdiv(BS3, meta['TN']), BS0]
549
+ _kernel[grid](a,
550
+ b,
551
+ c,
552
+ a.stride(0),
553
+ a.stride(1),
554
+ a.stride(3 if trans_a else 2),
555
+ a.stride(2 if trans_a else 3),
556
+ b.stride(0),
557
+ b.stride(1),
558
+ b.stride(3 if trans_b else 2),
559
+ b.stride(2 if trans_b else 3),
560
+ c.stride(0),
561
+ c.stride(1),
562
+ c.stride(2),
563
+ c.stride(3),
564
+ BS3,
565
+ AS1,
566
+ 0,
567
+ 0,
568
+ lut,
569
+ locks,
570
+ num_locks,
571
+ num_warps=4,
572
+ **meta)
573
+ return c
574
+
575
+ fn = {'sdd': _sdd_matmul.__get__(object), 'dsd': _dsd_matmul.__get__(object), 'dds': _dds_matmul.__get__(object)}
576
+
577
+ @staticmethod
578
+ def forward(ctx, a, b, trans_a, trans_b, trans_c, mode, spdims, block, c_lut, c_num_locks, c_width, c_packs,
579
+ c_bench, c_time, da_lut, da_num_locks, da_width, da_packs, da_bench, da_time, db_lut, db_num_locks,
580
+ db_width, db_packs, db_bench, db_time):
581
+ c = _sparse_matmul.fn[mode](a, b, trans_a, trans_b, trans_c, spdims, block, c_lut, c_num_locks, c_width,
582
+ c_packs, c_bench, c_time)
583
+ # save for backward
584
+ ctx.save_for_backward(a, b)
585
+ ctx.da_num_locks = da_num_locks
586
+ ctx.da_lut = da_lut
587
+ ctx.da_width = da_width
588
+ ctx.da_packs = da_packs
589
+ ctx.da_bench = da_bench
590
+ ctx.da_time = da_time
591
+ ctx.db_lut = db_lut
592
+ ctx.db_num_locks = db_num_locks
593
+ ctx.db_width = db_width
594
+ ctx.db_bench = db_bench
595
+ ctx.db_packs = db_packs
596
+ ctx.db_time = db_time
597
+ ctx.mode = mode
598
+ ctx.spdims = spdims
599
+ ctx.block = block
600
+ ctx.trans_a = trans_a
601
+ ctx.trans_b = trans_b
602
+ return c
603
+
604
+ @staticmethod
605
+ def backward(ctx, dc):
606
+ # saved for backward
607
+ a, b = ctx.saved_tensors
608
+ mode = ctx.mode
609
+ # gradients w.r.t. a
610
+ if ctx.needs_input_grad[0]:
611
+ mode_da = mode[1] + mode[0] + mode[2]
612
+ da = _sparse_matmul.fn[mode_da](dc, b, False, not ctx.trans_b, ctx.trans_a, ctx.spdims, ctx.block,
613
+ ctx.da_lut, ctx.da_num_locks, ctx.da_width, ctx.da_packs, ctx.da_bench,
614
+ ctx.da_time)
615
+ # gradients w.r.t. b
616
+ if ctx.needs_input_grad[1]:
617
+ mode_db = mode[2] + mode[1] + mode[0]
618
+ db = _sparse_matmul.fn[mode_db](a, dc, not ctx.trans_a, False, ctx.trans_b, ctx.spdims, ctx.block,
619
+ ctx.db_lut, ctx.db_num_locks, ctx.db_width, ctx.db_packs, ctx.db_bench,
620
+ ctx.db_time)
621
+ return da, db, None, None, None,\
622
+ None, None, None, None,\
623
+ None, None, None, None, None, None,\
624
+ None, None, None, None, None, None,\
625
+ None, None, None, None, None, None
626
+
627
+
628
+ class MatMul:
629
+ """Block-Sparse MatMul class; this class handles three types of matrix-multiplication:
630
+ - sparse = dense X dense
631
+ - dense = sparse X dense
632
+ - dense = dense X sparse
633
+
634
+ For more details about sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
635
+ """
636
+
637
+ def make_lut(self, dtype, device):
638
+ """Generates the sparsity layout/s used in block-sparse matmul
639
+ """
640
+ key = (dtype, device)
641
+ if key in self.lut_cache:
642
+ return self.lut_cache[key]
643
+ # C look-up table
644
+ layout, block = self.layout, self.block
645
+ step = 16
646
+ if self.mode == 'sdd':
647
+ c_lut, c_num_locks, c_width, c_packs = _sparse_matmul.make_sdd_lut(layout, block, dtype, device)
648
+ elif self.mode == 'dsd':
649
+ c_lut, c_num_locks, c_width, c_packs = _sparse_matmul.make_dxx_lut(layout, block, step, not self.trans_a,
650
+ device)
651
+ elif self.mode == 'dds':
652
+ c_lut, c_num_locks, c_width, c_packs = _sparse_matmul.make_dxx_lut(layout, block, step, self.trans_b,
653
+ device)
654
+ # DA look-up table
655
+ if self.mode == 'sdd':
656
+ da_lut, da_num_locks, da_width, da_packs = _sparse_matmul.make_dxx_lut(layout, block, step, True, device)
657
+ elif self.mode == 'dsd':
658
+ da_lut, da_num_locks, da_width, da_packs = _sparse_matmul.make_sdd_lut(layout, block, dtype, device)
659
+ elif self.mode == 'dds':
660
+ da_lut, da_num_locks, da_width, da_packs = _sparse_matmul.make_dxx_lut(layout, block, step,
661
+ not self.trans_b, device)
662
+ # DB look-up table
663
+ if self.mode == 'sdd':
664
+ db_lut, db_num_locks, db_width, db_packs = _sparse_matmul.make_dxx_lut(layout, block, step, False, device)
665
+ elif self.mode == 'dsd':
666
+ db_lut, db_num_locks, db_width, db_packs = _sparse_matmul.make_dxx_lut(layout, block, step, self.trans_a,
667
+ device)
668
+ elif self.mode == 'dds':
669
+ db_lut, db_num_locks, db_width, db_packs = _sparse_matmul.make_sdd_lut(layout, block, dtype, device)
670
+ self.lut_cache[key] = (c_lut, c_num_locks, c_width, c_packs,\
671
+ da_lut, da_num_locks, da_width, da_packs,\
672
+ db_lut, db_num_locks, db_width, db_packs)
673
+ return self.lut_cache[key]
674
+
675
+ def __init__(self, layout, block, mode, trans_a=False, trans_b=False, bench=False):
676
+ """Initialize the Block-Sparse MatMul class.
677
+
678
+ Arguments:
679
+ layout: required: sparsity layout tensor
680
+ block: required: an integer determining the block size.
681
+ mode: required: a string determining type of matmul; ('sdd') sparse = dense X dense, ('dsd') dense = sparse X dense, ('dds') dense = dense X sparse
682
+ trans_a: optional: a boolean determining if multiplication needs to be applied on transpose of input a; default is false
683
+ trans_b: optional: a boolean determining if multiplication needs to be applied on transpose of input b; default is false
684
+ bench: optional: set if you want to do benchmarking
685
+ """
686
+
687
+ if mode not in ['sdd', 'dsd', 'dds']:
688
+ raise NotImplementedError('Supported modes are: sdd, dsd, dds')
689
+ # look-up table cache
690
+ self.lut_cache = dict()
691
+ # attributes
692
+ self.trans_a = trans_a
693
+ self.trans_b = trans_b
694
+ self.mode = mode
695
+ self.block = block
696
+ self.layout = layout
697
+ layout_dim = layout.ndim
698
+ assert layout_dim in (2, 3), "Layout should be a 2 or 3 dimensional tensor of 0s and 1s"
699
+ if not mode == 'sdd':
700
+ # Dims to be reduced on the 'inside' of the matmul, either -1 or -2
701
+ trans_dense, trans_sparse, sparse_inner = (trans_b, trans_a, -1) if mode == 'dsd' else (trans_a, trans_b,
702
+ -2)
703
+ self.dense_inner_dim = -((sparse_inner % 2) + 1) if not trans_dense else sparse_inner
704
+ sparse_inner = sparse_inner if not trans_sparse else -((sparse_inner % 2) + 1)
705
+
706
+ # Inner dim of the dense input should be equal to the inner dim of the sparse input
707
+ self.dense_inner_size = layout.shape[sparse_inner] * block
708
+ # Expected shape for sparse inputs
709
+ self.sparse_shape = (layout.sum().item(), block, block)
710
+
711
+ # Support using the same layout across attention heads etc.
712
+ if layout_dim == 2:
713
+ layout = layout.unsqueeze(0)
714
+
715
+ layout = layout.long() # Above code assumes the layout tensor is an integral type
716
+
717
+ self.spdims = layout.shape
718
+ # timings
719
+ self.bench = bench
720
+ self.time_c = None
721
+ self.time_da = None
722
+ self.time_db = None
723
+
724
+ # pad shapes of a tensor to make it
725
+ # compatible with kernel calls
726
+ @staticmethod
727
+ def _pad_shape(x, is_sparse):
728
+ max_dim = 3 if is_sparse else 4
729
+ for i in range(max_dim - x.dim()):
730
+ x = x.unsqueeze(0)
731
+ return x
732
+
733
+ def __call__(self, a, b):
734
+ """Applies Block-Sparse MatMul.
735
+
736
+ For more details about sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
737
+
738
+ Arguments:
739
+ a: required: a dense/block-sparse tensor; first input of mat-mul
740
+ b: required: a dense/block-sparse tensor; second input of mat-mul
741
+
742
+ Return:
743
+ c: a dense/block-sparse tensor result of a X b
744
+ """
745
+
746
+
747
+ c_lut, c_num_locks, c_width, c_packs,\
748
+ da_lut, da_num_locks, da_width, da_packs,\
749
+ db_lut, db_num_locks, db_width, db_packs = self.make_lut(a.dtype, a.device)
750
+ # timings
751
+ time_c = [None]
752
+ time_da = [None]
753
+ time_db = [None]
754
+
755
+ original_dims = max(a.ndim, b.ndim)
756
+ a, b = self._validate_inputs(a, b)
757
+
758
+ # pad shapes with ones
759
+ a = MatMul._pad_shape(a, self.mode == 'dsd')
760
+ b = MatMul._pad_shape(b, self.mode == 'dds')
761
+ # execute
762
+
763
+ c = _sparse_matmul.apply(a, b, self.trans_a, self.trans_b, False, self.mode, self.spdims, self.block, c_lut,
764
+ c_num_locks, c_width, c_packs, self.bench, time_c, da_lut, da_num_locks, da_width,
765
+ da_packs, self.bench, time_da, db_lut, db_num_locks, db_width, db_packs, self.bench,
766
+ time_db)
767
+
768
+ # This removes any leading singleton dimensions we may have added to the tensor that weren't in the input
769
+ dims_to_trim = c.ndim - original_dims
770
+ for _ in range(dims_to_trim):
771
+ c = c.squeeze(0)
772
+
773
+ self.time_c = time_c[0]
774
+ self.time_da = time_da[0]
775
+ self.time_db = time_db[0]
776
+ return c
777
+
778
+ def _validate_inputs(self, a, b):
779
+ if a.device != b.device:
780
+ raise ValueError(f"Inputs must be on the same device; got {a.device} for tensor A "
781
+ f"and {b.device} for tensor B")
782
+ if not get_accelerator().on_accelerator(a):
783
+ raise ValueError("Only GPU devices are supported for now")
784
+
785
+ # When autocast is enabled, torch.matmul autocasts to float16, so we do the same here
786
+ if torch.is_autocast_enabled():
787
+ a, b = a.half(), b.half()
788
+ elif a.dtype != b.dtype:
789
+ raise ValueError(f"Inputs must be the same dtype; got {a.dtype} for A and {b.dtype} for B")
790
+
791
+ mode, trans_a, trans_b = self.mode, self.trans_a, self.trans_b
792
+ if mode != 'sdd':
793
+ # One input is sparse
794
+ dense, dense_name, sparse, sparse_name = (a, 'A', b, 'B') if mode == 'dds' else (b, 'B', a, 'A')
795
+ dense_inner = dense.shape[self.dense_inner_dim]
796
+ if dense_inner != self.dense_inner_size:
797
+ raise ValueError(f"Expected tensor {dense_name} to have size {self.dense_inner_size} at dim "
798
+ f"{self.dense_inner_dim % dense.ndim}, got {dense_inner}.")
799
+
800
+ if sparse.shape[-len(self.sparse_shape):] != self.sparse_shape:
801
+ raise ValueError(f"Expected tensor with trailing dimensions of shape {self.sparse_shape} for argument "
802
+ f"{sparse_name}, got {sparse.shape}")
803
+
804
+ def add_extra_dims(x):
805
+ # Add extra leading singleton dimensions if needed
806
+ dims_needed = 4 - x.ndim
807
+ if dims_needed > 0:
808
+ singletons = [1] * dims_needed
809
+ x = x.view(*singletons, *x.shape)
810
+ elif dims_needed < 0:
811
+ raise ValueError("Tensors with more than 4 dimensions are not currently supported")
812
+
813
+ return x
814
+
815
+ # Pad shapes with leading singleton dimensions
816
+ a = add_extra_dims(a)
817
+ b = add_extra_dims(b)
818
+
819
+ return a, b
venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/softmax.py ADDED
@@ -0,0 +1,296 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ # DeepSpeed note, code taken & adapted from commit 9aa94789f13ada713af36cfd8cca2fc9a7f6b79a
7
+ # https://github.com/ptillet/torch-blocksparse/blob/master/torch_blocksparse/matmul.py
8
+
9
+ import torch
10
+
11
+ import triton
12
+ import triton.language as tl
13
+
14
+
15
+ def next_power_of_2(n):
16
+ n -= 1
17
+ n |= n >> 1
18
+ n |= n >> 2
19
+ n |= n >> 4
20
+ n |= n >> 8
21
+ n |= n >> 16
22
+ n += 1
23
+ return n
24
+
25
+
26
+ def num_warps(n):
27
+ if n < 512:
28
+ return 4
29
+ if n < 2048:
30
+ return 8
31
+ return 16
32
+
33
+
34
+ @triton.heuristics({'num_warps': lambda *args, **meta: num_warps(args[6] * meta['BLOCK'])})
35
+ @triton.heuristics({'TN': lambda *args, **meta: next_power_of_2(args[6] * meta['BLOCK'])})
36
+ @triton.jit
37
+ def _forward(X, scale, LUT, RPE, KP_M, ATTN_M, sizemax, stride_zx, stride_zrpe, stride_hrpe, stride_srpe, stride_zkpm,
38
+ stride_zattnm, **meta):
39
+ TN = meta['TN']
40
+ BLOCK = meta['BLOCK']
41
+ pidhm = tl.program_id(0)
42
+ pidz = tl.program_id(1)
43
+ # create index ranges
44
+ rxm = pidhm % BLOCK
45
+ rbm = pidhm // BLOCK
46
+ rxn = tl.arange(0, TN) % BLOCK
47
+ rbn = tl.arange(0, TN) // BLOCK
48
+ # extract information from LUT
49
+ header = LUT + rbm * 2
50
+ size = tl.load(header + 0)
51
+ offset = tl.load(header + 1)
52
+ check = rbn < size
53
+ rbmn = tl.where(check, rbn, size - 1)
54
+ # block id and column id
55
+ blockid = tl.load(LUT + offset + rbmn * 4 + 0)
56
+ columnid = tl.load(LUT + offset + rbmn * 4 + 1)
57
+ rowid = tl.load(LUT + offset + rbmn * 4 + 2)
58
+ headid = tl.load(LUT + offset + rbmn * 4 + 3)
59
+ # pointers to X
60
+ px = X + pidz * stride_zx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
61
+ x = tl.load(px, mask=check, other=-float('inf'))
62
+ x = x.to(tl.float32)
63
+ # apply scale
64
+ if meta['APPLY_SCALE']:
65
+ x = x * scale
66
+ # apply RPE
67
+ if meta['APPLY_RPE']:
68
+ prpe = RPE + pidz * stride_zrpe + headid * stride_hrpe + columnid * BLOCK + rowid * BLOCK * stride_srpe + rxm * stride_srpe + rxn
69
+ rpe = tl.load(prpe, mask=check, other=0)
70
+ x = x + rpe
71
+ # apply key-padding mask
72
+ if meta['APPLY_KP_MASK']:
73
+ pkp_m = KP_M + pidz * stride_zkpm + columnid * BLOCK + rxn
74
+ kp_m = tl.load(pkp_m, mask=check, other=-float('inf'))
75
+ if meta['KP_MASK_MUL']:
76
+ kp_m = tl.where(kp_m == 0, -float('inf'), 0.)
77
+ x = x + kp_m
78
+ # apply attention mask
79
+ if meta['APPLY_ATTN_MASK']:
80
+ pattn_m = ATTN_M + columnid * BLOCK + rowid * BLOCK * stride_zattnm + rxm * stride_zattnm + rxn
81
+ attn_m = tl.load(pattn_m, mask=check, other=-float('inf'))
82
+ if meta['ATTN_MASK_MUL']:
83
+ attn_m = tl.where(attn_m == 0, -float('inf'), 0.)
84
+ x = x + attn_m
85
+ # computation
86
+ x = tl.softmax(x)
87
+ tl.store(px, x, mask=check)
88
+
89
+
90
+ @triton.heuristics({'num_warps': lambda *args, **meta: num_warps(args[4] * meta['BLOCK'])})
91
+ @triton.heuristics({'TN': lambda *args, **meta: next_power_of_2(args[4]) * meta['BLOCK']})
92
+ @triton.jit
93
+ def _backward(X, scale, DX, LUT, sizemax, stride_zx, stride_zdx, **meta):
94
+ pidhm = tl.program_id(0)
95
+ pidz = tl.program_id(1)
96
+ TN = meta['TN']
97
+ BLOCK = meta['BLOCK']
98
+ # create index ranges
99
+ rxm = pidhm % BLOCK
100
+ rbm = pidhm // BLOCK
101
+ rxn = tl.arange(0, TN) % BLOCK
102
+ rbn = tl.arange(0, TN) // BLOCK
103
+ # extract information from look-up table
104
+ header = LUT + rbm * 2
105
+ size = tl.load(header + 0)
106
+ offset = tl.load(header + 1)
107
+ # bounds checking on lut
108
+ check = rbn < size
109
+ rbmn = tl.where(check, rbn, size - 1)
110
+ # initialize pointers to block-sparse input
111
+ blockid = tl.load(LUT + offset + rbmn * 4)
112
+ X = X + pidz * stride_zx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
113
+ DX = DX + pidz * stride_zdx + blockid * BLOCK * BLOCK + rxm * BLOCK + rxn
114
+ # compute fused softmax backward
115
+ x = tl.load(X, mask=check, other=0)
116
+ dx = tl.load(DX, mask=check, other=0)
117
+ x = x.to(tl.float32)
118
+ dx = dx.to(tl.float32)
119
+ y = x * (dx - tl.sum(x * dx, 0)) * scale
120
+ tl.store(DX, y, mask=check)
121
+
122
+
123
+ class _sparse_softmax(torch.autograd.Function):
124
+
125
+ bwd_kernels = dict()
126
+
127
+ @staticmethod
128
+ def make_lut(layout, block, device):
129
+ _empty = torch.tensor([], dtype=torch.int64, device=layout.device)
130
+ sizes = _empty.clone()
131
+ # sizes along rows
132
+ for h in range(layout.shape[0]):
133
+ sizes = torch.cat((sizes, layout[h, :, :].sum(-1)))
134
+ # offsets in block format
135
+ offsets = torch.zeros_like(sizes)
136
+ offsets[1:] = torch.cumsum(sizes[:-1], dim=0)
137
+ # block indices
138
+ idx = torch.arange(layout.sum())
139
+ head = layout.nonzero()[:, 0]
140
+ rows = layout.nonzero()[:, 1]
141
+ columns = layout.nonzero()[:, 2]
142
+ core = torch.stack((idx, columns, rows, head), dim=1).view(-1)
143
+ # construct look-up table
144
+ offsets = offsets * 4 + 2 * sizes.numel()
145
+ header = torch.stack((sizes, offsets), dim=1).view(-1)
146
+ lut = torch.cat((header, core)).type(torch.int32).to(device)
147
+ return lut, int(sizes.max())
148
+
149
+ @staticmethod
150
+ def forward(ctx, x, scale, rpe, key_padding_mask, attn_mask, kp_mask_mode, attn_mask_mode, spdims, block, lut,
151
+ num_blocks, maxlut, bench, time):
152
+
153
+ apply_scale = False if scale == 1.0 else True
154
+
155
+ # handle None rpe
156
+ if rpe is None:
157
+ apply_rpe = False
158
+ stride_zrpe, stride_hrpe, stride_srpe = 0, 0, 0
159
+ rpe = torch.empty(0, dtype=x.dtype, device=x.device)
160
+ else:
161
+ apply_rpe = True
162
+ stride_zrpe, stride_hrpe, stride_srpe = rpe.stride(0), rpe.stride(1), rpe.stride(2)
163
+
164
+ # handle None key_padding_mask
165
+ if key_padding_mask is None:
166
+ apply_kp_mask = False
167
+ stride_zkpm = 0
168
+ key_padding_mask = torch.empty(0, dtype=x.dtype, device=x.device)
169
+ else:
170
+ apply_kp_mask = True
171
+ stride_zkpm = key_padding_mask.stride(0)
172
+
173
+ # handle None attention_mask
174
+ if attn_mask is None:
175
+ apply_attn_mask = False
176
+ stride_zattnm = 0
177
+ attn_mask = torch.empty(0, dtype=x.dtype, device=x.device)
178
+ else:
179
+ apply_attn_mask = True
180
+ stride_zattnm = attn_mask.stride(0)
181
+
182
+ # run kernel
183
+ M = x.shape[0]
184
+ meta = {
185
+ 'BLOCK': block,
186
+ 'APPLY_SCALE': apply_scale,
187
+ 'APPLY_RPE': apply_rpe,
188
+ 'APPLY_KP_MASK': apply_kp_mask,
189
+ 'APPLY_ATTN_MASK': apply_attn_mask,
190
+ 'KP_MASK_MUL': kp_mask_mode == 'mul',
191
+ 'ATTN_MASK_MUL': attn_mask_mode == 'mul',
192
+ }
193
+ grid = lambda opt: [spdims[0] * spdims[1] * block, M]
194
+ _forward[grid](x, scale, lut, rpe, key_padding_mask, attn_mask, maxlut, x.stride(0),\
195
+ stride_zrpe, stride_hrpe, stride_srpe, stride_zkpm, stride_zattnm, **meta)
196
+
197
+ # save to context
198
+ ctx.mark_dirty(x)
199
+ ctx.save_for_backward(x, lut)
200
+ ctx.spdims = spdims
201
+ ctx.block = block
202
+ ctx.maxlut = maxlut
203
+ ctx.scale = scale
204
+ ctx.apply_scale = apply_scale
205
+ ctx.apply_rpe = apply_rpe
206
+ ctx.apply_kp_mask = apply_kp_mask
207
+ ctx.apply_attn_mask = apply_attn_mask
208
+ ctx.kp_mask_mode = kp_mask_mode
209
+ ctx.attn_mask_mode = attn_mask_mode
210
+ return x
211
+
212
+ @staticmethod
213
+ def backward(ctx, dx):
214
+
215
+ # retrieve from context
216
+ x, lut = ctx.saved_tensors
217
+ # run kernel
218
+ M = x.shape[0]
219
+ grid = lambda opt: [ctx.spdims[0] * ctx.spdims[1] * ctx.block, M]
220
+ _backward[grid](x, ctx.scale, dx, lut, ctx.maxlut, x.stride(0), dx.stride(0), BLOCK=ctx.block)
221
+ return dx, None, None, None, None, None, None, None, None, None, None, None, None, None, None
222
+
223
+
224
+ class Softmax:
225
+ """Block-Sparse Softmax class; this class computes softmax on a block sparse matrix. It is also able to apply either/all of the following masks:
226
+ - relative position embedding
227
+ - key padding mask
228
+ - attention mask
229
+
230
+ For more details about sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
231
+ """
232
+
233
+ def sparse_softmax(*args, **kwargs):
234
+ return _sparse_softmax.apply(*args, **kwargs)
235
+
236
+ def make_lut(self, device):
237
+ """Generates the sparsity layout used in block-sparse softmax
238
+ """
239
+ key = (device, )
240
+ if key not in self.lut_cache:
241
+ self.lut_cache[key] = _sparse_softmax.make_lut(self.layout, self.block, device)
242
+ return self.lut_cache[key]
243
+
244
+ def __init__(self, layout, block, bench=False):
245
+ """Initialize the Block-Sparse Softmax class.
246
+
247
+ Arguments:
248
+ layout: required: sparsity layout tensor
249
+ block: required: an integer determining the block size.
250
+ bench: optional: set if you want to do benchmarking
251
+ """
252
+
253
+ self.num_blocks = layout.sum().item()
254
+ self.spdims = layout.shape
255
+ self.layout = layout
256
+ self.block = block
257
+ self.bench = bench
258
+ self.lut_cache = dict()
259
+
260
+ def __call__(self,
261
+ x,
262
+ scale=1.,
263
+ rpe=None,
264
+ key_padding_mask=None,
265
+ attn_mask=None,
266
+ key_padding_mask_mode='add',
267
+ attn_mask_mode='add'):
268
+ """Applies softmax on a Block-Sparse input tensor.
269
+
270
+ For more details about sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
271
+
272
+ Arguments:
273
+ x: required: a block-sparse tensor to which softmax is applied; computation is done in place and the result is returned in the same tensor
274
+ scale: optional: a float value; x values will be multiplied by this value before normalization. Default value is 1.0.
275
+ rpe: optional: a tensor same dimension as x that is used as relative position embedding
276
+ key_padding_mask: optional: a mask tensor of size (BatchSize X SequenceLength)
277
+ attn_mask: optional: a mask tensor of size (SequenceLength X SequenceLength); currently only 2D is supported
278
+ key_padding_mask_mode: optional: a string determining if key_padding_mask needs to be added, 'add', or multiplied, 'mul'
279
+ attn_mask_mode: optional: a string determining if attn_mask needs to be added, 'add', or multiplied, 'mul'
280
+
281
+ Return:
282
+ x: a block-sparse tensor containing the softmax-normalized input x, with the given masks applied
283
+ """
284
+
285
+ time_y = [None]
286
+ if rpe is not None and rpe.dtype != x.dtype:
287
+ raise ValueError('relative position embedding must be %s' % x.dtype)
288
+ if attn_mask is not None and attn_mask.dtype != x.dtype:
289
+ raise ValueError('Attention mask must be %s' % x.dtype)
290
+ if key_padding_mask is not None and key_padding_mask.dtype != x.dtype:
291
+ raise ValueError('Key padding mask must be %s' % x.dtype)
292
+ lut, maxlut = self.make_lut(x.device)
293
+ x = Softmax.sparse_softmax(x, scale, rpe, key_padding_mask, attn_mask, key_padding_mask_mode, attn_mask_mode,
294
+ self.spdims, self.block, lut, self.num_blocks, maxlut, self.bench, time_y)
295
+ self.time_y = time_y[0]
296
+ return x
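As a rough orientation for how this Softmax wrapper is driven, here is a minimal sketch, assuming DeepSpeed and its sparse-attention (triton) dependencies are importable; the FixedSparsityConfig, block size 16, and sequence length 256 are illustrative choices, not values taken from this file. Only the look-up-table path is exercised, which works on CPU tensors; the __call__ path additionally needs a CUDA fp16 block-sparse input produced by the 'sdd' MatMul op.

    import torch
    from deepspeed.ops.sparse_attention.sparsity_config import FixedSparsityConfig
    from deepspeed.ops.sparse_attention.softmax import Softmax

    block = 16
    # block-level 0/1 layout of shape (num_heads, seq_len // block, seq_len // block)
    layout = FixedSparsityConfig(num_heads=4, block=block, num_local_blocks=4).make_layout(seq_len=256)

    sparse_softmax = Softmax(layout, block)
    # the look-up table is cached per device; maxlut is int(sizes.max()) from make_lut above
    lut, maxlut = sparse_softmax.make_lut(torch.device('cpu'))
    print(lut.dtype, lut.numel(), maxlut)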
venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/sparse_attention_utils.py ADDED
@@ -0,0 +1,208 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+ from torch.nn import functional as F
8
+ from deepspeed.ops.sparse_attention import BertSparseSelfAttention, SparsityConfig
9
+ '''
10
+ This file contains a few utility functions for adapting a pretrained model to use the sparse self-attention module.
11
+ '''
12
+
13
+
14
+ class SparseAttentionUtils:
15
+ """This class provides some utility functions that are use integrating sparse attention into transformer models.
16
+ Such utilities include extending position embeddings, replacing current self-attention layer with sparse attention, padding sequences to multiple of block size, etc.
17
+
18
+ """
19
+
20
+ @staticmethod
21
+ def extend_position_embedding(model, max_position):
22
+ """This function extends the position embedding weights of a model loaded from a checkpoint.
23
+ It assumes the new max position is bigger than the original max length.
24
+
25
+ Arguments:
26
+ model: required: a transformer model
27
+ max_position: required: an integer determining new position embedding size
28
+ Return:
29
+ model: updated model; in which position embedding weights have been extended based on new size
30
+ """
31
+
32
+ if hasattr(model, 'bert'):
33
+ original_max_position = model.bert.embeddings.position_embeddings.weight.size(0)
34
+ assert max_position > original_max_position
35
+ extend_multiples = max(1, max_position // original_max_position)
36
+ model.bert.embeddings.position_embeddings.weight.data = model.bert.embeddings.position_embeddings.weight.repeat(
37
+ extend_multiples, 1)
38
+ elif hasattr(model, 'roberta'):
39
+ # RoBERTa has positions 0 & 1 reserved, so embedding size is max position + 2
40
+ original_max_position, embed_size = model.roberta.embeddings.position_embeddings.weight.shape
41
+ original_max_position -= 2
42
+ extend_multiples = max(1, max_position // original_max_position)
43
+ assert max_position > original_max_position
44
+ max_position += 2
45
+ extended_position_embedding = model.roberta.embeddings.position_embeddings.weight.new_empty(
46
+ max_position, embed_size)
47
+ k = 2
48
+ for i in range(extend_multiples):
49
+ extended_position_embedding[k:(
50
+ k + original_max_position)] = model.roberta.embeddings.position_embeddings.weight[2:]
51
+ k += original_max_position
52
+ model.roberta.embeddings.position_embeddings.weight.data = extended_position_embedding
53
+ else:
54
+ raise ValueError(
55
+ 'Please extend \"extend_position_embedding\" function to support your model type. It currently only supports \"bert\" & \"roberta\"!'
56
+ )
57
+
58
+ model.config.max_position_embeddings = max_position
59
+ print(f'Extended position embeddings to {original_max_position * extend_multiples}')
60
+
61
+ return model
62
+
63
+ @staticmethod
64
+ def update_tokenizer_model_max_length(tokenizer, max_position):
65
+ """This function updates the position embedding length of a tokenizer to a new max position.
66
+
67
+ Arguments:
68
+ tokenizer: required: a transformer tokenizer
69
+ max_position: required: an integer determining new position embedding size
70
+ Return:
71
+ tokenizer: updated tokenizer; in which model maximum length has been extended based on new size
72
+ """
73
+
74
+ tokenizer.model_max_length = max_position
75
+ tokenizer.init_kwargs['model_max_length'] = max_position
76
+ print(f'Updated tokenizer model maximum length to {max_position}')
77
+
78
+ return tokenizer
79
+
80
+ @staticmethod
81
+ def replace_model_self_attention_with_sparse_self_attention(
82
+ model,
83
+ max_position,
84
+ # SparsityConfig parameters need to be set accordingly
85
+ sparsity_config=SparsityConfig(num_heads=4)):
86
+ """This function replaces the self attention layers in model encoder with sparse self attention.
87
+ It currently supports bert and roberta models and can easily be extended to other models by following similar steps.
88
+ For sparsityConfig, refer to the config class.
89
+
90
+ Arguments:
91
+ model: required: a transformer model
92
+ max_position: required: an integer determining new position embedding size
93
+ sparsity_config: optional: this parameter determines sparsity pattern configuration; it is based on SparsityConfig class
94
+
95
+ Return:
96
+ model: updated model; in which self attention layer has been replaced with DeepSpeed Sparse Self Attention layer.
97
+ """
98
+
99
+ if hasattr(model, 'bert'):
100
+ model.config.max_position_embeddings = max_position
101
+ model.replace_self_attention_layer_with_sparse_self_attention_layer(model.config, model.bert.encoder.layer,
102
+ sparsity_config)
103
+ elif hasattr(model, 'roberta'):
104
+ model.config.max_position_embeddings = max_position + 2
105
+ model.replace_self_attention_layer_with_sparse_self_attention_layer(model.config,
106
+ model.roberta.encoder.layer,
107
+ sparsity_config)
108
+ else:
109
+ raise ValueError(
110
+ 'Please extend \"update_model_self_attention_to_sparse_self_attention\" function to support \
111
+ your model type. It currently only supports \"bert\" & \"roberta\"!')
112
+ return model
113
+
114
+ @staticmethod
115
+ def replace_self_attention_layer_with_sparse_self_attention_layer(
116
+ config,
117
+ layers,
118
+ # SparsityConfig parameters need to be set accordingly
119
+ sparsity_config=SparsityConfig(num_heads=4)):
120
+ """This function replaces the self attention layers in attention layer with sparse self attention.
121
+ For sparsityConfig, refer to the config class.
122
+
123
+ Arguments:
124
+ config: required: transformer model config
125
+ layers: required: transformer model attention layers
126
+ sparsity_config: optional: this parameter determines sparsity pattern configuration; it is based on SparsityConfig class
127
+
128
+ Return:
129
+ layers: updated attention layers; in which self attention layers have been replaced with DeepSpeed Sparse Self Attention layer.
130
+ """
131
+
132
+ for layer in layers:
133
+ deepspeed_sparse_self_attn = BertSparseSelfAttention(config, sparsity_config)
134
+ deepspeed_sparse_self_attn.query = layer.attention.self.query
135
+ deepspeed_sparse_self_attn.key = layer.attention.self.key
136
+ deepspeed_sparse_self_attn.value = layer.attention.self.value
137
+
138
+ layer.attention.self = deepspeed_sparse_self_attn
139
+
140
+ return layers
141
+
142
+ @staticmethod
143
+ def pad_to_block_size(block_size, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds,
144
+ pad_token_id, model_embeddings):
145
+ """This function pads input tokens and attention mask on sequence length dimension to be multiple of block size.
146
+ This is a requirement for Sparse Transformer in which the self attention layer works on sequences of length multiple of block size.
147
+ It needs to be called in your model, such as BertModel, right before you calculate the embedding outputs.
148
+ Note)
149
+ 1- instead of passing your embedding layer to this function, you can simply add this function to your model. It can be simplified further if the given attention_mask and/or token_type_ids are None.
150
+ 2- you need to call unpad function before returning your model output to unpad the encoder sequence output.
151
+
152
+ Arguments:
153
+ block_size: required: an integer determining the block size of sparsity config.
154
+ pad_token_id: required: an integer determining the pad token from the model config; such as bert.config.pad_token_id.
155
+ input_ids: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary
156
+ attention_mask: a torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences.
157
+ token_type_ids: a torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details).
158
+ position_ids: a torch.LongTensor of shape [batch_size, sequence_length] with the indices of positions of each input sequence tokens in the position embeddings.
159
+ inputs_embeds: an optional torch.FloatTensor of shape [batch_size, sequence_length, hidden_size] that contains embedded representation and can be passed instead of input_ids directly.
160
+ model_embeddings: an optional object. If inputs_embeds are not none, this will be your model embeddings such as BertEmbeddings from your model such as BertModel. You can move this function inside your model and use self.embeddings instead of passing this parameter.
161
+
162
+ Return:
163
+ pad_len: an integer determining how much the inputs have been padded so the sequence length dimension is a multiple of the block size.
164
+ input_ids: if input_ids are not none padded input_ids otherwise none.
165
+ attention_mask: if attention_mask is not none padded attention_mask otherwise none.
166
+ token_type_ids: if token_type_ids are not none padded token_type_ids otherwise none.
167
+ position_ids: if position_ids are not none padded position_ids otherwise none.
168
+ inputs_embeds: if inputs_embeds are not none padded inputs_embeds otherwise none.
169
+ """
170
+
171
+ batch_size, seq_len = input_ids.shape if input_ids is not None else inputs_embeds.shape[:-1]
172
+
173
+ pad_len = (block_size - seq_len % block_size) % block_size
174
+ if pad_len > 0:
175
+ if inputs_embeds is not None:
176
+ pad_input_ids = inputs_embeds.new_full((batch_size, pad_len), pad_token_id, dtype=torch.long)
177
+ pad_inputs_embeds = model_embeddings(pad_input_ids)
178
+ inputs_embeds = torch.cat([inputs_embeds, pad_inputs_embeds], dim=-2)
179
+ # may not be needed as input_ids are not used if inputs_embeds are given
180
+ if input_ids is not None:
181
+ input_ids = F.pad(input_ids, (0, pad_len), value=pad_token_id)
182
+ if position_ids is not None:
183
+ # pad position_id with pad_token_id
184
+ position_ids = F.pad(position_ids, (0, pad_len), value=pad_token_id)
185
+ # pad attention mask without attention on the padding tokens
186
+ attention_mask = F.pad(attention_mask, (0, pad_len), value=False)
187
+ # pad token_type_ids with token_type_id = 0
188
+ token_type_ids = F.pad(token_type_ids, (0, pad_len), value=0)
189
+
190
+ return pad_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds
191
+
192
+ @staticmethod
193
+ def unpad_sequence_output(pad_len, sequence_output):
194
+ """This function unpads sequence output if inputs of the model were padded.
195
+ This is a requirement for Sparse Transformer in which the self attention layer works on sequences of length multiple of block size.
196
+ It needs to be called in your model, such as BertModel, right before you return the model outputs.
197
+
198
+ Arguments:
199
+ pad_len: required: an integer determining how much the model inputs have been padded so the sequence length dimension is a multiple of the block size.
200
+ sequence_output: required: sequence output of the encoder layer.
201
+
202
+ Return:
203
+ sequence_output: unpadded sequence output of the encoder layer.
204
+ """
205
+
206
+ if (pad_len > 0):
207
+ sequence_output = sequence_output[:, :-pad_len]
208
+ return sequence_output
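A short sketch of the pad/unpad round trip described in the two docstrings above, assuming DeepSpeed is importable; the batch shape, vocabulary size, hidden size 768, and pad_token_id=0 are illustrative assumptions rather than values taken from the file.

    import torch
    from deepspeed.ops.sparse_attention.sparse_attention_utils import SparseAttentionUtils

    block_size = 16
    input_ids = torch.randint(0, 30000, (2, 40))        # 40 is not a multiple of 16
    attention_mask = torch.ones_like(input_ids)
    token_type_ids = torch.zeros_like(input_ids)

    # pad the batch so the sequence length becomes 48 (pad_len == 8)
    pad_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds = \
        SparseAttentionUtils.pad_to_block_size(block_size, input_ids, attention_mask,
                                               token_type_ids, None, None, 0, None)

    # ... run the (sparse-attention) encoder on the padded inputs ...
    sequence_output = torch.randn(2, input_ids.shape[1], 768)   # stand-in for the encoder output

    # drop the padded positions again before returning model outputs
    sequence_output = SparseAttentionUtils.unpad_sequence_output(pad_len, sequence_output)
    assert sequence_output.shape[1] == 40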
venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/sparse_self_attention.py ADDED
@@ -0,0 +1,149 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch.nn as nn
7
+ import torch
8
+ from torch import distributed as dist
9
+ from deepspeed.ops.sparse_attention import SparsityConfig
10
+
11
+
12
+ class SparseSelfAttention(nn.Module):
13
+ """Implements an efficient Sparse Self Attention of Transformer layer based on `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509
14
+
15
+ For more information please see, TODO DeepSpeed Sparse Transformer.
16
+
17
+ For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial.
18
+ """
19
+
20
+ def __init__(
21
+ self,
22
+ # SparsityConfig parameters need to be set accordingly
23
+ sparsity_config=SparsityConfig(num_heads=4),
24
+ key_padding_mask_mode='add',
25
+ attn_mask_mode='mul',
26
+ max_seq_length=2048):
27
+ """Initialize the sparse self attention layer.
28
+ Arguments:
29
+ sparsity_config: optional: this parameter determines sparsity pattern configuration; it is based on SparsityConfig class.
30
+ key_padding_mask_mode: optional: a string determining if key padding mask needs to be added, `add`, or be multiplied, `mul`.
31
+ attn_mask_mode: optional: a string determining if attention mask needs to be added, `add`, or be multiplied, `mul`.
32
+ max_seq_length: optional: the maximum sequence length this sparse attention module will be applied to; it controls the size of the master_layout.
33
+ """
34
+ super().__init__()
35
+
36
+ # sparsity information
37
+ self.sparsity_config = sparsity_config
38
+
39
+ # initialize sparse layout and register as buffer
40
+ master_layout = self.sparsity_config.make_layout(max_seq_length)
41
+ self.register_buffer("master_layout", master_layout)
42
+ self._need_layout_synchronization = True
43
+
44
+ # mask modes
45
+ self.key_padding_mask_mode = key_padding_mask_mode
46
+ self.attn_mask_mode = attn_mask_mode
47
+
48
+ ops = dict()
49
+
50
+ def get_layout(self, L):
51
+ # if the layout has not yet been synchronized across GPUs, broadcast it from global rank 0
52
+ if self._need_layout_synchronization and dist.is_initialized():
53
+ dist.broadcast(self.master_layout, src=0)
54
+ self._need_layout_synchronization = False
55
+
56
+ if (L % self.sparsity_config.block != 0):
57
+ raise ValueError(
58
+ f'Sequence length, {L}, needs to be divisible by block size, {self.sparsity_config.block}!')
59
+
60
+ num_blocks = L // self.sparsity_config.block
61
+ return self.master_layout[..., :num_blocks, :num_blocks].cpu() # layout needs to be a CPU tensor
62
+
63
+ # add to cache
64
+ def get_ops(self, H, L):
65
+ from deepspeed.ops.sparse_attention.matmul import MatMul
66
+ from deepspeed.ops.sparse_attention.softmax import Softmax
67
+ if L not in SparseSelfAttention.ops:
68
+ sparsity_layout = self.get_layout(L)
69
+ sparse_dot_sdd_nt = MatMul(sparsity_layout, self.sparsity_config.block, 'sdd', trans_a=False, trans_b=True)
70
+
71
+ sparse_dot_dsd_nn = MatMul(sparsity_layout,
72
+ self.sparsity_config.block,
73
+ 'dsd',
74
+ trans_a=False,
75
+ trans_b=False)
76
+
77
+ sparse_softmax = Softmax(sparsity_layout, self.sparsity_config.block)
78
+
79
+ SparseSelfAttention.ops[L] = (sparse_dot_sdd_nt, sparse_dot_dsd_nn, sparse_softmax)
80
+ return SparseSelfAttention.ops[L]
81
+
82
+ def transpose_key_for_scores(self, x, L):
83
+ bsz, num_heads, seq_len, head_dim = x.size()
84
+ if seq_len != L:
85
+ return x.permute(0, 1, 3, 2)
86
+ return x
87
+
88
+ def transpose_mask_for_sparse(self, qtype, x, is_key_padding_mask=False):
89
+ x = x.type(qtype)
90
+ if is_key_padding_mask:
91
+ xdim = x.dim()
92
+ for d in range(xdim - 1, 0, -1):
93
+ x = x.squeeze(dim=d)
94
+ return x
95
+ return x.squeeze()
96
+
97
+ # forward pass
98
+ def forward(self, query, key, value, rpe=None, key_padding_mask=None, attn_mask=None):
99
+ """Applies forward phase of sparse self attention
100
+
101
+ Arguments:
102
+ query: required: query tensor
103
+ key: required: key tensor
104
+ value: required: value tensor
105
+ rpe: optional: a tensor same dimension as x that is used as relative position embedding
106
+ key_padding_mask: optional: a mask tensor of size (BatchSize X SequenceLength)
107
+ attn_mask: optional: a mask tensor of size (SequenceLength X SequenceLength); currently only 2D is supported
108
+ key_padding_mask_mode: optional: a string (set in the constructor) determining if key_padding_mask needs to be added, 'add', or multiplied, 'mul'
109
+ attn_mask_mode: optional: a string (set in the constructor) determining if attn_mask needs to be added, 'add', or multiplied, 'mul'
110
+
111
+ Return:
112
+ attn_output: a dense tensor containing attention context
113
+ """
114
+ assert query.dtype == torch.half, "sparse attention only supports training in fp16 currently, please file a github issue if you need fp32 support"
115
+ bsz, num_heads, tgt_len, head_dim = query.size()
116
+
117
+ # transpose back key if it is already transposed
118
+ key = self.transpose_key_for_scores(key, tgt_len)
119
+
120
+ # check that operation is supported
121
+ if query.shape != key.shape or key.shape != value.shape:
122
+ raise NotImplementedError('only self-attention is supported for now')
123
+
124
+ # squeeze key_padding_mask if it is given
125
+ if key_padding_mask is not None:
126
+ key_padding_mask = self.transpose_mask_for_sparse(query.dtype, key_padding_mask, is_key_padding_mask=True)
127
+
128
+ # squeeze attn_mask if it is given
129
+ if attn_mask is not None:
130
+ attn_mask = self.transpose_mask_for_sparse(query.dtype, attn_mask)
131
+
132
+ # cache look-up table computations etc
133
+ sparse_dot_sdd_nt, sparse_dot_dsd_nn, sparse_softmax = self.get_ops(num_heads, tgt_len)
134
+
135
+ scaling = float(head_dim)**-0.5
136
+
137
+ # attention scores
138
+ attn_output_weights = sparse_dot_sdd_nt(query, key)
139
+ attn_output_weights = sparse_softmax(attn_output_weights,
140
+ scale=scaling,
141
+ rpe=rpe,
142
+ key_padding_mask=key_padding_mask,
143
+ attn_mask=attn_mask,
144
+ key_padding_mask_mode=self.key_padding_mask_mode,
145
+ attn_mask_mode=self.attn_mask_mode)
146
+
147
+ # outputs
148
+ attn_output = sparse_dot_dsd_nn(attn_output_weights, value)
149
+ return attn_output
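A hedged end-to-end sketch of this layer; it assumes a CUDA GPU supported by the triton block-sparse kernels, fp16 inputs, and a sequence length that is a multiple of the block size. The FixedSparsityConfig and all tensor sizes below are illustrative choices, not values taken from the file.

    import torch
    from deepspeed.ops.sparse_attention.sparse_self_attention import SparseSelfAttention
    from deepspeed.ops.sparse_attention.sparsity_config import FixedSparsityConfig

    bsz, heads, seq_len, head_dim = 1, 4, 256, 64
    config = FixedSparsityConfig(num_heads=heads, block=16, num_local_blocks=4)
    attn = SparseSelfAttention(sparsity_config=config, max_seq_length=seq_len).cuda()

    # inputs must be fp16 with shape (batch, heads, seq_len, head_dim); seq_len a multiple of block
    q = torch.randn(bsz, heads, seq_len, head_dim, dtype=torch.half, device='cuda')
    k, v = q.clone(), q.clone()

    out = attn(q, k, v)          # dense context tensor with the same shape as the inputs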
venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/sparsity_config.py ADDED
@@ -0,0 +1,727 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+ import random
8
+
9
+
10
+ class SparsityConfig:
11
+ """Abstract Configuration class to store `sparsity configuration of a self attention layer`.
12
+ It contains the shared properties of different block-sparse sparsity patterns. Each subclass needs to extend it with the properties and functionality it requires.
13
+ """
14
+
15
+ def __init__(self, num_heads, block=16, different_layout_per_head=False):
16
+ """Initialize the Sparsity Pattern Config.
17
+
18
+ For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
19
+
20
+ Arguments:
21
+ num_heads: required: an integer determining number of attention heads of the layer.
22
+ block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`.
23
+ different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability.
24
+ """
25
+
26
+ self.num_heads = num_heads
27
+ self.block = block
28
+ self.different_layout_per_head = different_layout_per_head
29
+ self.num_layout_heads = num_heads if different_layout_per_head else 1
30
+
31
+ def setup_layout(self, seq_len):
32
+ """Create layout tensor for the given sequence length
33
+
34
+ Arguments:
35
+ seq_len: required: an integer determining the sequence length; must be divisible by the block size.
36
+
37
+ Return:
38
+ layout: a tensor of dimension (num_heads, num_blocks, num_blocks) for sparsity layout of all head; initialized with zero
39
+ """
40
+
41
+ if (seq_len % self.block != 0):
42
+ raise ValueError(f'Sequence length, {seq_len}, needs to be divisible by block size, {self.block}!')
43
+ num_blocks = seq_len // self.block
44
+ # TODO Currently we allocate layout per head; needs to be updated if heads share a single layout.
45
+ layout = torch.zeros((self.num_heads, num_blocks, num_blocks), dtype=torch.int64)
46
+ return layout
47
+
48
+ def check_and_propagate_first_head_layout(self, layout):
49
+ """If all heads require same sparsity layout, it propagate first head layout to all heads
50
+
51
+ Arguments:
52
+ layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
53
+
54
+ Return:
55
+ layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head
56
+ """
57
+
58
+ if not self.different_layout_per_head:
59
+ layout[1:self.num_heads, :, :] = layout[0, :, :]
60
+ return layout
61
+
62
+
63
+ class DenseSparsityConfig(SparsityConfig):
64
+ """Configuration class to store `Dense` configuration.
65
+ In reality, this is not sparse and all blocks are used. We keep it for the sake of comparison and comprehension.
66
+ """
67
+
68
+ def __init__(self, num_heads, block=16, different_layout_per_head=False):
69
+ """Initialize the Dense Sparsity Pattern Config.
70
+ In reality, this is not sparse and all blocks are used. We keep it for the sake of comparison and comprehension.
71
+
72
+ Arguments:
73
+ num_heads: required: an integer determining number of attention heads of the layer.
74
+ block: optional: an integer determining the block size; kept for consistency with the other sparsity configs.
75
+ different_layout_per_head: optional: this is just for the sake of consistency with other sparsity formats; can ignore it for DenseSparsityConfig
76
+ """
77
+
78
+ super().__init__(num_heads, block, different_layout_per_head)
79
+
80
+ def make_layout(self, seq_len):
81
+ """Set 1 to all blocks of the layout meaning the pattern is dense; not sparse.
82
+
83
+ Arguments:
84
+ seq_len: required: an integer determining the underlying sequence length; must be <= max sequence length
85
+
86
+ Return:
87
+ layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; for dense everything is 1
88
+ """
89
+
90
+ layout = self.setup_layout(seq_len)
91
+ layout[:, :, :] = 1
92
+ return layout
93
+
94
+
95
+ class FixedSparsityConfig(SparsityConfig):
96
+ """Configuration class to store `Fixed` sparsity configuration.
97
+ For more details about this sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509; this has been customized.
98
+ This class extends parent class of `SparsityConfig` and customizes it for `Fixed` sparsity.
99
+ """
100
+
101
+ def __init__(self,
102
+ num_heads,
103
+ block=16,
104
+ different_layout_per_head=False,
105
+ num_local_blocks=4,
106
+ num_global_blocks=1,
107
+ attention='bidirectional',
108
+ horizontal_global_attention=False,
109
+ num_different_global_patterns=1):
110
+ """Initialize `Fixed` Sparsity Pattern Config.
111
+
112
+ For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
113
+
114
+ Arguments:
115
+ num_heads: required: an integer determining number of attention heads of the layer.
116
+ block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`.
117
+ different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability.
118
+ num_local_blocks: optional: an integer determining the number of blocks in local attention window.
119
+ num_global_blocks: optional: an integer determining how many consecutive blocks in a local window is used as the representative of the window for global attention.
120
+ attention: optional: a string determining the attention type. Attention can be `unidirectional`, as in autoregressive models, in which tokens attend only to tokens that appear before them in the context; in that case, the upper triangular part of the attention matrix is empty, as in the figure above. Or it can be `bidirectional`, as in BERT, in which tokens can attend to any other tokens before or after them; then, the upper triangular part of the attention matrix is the mirror of the lower triangular part in the figure above.
121
+ horizontal_global_attention: optional: a boolean determining if blocks that are global representative of a local window, also attend to all other blocks. This is valid only if attention type is `bidirectional`. Looking at the attention matrix, that means global attention not only includes the vertical blocks, but also horizontal blocks.
122
+ num_different_global_patterns: optional: an integer determining the number of different global attention layouts. While global attention can be fixed by which block(s) are representative of any local window, since there are multiple heads, each head can use a different global representative. For example, with a 4-block local window and a global attention size of 1 block, we can have 4 different versions in which the first, second, third, or fourth block of each local window can be the global representative of that window. This parameter determines how many such patterns we want. Of course, there is a limitation based on num_local_blocks and num_global_blocks.
123
+ """
124
+
125
+ super().__init__(num_heads, block, different_layout_per_head)
126
+
127
+ self.num_local_blocks = num_local_blocks
128
+
129
+ if (num_local_blocks % num_global_blocks != 0):
130
+ raise ValueError(
131
+ f'Number of blocks in a local window, {num_local_blocks}, must be divisible by number of global blocks, {num_global_blocks}!'
132
+ )
133
+ self.num_global_blocks = num_global_blocks
134
+
135
+ if (attention != 'unidirectional' and attention != 'bidirectional'):
136
+ raise NotImplementedError('only \"uni/bi-directional\" attentions are supported for now!')
137
+ self.attention = attention
138
+
139
+ if (attention != 'bidirectional' and horizontal_global_attention):
140
+ raise ValueError('only \"bi-directional\" attentions can support horizontal global attention!')
141
+ self.horizontal_global_attention = horizontal_global_attention
142
+
143
+ if (num_different_global_patterns > 1 and not different_layout_per_head):
144
+ raise ValueError(
145
+ f'Number of different layouts cannot be more than one when you have set a single layout for all heads! Set different_layout_per_head to True.'
146
+ )
147
+ if (num_different_global_patterns > (num_local_blocks // num_global_blocks)):
148
+ raise ValueError(
149
+ f'Number of layout versions (num_different_global_patterns), {num_different_global_patterns}, cannot be larger than number of local window blocks divided by number of global blocks, {num_local_blocks} / {num_global_blocks} = {num_local_blocks//num_global_blocks}!'
150
+ )
151
+ self.num_different_global_patterns = num_different_global_patterns
152
+
153
+ def set_local_layout(self, h, layout):
154
+ """Sets local attention layout used by the given head in the sparse attention.
155
+
156
+ Arguments:
157
+ h: required: an integer determining head index
158
+ layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
159
+
160
+ Return:
161
+ layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which local layout is set
162
+ """
163
+
164
+ num_blocks = layout.shape[1]
165
+ for i in range(0, num_blocks, self.num_local_blocks):
166
+ end = min(i + self.num_local_blocks, num_blocks)
167
+ for row in range(i, end):
168
+ for col in range(i, (row + 1 if self.attention == 'unidirectional' else end)):
169
+ layout[h, row, col] = 1
170
+ return layout
171
+
172
+ def set_global_layout(self, h, layout):
173
+ """Sets global attention layout used by the given head in the sparse attention.
174
+
175
+ Currently we set global blocks starting from the last block of a local window to the first one. That means if a local window consists of 4 blocks and the global attention size is one block, we use block #4 in each local window as global. If we have a different layout per head, then other heads will get #3, #2, and #1. And if we have more heads (with different layouts set) than the number of global attention patterns, multiple heads may share the same global attention blocks.
176
+ Note) if horizontal_global_attention is set, global blocks will be set both horizontally and vertically.
177
+
178
+ Arguments:
179
+ h: required: an integer determining head index
180
+ layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
181
+
182
+ Return:
183
+ layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which global layout is set
184
+ """
185
+
186
+ num_blocks = layout.shape[1]
187
+ first_global_block_idx = self.num_local_blocks - (
188
+ 1 + h % self.num_different_global_patterns) * self.num_global_blocks
189
+
190
+ # set all global blocks except the last one if (in last local window)
191
+ end = num_blocks - (num_blocks % self.num_local_blocks)
192
+ for i in range(first_global_block_idx, end, self.num_local_blocks):
193
+
194
+ # vertical global attention
195
+ first_row = 0 if self.attention == 'bidirectional' else i
196
+ #(((i // self.num_local_blocks) + 1) * self.num_local_blocks)
197
+ #if (first_row < num_blocks):
198
+ layout[h, first_row:, i:i + self.num_global_blocks] = 1
199
+
200
+ # horizontal global attention; only in bidirectional attention
201
+ if (self.horizontal_global_attention):
202
+ layout[h, i:i + self.num_global_blocks, :] = 1
203
+
204
+ # set last global blocks; handle possible short last local window
205
+ if (end < num_blocks):
206
+ start = min(end + first_global_block_idx, num_blocks - self.num_global_blocks)
207
+ end = start + self.num_global_blocks
208
+
209
+ # vertical global attention
210
+ first_row = 0 if self.attention == 'bidirectional' else start
211
+ #(((start // self.num_local_blocks) + 1) * self.num_local_blocks)
212
+ #if (first_row < num_blocks):
213
+ layout[h, first_row:, start:end] = 1
214
+
215
+ # horizontal global attention
216
+ if (self.horizontal_global_attention):
217
+ layout[h, start:end, :] = 1
218
+ return layout
219
+
220
+ def make_layout(self, seq_len):
221
+ """Generates `Fixed` sparsity layout used by each head in the sparse attention.
222
+
223
+ Arguments:
224
+ seq_len: required: an integer determining the sequence length; must be divisible by the block size.
225
+
226
+ Return:
227
+ layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing `Fixed` sparsity layout of all head
228
+ """
229
+
230
+ layout = self.setup_layout(seq_len)
231
+ for h in range(0, self.num_layout_heads):
232
+ layout = self.set_local_layout(h, layout)
233
+ layout = self.set_global_layout(h, layout)
234
+
235
+ layout = self.check_and_propagate_first_head_layout(layout)
236
+ return layout
237
+
238
+
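A small sketch of what FixedSparsityConfig produces, assuming DeepSpeed is importable; the head count, block size, sequence length, and attention type below are illustrative choices. make_layout only needs torch, so this runs on CPU.

    from deepspeed.ops.sparse_attention.sparsity_config import FixedSparsityConfig

    # 512 tokens with block=16 gives a 32 x 32 grid of blocks per head
    config = FixedSparsityConfig(num_heads=8, block=16, num_local_blocks=4,
                                 num_global_blocks=1, attention='unidirectional')
    layout = config.make_layout(seq_len=512)

    print(layout.shape)                                  # torch.Size([8, 32, 32])
    print(int(layout[0].sum()), 'of', 32 * 32, 'blocks kept for head 0')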
239
+ class VariableSparsityConfig(SparsityConfig):
240
+ """Configuration class to store `Variable` sparsity configuration.
241
+ This layout is an extension of FixedSparsityConfig in which:
242
+ - user can set random layout; default value is zero means no random block
243
+ - user can provide a list of local block sizes
244
+ - user can provide a list of global block indices.
245
+
246
+ For more details about `Fixed` sparsity config, please see `Generative Modeling with Sparse Transformers`: https://arxiv.org/abs/1904.10509; this has been customized.
247
+ This class extends the parent class of `SparsityConfig` and customizes it for `Variable` sparsity.
248
+ """
249
+
250
+ def __init__(self,
251
+ num_heads,
252
+ block=16,
253
+ different_layout_per_head=False,
254
+ num_random_blocks=0,
255
+ local_window_blocks=[4],
256
+ global_block_indices=[0],
257
+ global_block_end_indices=None,
258
+ attention='bidirectional',
259
+ horizontal_global_attention=False):
260
+ """Initialize `Variable` Sparsity Pattern Config.
261
+
262
+ For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
263
+
264
+ Arguments:
265
+ num_heads: required: an integer determining number of attention heads of the layer.
266
+ block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`.
267
+ different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability. Currently this sparsity config can only assign single layout to all heads; needs to be extended for different layout per head.
268
+ num_random_blocks: optional: an integer determining the number of random blocks in each block row.
269
+ local_window_blocks: optional: a list of integers determining the number of blocks in each local attention window. It assumes first number determines # of blocks in the first local window, second the second window, ..., and the last number determines the number of blocks in the remaining local windows.
270
+ global_block_indices: optional: a list of integers determining which blocks are considered as global attention. Given indices, determine the blocks that all other token blocks attend to and they attend to all other token blocks. Default value is only index 0. Notice that if global_block_end_indices parameter is set, this parameter is used as starting index of each global window.
271
+ global_block_end_indices: optional: a list of integers determining the end indices of global window blocks. By default this is not used. But if it is set, it must have the same size as the global_block_indices parameter; combining these two parameters, for each index i, blocks from global_block_indices[i] to global_block_end_indices[i] (exclusive) are considered as global attention.
272
+ num_global_blocks: optional: an integer determining how many consecutive blocks in a local window is used as the representative of the window for global attention.
273
+ attention: optional: a string determining the attention type. Attention can be `unidirectional`, as in autoregressive models, in which tokens attend only to tokens that appear before them in the context; in that case, the upper triangular part of the attention matrix is empty, as in the figure above. Or it can be `bidirectional`, as in BERT, in which tokens can attend to any other tokens before or after them; then, the upper triangular part of the attention matrix is the mirror of the lower triangular part in the figure above.
274
+ horizontal_global_attention: optional: a boolean determining if blocks that are global representative of a local window, also attend to all other blocks. This is valid only if attention type is `bidirectional`. Looking at the attention matrix, that means global attention not only includes the vertical blocks, but also horizontal blocks.
275
+ """
276
+
277
+ super().__init__(num_heads, block, different_layout_per_head)
278
+
279
+ self.num_random_blocks = num_random_blocks
280
+ self.local_window_blocks = local_window_blocks
281
+ self.global_block_indices = global_block_indices
282
+
283
+ if (global_block_end_indices is not None):
284
+ if (len(global_block_indices) != len(global_block_end_indices)):
285
+ raise ValueError(
286
+ f'Global block start indices length, {len(global_block_indices)}, must be same as global block end indices length, {len(global_block_end_indices)}!'
287
+ )
288
+ for _, (start_idx, end_idx) in enumerate(zip(global_block_indices, global_block_end_indices)):
289
+ if start_idx >= end_idx:
290
+ raise ValueError(
291
+ f'Global block start index, {start_idx}, must be smaller than global block end index, {end_idx}!'
292
+ )
293
+ self.global_block_end_indices = global_block_end_indices
294
+
295
+ if (attention != 'unidirectional' and attention != 'bidirectional'):
296
+ raise NotImplementedError('only \"uni/bi-directional\" attentions are supported for now!')
297
+ self.attention = attention
298
+
299
+ if (attention != 'bidirectional' and horizontal_global_attention):
300
+ raise ValueError('only \"bi-directional\" attentions can support horizontal global attention!')
301
+ self.horizontal_global_attention = horizontal_global_attention
302
+
303
+ def set_random_layout(self, h, layout):
304
+ """Sets random attention layout used by the given head in the sparse attention.
305
+ Note) By default, it assumes there will be a unique random block layout for all heads; unless `different_layout_per_head` parameter is set in which each head can have a different random layout.
306
+
307
+ Arguments:
308
+ h: required: an integer determining head index
309
+ layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
310
+
311
+ Return:
312
+ layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which random layout is set
313
+ """
314
+
315
+ num_blocks = layout.shape[1]
316
+ if (num_blocks < self.num_random_blocks):
317
+ raise ValueError(
318
+ f'Number of random blocks, {self.num_random_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!'
319
+ )
320
+ for row in range(0, num_blocks):
321
+ rnd_cols = random.sample(range(0, num_blocks), self.num_random_blocks)
322
+ layout[h, row, rnd_cols] = 1
323
+ return layout
324
+
325
+ def set_local_layout(self, h, layout):
326
+ """Sets local attention layout used by the given head in the sparse attention.
327
+ Arguments:
328
+ h: required: an integer determining head index
329
+ layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
330
+
331
+ Return:
332
+ layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which local layout is set
333
+ """
334
+
335
+ num_blocks = layout.shape[1]
336
+ start_block_idx = 0
337
+ end_block_idx = 0
338
+ for block_size in self.local_window_blocks:
339
+ end_block_idx += block_size
340
+ end_block_idx = min(end_block_idx, num_blocks)
341
+ for row in range(start_block_idx, end_block_idx):
342
+ for col in range(start_block_idx, (row + 1 if self.attention == 'unidirectional' else end_block_idx)):
343
+ layout[h, row, col] = 1
344
+ start_block_idx += block_size
345
+
346
+ # if any remaining part is not attended, use the last local window block size as the local window for the remaining applicable local windows
347
+ for i in range(start_block_idx, num_blocks, block_size):
348
+ end_block_idx = min(i + block_size, num_blocks)
349
+ for row in range(i, end_block_idx):
350
+ for col in range(i, (row + 1 if self.attention == 'unidirectional' else end_block_idx)):
351
+ layout[h, row, col] = 1
352
+ return layout
353
+
354
+ def set_global_layout(self, h, layout):
355
+ """Sets global attention layout used by the given head in the sparse attention.
356
+
357
+ Arguments:
358
+ h: required: an integer determining head index
359
+ layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
360
+
361
+ Return:
362
+ layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which global layout is set
363
+ """
364
+
365
+ num_blocks = layout.shape[1]
366
+ if (self.global_block_end_indices is None):
367
+ for idx in self.global_block_indices:
368
+ # if global block idx is in the range of the sequence blocks
369
+ if (idx < num_blocks):
370
+ #global rows
371
+ if (self.horizontal_global_attention):
372
+ layout[h, idx, :] = 1
373
+
374
+ #global columns
375
+ first_row = 0 if self.attention == 'bidirectional' else idx
376
+ layout[h, first_row:, idx] = 1
377
+ else:
378
+ for _, (start_idx, end_idx) in enumerate(zip(self.global_block_indices, self.global_block_end_indices)):
379
+ # if global block idx is in the range of the sequence blocks
380
+ if (start_idx < num_blocks):
381
+ end_idx = min(end_idx, num_blocks)
382
+ #global rows
383
+ if (self.horizontal_global_attention):
384
+ layout[h, start_idx:end_idx, :] = 1
385
+
386
+ #global columns
387
+ first_row = 0 if self.attention == 'bidirectional' else start_idx
388
+ layout[h, first_row:, start_idx:end_idx] = 1
389
+ return layout
390
+
391
+ def make_layout(self, seq_len):
392
+ """Generates `Variable` sparsity layout used by each head in the sparse attention.
393
+
394
+ Arguments:
395
+ seq_len: required: an integer determining the sequence length; must be divisible by the block size.
396
+
397
+ Return:
398
+ layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing `Variable` sparsity layout of all head
399
+ """
400
+
401
+ layout = self.setup_layout(seq_len)
402
+ for h in range(0, self.num_layout_heads):
403
+ layout = self.set_random_layout(h, layout)
404
+ layout = self.set_local_layout(h, layout)
405
+ layout = self.set_global_layout(h, layout)
406
+
407
+ layout = self.check_and_propagate_first_head_layout(layout)
408
+ return layout
409
+
410
+
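A sketch of the Variable pattern under illustrative settings (the window sizes, head count, random-block count, and sequence length below are assumptions, not defaults):

    from deepspeed.ops.sparse_attention.sparsity_config import VariableSparsityConfig

    # first local window covers 2 blocks, the second 4, and every later window 8 blocks;
    # block 0 is global, so every block row can attend to it
    config = VariableSparsityConfig(num_heads=4, block=16,
                                    num_random_blocks=1,
                                    local_window_blocks=[2, 4, 8],
                                    global_block_indices=[0],
                                    attention='bidirectional')
    layout = config.make_layout(seq_len=1024)            # (4, 64, 64) 0/1 tensor
    assert bool(layout[0, :, 0].all())                   # the global column is fully attended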
411
+ class BigBirdSparsityConfig(SparsityConfig):
412
+ """Configuration class to store `BigBird` sparsity configuration.
413
+ For more details about this sparsity config, please see `Big Bird: Transformers for Longer Sequences`: https://arxiv.org/pdf/2007.14062.pdf
414
+ This class extends parent class of `SparsityConfig` and customizes it for `BigBird` sparsity.
415
+ """
416
+
417
+ def __init__(self,
418
+ num_heads,
419
+ block=16,
420
+ different_layout_per_head=False,
421
+ num_random_blocks=1,
422
+ num_sliding_window_blocks=3,
423
+ num_global_blocks=1,
424
+ attention='bidirectional'):
425
+ """Initialize the BigBird Sparsity Pattern Config.
426
+
427
+ For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
428
+
429
+ Arguments:
430
+ num_heads: required: an integer determining number of attention heads of the layer.
431
+ block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`.
432
+ different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability.
433
+ num_random_blocks: optional: an integer determining the number of random blocks in each block row.
434
+ num_sliding_window_blocks: optional: an integer determining the number of blocks in sliding local attention window.
435
+ num_global_blocks: optional: an integer determining how many consecutive blocks, starting from index 0, are considered as global attention. Global block tokens will be attended by all other block tokens and will attend to all other block tokens as well.
436
+ attention: optional: a string determining the attention type. Attention can be `unidirectional`, as in autoregressive models, in which tokens attend only to tokens that appear before them in the context; in that case, the upper triangular part of the attention matrix is empty, as in the figure above. Or it can be `bidirectional`, as in BERT, in which tokens can attend to any other tokens before or after them; then, the upper triangular part of the attention matrix is the mirror of the lower triangular part in the figure above.
437
+ """
438
+
439
+ super().__init__(num_heads, block, different_layout_per_head)
440
+
441
+ self.num_random_blocks = num_random_blocks
442
+ self.num_sliding_window_blocks = num_sliding_window_blocks
443
+ self.num_global_blocks = num_global_blocks
444
+
445
+ if (attention != 'unidirectional' and attention != 'bidirectional'):
446
+ raise NotImplementedError('only \"uni/bi-directional\" attentions are supported for now!')
447
+ self.attention = attention
448
+
449
+ def set_random_layout(self, h, layout):
450
+ """Sets random attention layout used by the given head in the sparse attention.
451
+ Note) By default, it assumes there will be a unique random block layout for all heads; unless `different_layout_per_head` parameter is set in which each head can have a different random layout.
452
+
453
+ Arguments:
454
+ h: required: an integer determining head index
455
+ layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
456
+
457
+ Return:
458
+ layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which random layout is set
459
+ """
460
+
461
+ num_blocks = layout.shape[1]
462
+ if (num_blocks < self.num_random_blocks):
463
+ raise ValueError(
464
+ f'Number of random blocks, {self.num_random_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!'
465
+ )
466
+
467
+ for row in range(0, num_blocks):
468
+ sample_range = range(0, num_blocks) if self.attention == 'bidirectional' else range(0, row + 1)
469
+ rnd_cols = random.sample(sample_range, self.num_random_blocks)
470
+ layout[h, row, rnd_cols] = 1
471
+ return layout
472
+
473
+ def set_sliding_window_layout(self, h, layout):
474
+ """Sets sliding local attention layout used by the given head in the sparse attention.
475
+
476
+ Arguments:
477
+ h: required: an integer determining head index
478
+ layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
479
+
480
+ Return:
481
+ layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which local sliding window layout is set
482
+ """
483
+
484
+ num_blocks = layout.shape[1]
485
+ if (num_blocks < self.num_sliding_window_blocks):
486
+ raise ValueError(
487
+ f'Number of sliding window blocks, {self.num_sliding_window_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!'
488
+ )
489
+
490
+ w = self.num_sliding_window_blocks // 2
491
+ for row in range(0, num_blocks):
492
+ start = max(0, row - w)
493
+ end = min(row + w + 1, num_blocks)
494
+ layout[h, row, start:end] = 1
495
+ return layout
496
+
497
+ def set_global_layout_itc(self, h, layout):
498
+ """Sets global attention layout used by the given head in the sparse attention.
499
+
500
+ Arguments:
501
+ h: required: an integer determining head index
502
+ layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head; may not be completely set at this step
503
+
504
+ Return:
505
+ layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing sparsity layout of all head in which global layout is set
506
+ """
507
+
508
+ num_blocks = layout.shape[1]
509
+ if (num_blocks < self.num_global_blocks):
510
+ raise ValueError(
511
+ f'Number of global blocks, {self.num_global_blocks}, must be smaller than overall number of blocks in a row, {num_blocks}!'
512
+ )
513
+
514
+ #global rows
515
+ layout[h, 0:self.num_global_blocks, :] = 1
516
+
517
+ #global columns
518
+ layout[h, :, 0:self.num_global_blocks] = 1
519
+
520
+ if self.attention == 'unidirectional':
521
+ # zero out anything attending to the future
522
+ layout = torch.tril(layout)
523
+
524
+ return layout
525
+
526
+ def make_layout(self, seq_len):
527
+ """Generates `BigBird` sparsity layout used by each head in the sparse attention.
528
+
529
+ Arguments:
530
+ seq_len: required: an integer determining the sequence length; must be divisible by the block size.
531
+
532
+ Return:
533
+ layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing `BigBird` sparsity layout of all head
534
+ """
535
+
536
+ layout = self.setup_layout(seq_len)
537
+ for h in range(0, self.num_layout_heads):
538
+ layout = self.set_random_layout(h, layout)
539
+ layout = self.set_sliding_window_layout(h, layout)
540
+ layout = self.set_global_layout_itc(h, layout)
541
+
542
+ layout = self.check_and_propagate_first_head_layout(layout)
543
+ return layout
544
+
545
+
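A sketch of the BigBird pattern with illustrative settings; only torch is needed since this just builds the block-level layout, and the sizes below are assumptions.

    from deepspeed.ops.sparse_attention.sparsity_config import BigBirdSparsityConfig

    # BigBird-style pattern: 1 random block per row, a 3-block sliding window,
    # and the first block acting as global attention in both directions
    config = BigBirdSparsityConfig(num_heads=4, block=16,
                                   num_random_blocks=1,
                                   num_sliding_window_blocks=3,
                                   num_global_blocks=1)
    layout = config.make_layout(seq_len=512)             # (4, 32, 32)
    assert bool(layout[0, 0, :].all()) and bool(layout[0, :, 0].all())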
546
+ class BSLongformerSparsityConfig(SparsityConfig):
547
+ """Configuration class to store edited `Longformer` sparsity configuration.
548
+
549
+ Note) this is a block-sparse version of the Longformer which is slightly different than original Longformer; which is element-wise sparsity.
550
+
551
+ For more details about this sparsity config, please see `Longformer: The Long-Document Transformer`: https://arxiv.org/pdf/2004.05150.pdf
552
+ This class extends parent class of `SparsityConfig` and customizes it for `Longformer` sparsity.
553
+ """
554
+
555
+ def __init__(self,
556
+ num_heads,
557
+ block=16,
558
+ different_layout_per_head=False,
559
+ num_sliding_window_blocks=3,
560
+ global_block_indices=[0],
561
+ global_block_end_indices=None,
562
+ attention='bidirectional'):
563
+ """Initialize the edited `Longformer` Sparsity Pattern Config.
564
+
565
+ For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
566
+
567
+ Arguments:
568
+ num_heads: required: an integer determining number of attention heads of the layer.
569
+ block: optional: an integer determining the block size. Current implementation of sparse self-attention is based on blocked sparse matrices. In which this parameter defines size of such blocks, `Block X Block`.
570
+ different_layout_per_head: optional: a boolean determining if each head should be assigned a different sparsity layout; default is false and this will be satisfied based on availability.
571
+
572
+ num_sliding_window_blocks: optional: an integer determining the number of blocks in sliding local attention window.
573
+ global_block_indices: optional: a list of integers determining which blocks are considered as global attention. Given indices, determine the blocks that all other token blocks attend to and they attend to all other token blocks. Default value is only index 0. Notice that if global_block_end_indices parameter is set, this parameter is used as starting index of each global window.
574
+ global_block_end_indices: optional: a list of integers determining the end indices of global window blocks. By default this is not used. But if it is set, it must have the same size as the global_block_indices parameter; combining these two parameters, for each index i, blocks from global_block_indices[i] to global_block_end_indices[i] (exclusive) are considered as global attention.
575
+ attention: optional: a string determining the attention type. Attention can be `unidirectional`, as in autoregressive models, in which tokens attend only to tokens that appear before them in the context; in that case, the upper triangular part of the attention matrix is empty, as in the figure above. Or it can be `bidirectional`, as in BERT, in which tokens can attend to any other tokens before or after them; then, the upper triangular part of the attention matrix is the mirror of the lower triangular part in the figure above.
576
+ """
577
+
578
+ super().__init__(num_heads, block, different_layout_per_head)
579
+
580
+ self.num_sliding_window_blocks = num_sliding_window_blocks
581
+ self.global_block_indices = global_block_indices
582
+ self.attention = attention
583
+
584
+ if (global_block_end_indices is not None):
585
+ if (len(global_block_indices) != len(global_block_end_indices)):
586
+ raise ValueError(
587
+ f'Global block start indices length, {len(global_block_indices)}, must be same as global block end indices length, {len(global_block_end_indices)}!'
588
+ )
589
+ for _, (start_idx, end_idx) in enumerate(zip(global_block_indices, global_block_end_indices)):
590
+ if start_idx >= end_idx:
591
+ raise ValueError(
592
+ f'Global block start index, {start_idx}, must be smaller than global block end index, {end_idx}!'
593
+ )
594
+ self.global_block_end_indices = global_block_end_indices
595
+
596
+ def set_sliding_window_layout(self, h, layout):
597
+ """Sets sliding local attention layout used by the given head in the sparse attention.
598
+
599
+ Arguments:
600
+ h: required: an integer determining head index
601
+ layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity layout of all heads; may not be completely set at this step
602
+
603
+ Return:
604
+ layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity layout of all heads in which the local sliding window layout is set
605
+ """
606
+
607
+ num_blocks = layout.shape[1]
608
+ if (num_blocks < self.num_sliding_window_blocks):
609
+ raise ValueError(
610
+ f'Number of sliding window blocks, {self.num_sliding_window_blocks}, must not exceed the overall number of blocks in a row, {num_blocks}!'
611
+ )
612
+
613
+ w = self.num_sliding_window_blocks // 2
614
+ for row in range(0, num_blocks):
615
+ start = max(0, row - w)
616
+ end = min(row + w + 1, num_blocks)
617
+ layout[h, row, start:end] = 1
618
+ return layout
619
+
620
+ def set_global_layout(self, h, layout):
621
+ """Sets global attention layout used by the given head in the sparse attention.
622
+
623
+ Arguments:
624
+ h: required: an integer determining head index
625
+ layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity layout of all heads; may not be completely set at this step
626
+
627
+ Return:
628
+ layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity layout of all heads in which the global layout is set
629
+ """
630
+
631
+ num_blocks = layout.shape[1]
632
+ if (self.global_block_end_indices is None):
633
+ for idx in self.global_block_indices:
634
+ # if global block idx is in the range of the sequence blocks
635
+ if (idx < num_blocks):
636
+ #global rows
637
+ layout[h, idx, :] = 1
638
+
639
+ #global columns
640
+ layout[h, :, idx] = 1
641
+ else:
642
+ for _, (start_idx, end_idx) in enumerate(zip(self.global_block_indices, self.global_block_end_indices)):
643
+ # if global block idx is in the range of the sequence blocks
644
+ if (start_idx < num_blocks):
645
+ end_idx = min(end_idx, num_blocks)
646
+ #global rows
647
+ layout[h, start_idx:end_idx, :] = 1
648
+
649
+ #global columns
650
+ layout[h, :, start_idx:end_idx] = 1
651
+ if self.attention == 'unidirectional':
652
+ layout = torch.tril(layout)
653
+ return layout
654
+
655
+ def make_layout(self, seq_len):
656
+ """Generates edited `Longformer` sparsity layout used by each head in the sparse attention.
657
+
658
+ Arguments:
659
+ seq_len: required: an integer determining the sequence length (number of tokens) for which the layout is built.
660
+
661
+ Return:
662
+ layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the `BSLongformer` sparsity layout of all heads
663
+ """
664
+
665
+ layout = self.setup_layout(seq_len)
666
+ for h in range(0, self.num_layout_heads):
667
+ layout = self.set_sliding_window_layout(h, layout)
668
+ layout = self.set_global_layout(h, layout)
669
+
670
+ layout = self.check_and_propagate_first_head_layout(layout)
671
+ return layout
672
+
673
+
674
+ class LocalSlidingWindowSparsityConfig(SparsityConfig):
675
+ """Configuration class to store `Local Sliding Window` sparsity configuration - a purely-local sliding window attention.
676
+ This class extends the parent class `SparsityConfig` and customizes it for `Local` sparsity.
677
+ """
678
+
679
+ def __init__(self, num_heads, block=16, num_sliding_window_blocks=3, attention='unidirectional'):
680
+ """Initialize the Local Sliding Window Sparsity Pattern Config.
681
+ For usage example please see, TODO DeepSpeed Sparse Transformer Tutorial
682
+ Arguments:
683
+ num_heads: required: an integer determining number of attention heads of the layer.
684
+ block: optional: an integer determining the block size. The current implementation of sparse self-attention is based on blocked sparse matrices, in which this parameter defines the size of such blocks, `Block X Block`.
685
+ num_sliding_window_blocks: optional: an integer determining the number of blocks in sliding local attention window.
686
+ attention: optional: a string determining the attention type. Attention can be `unidirectional`, as in autoregressive models, in which tokens attend only to tokens that appear before them in the context; in that case, the upper triangular part of the attention matrix is empty, as in the figure above. Or it can be `bidirectional`, as in BERT, in which tokens can attend to any other tokens before or after them; then, the upper triangular part of the attention matrix is the mirror of the lower triangular part in the figure above.
687
+ """
688
+
689
+ super().__init__(num_heads, block)
690
+ self.num_sliding_window_blocks = num_sliding_window_blocks
691
+ self.attention = attention
692
+
693
+ def set_sliding_window_layout(self, h, layout):
694
+ """Sets sliding local attention layout used by the given head in the sparse attention.
695
+ Arguments:
696
+ h: required: an integer determining head index
697
+ layout: required: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity layout of all heads; may not be completely set at this step
698
+ Return:
699
+ layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the sparsity layout of all heads in which the local sliding window layout is set
700
+ """
701
+
702
+ num_blocks = layout.shape[1]
703
+ if (num_blocks < self.num_sliding_window_blocks):
704
+ raise ValueError(
705
+ f'Number of sliding window blocks, {self.num_sliding_window_blocks}, must not exceed the overall number of blocks in a row, {num_blocks}!'
706
+ )
707
+
708
+ w = self.num_sliding_window_blocks // 2
709
+ for row in range(0, num_blocks):
710
+ start = max(0, row - w)
711
+ end = min(row + w + 1, num_blocks) if self.attention == "bidirectional" else row + 1
712
+ layout[h, row, start:end] = 1
713
+ return layout
714
+
715
+ def make_layout(self, seq_len):
716
+ """Generates `Local Sliding Window` sparsity layout used by each head in the sparse attention.
717
+ Arguments:
718
+ seq_len: required: an integer determining the sequence length (number of tokens) for which the layout is built.
719
+ Return:
720
+ layout: a tensor of dimension (num_heads, num_blocks, num_blocks) containing the `Local Sliding Window` sparsity layout of all heads
721
+ """
722
+
723
+ layout = self.setup_layout(seq_len)
724
+ for h in range(0, self.num_layout_heads):
725
+ layout = self.set_sliding_window_layout(h, layout)
726
+ layout = self.check_and_propagate_first_head_layout(layout)
727
+ return layout
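As a quick illustration of how these configs are consumed, the sketch below builds a layout from LocalSlidingWindowSparsityConfig; the parameter values are assumptions chosen only for the example, and seq_len is picked as a multiple of block.

from deepspeed.ops.sparse_attention.sparsity_config import LocalSlidingWindowSparsityConfig

# Hypothetical parameters, for illustration only.
config = LocalSlidingWindowSparsityConfig(num_heads=8,
                                          block=16,
                                          num_sliding_window_blocks=5,
                                          attention='unidirectional')
layout = config.make_layout(seq_len=1024)      # 0/1 block mask, shape (8, 64, 64)
print(layout.shape, int(layout[0, -1].sum()))  # last row keeps w + 1 = 3 causal blocks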
venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/trsrc/__init__.py ADDED
@@ -0,0 +1,37 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import sys
7
+ import os
8
+
9
+
10
+ def _build_file_index(directory, suffix='.tr'):
11
+ """Build an index of source files and their basenames in a given directory.
12
+
13
+ Args:
14
+ directory (string): the directory to index
15
+ suffix (string): index files with this suffix
16
+
17
+ Returns:
18
+ list: A list of tuples of the form [(basename, absolute path), ...]
19
+ """
20
+
21
+ index = []
22
+
23
+ for fname in os.listdir(directory):
24
+ if fname.endswith(suffix):
25
+ basename = fname[:fname.rfind(suffix)] # strip the suffix
26
+ path = os.path.join(directory, fname)
27
+ index.append((basename, path))
28
+
29
+ return index
30
+
31
+
32
+ # Go over all local source files and parse them as strings
33
+ _module = sys.modules[_build_file_index.__module__]
34
+ _directory = os.path.dirname(os.path.realpath(__file__))
35
+ for name, fname in _build_file_index(_directory):
36
+ with open(fname, 'r') as fin:
37
+ setattr(_module, name, fin.read())
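After import, each *.tr source in this directory is exposed as a module-level string attribute named after the file's basename. A minimal sketch of how callers read these kernel strings (the printed values in the comments are illustrative):

from deepspeed.ops.sparse_attention import trsrc

# softmax_fwd.tr -> trsrc.softmax_fwd, matmul.tr -> trsrc.matmul, etc.
print(type(trsrc.softmax_fwd))        # <class 'str'>
print(trsrc.matmul.splitlines()[0])   # first line of the matmul kernel source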
venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/trsrc/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.07 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/trsrc/matmul.tr ADDED
@@ -0,0 +1,208 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ /*
7
+ DeepSpeed note, code taken & adapted from commit 9aa94789f13ada713af36cfd8cca2fc9a7f6b79a
8
+ https://github.com/ptillet/torch-blocksparse/blob/master/torch_blocksparse/matmul.py
9
+ */
10
+
11
+ __global__ void NAME (TYPE* A __readonly __noalias __aligned(16),
12
+ TYPE* B __readonly __noalias __aligned(16),
13
+ TYPE* C __noalias __aligned(16),
14
+ int lda __multipleof(8),
15
+ int ldb __multipleof(8),
16
+ int ldc __multipleof(8),
17
+ long stride_za __multipleof(8),
18
+ long stride_zb __multipleof(8),
19
+ long stride_zc __multipleof(8),
20
+ long stride_ha __multipleof(8),
21
+ long stride_hb __multipleof(8),
22
+ long stride_hc __multipleof(8),
23
+ int DS0, int DS1,
24
+ int SDD_K __multipleof(16),
25
+ int SDD_off_width,
26
+ int* lut, int* locks, int nlocks) {
27
+ /* ---------------- */
28
+ /* Prologue */
29
+ /* ---------------- */
30
+ // program ids
31
+ int pid0 = get_program_id(0);
32
+ int pid1 = get_program_id(1);
33
+ int pidz = get_program_id(2);
34
+ #ifdef SDD
35
+ // load LUT header
36
+ pid1 = pid1 + SDD_off_width;
37
+ int blockidm[TM] = (0 ... TM) / BLOCK;
38
+ int blockidn[TN] = (0 ... TN) / BLOCK;
39
+ int offlutm[TM] = blockidm*(TN/BLOCK)*4;
40
+ int offlutn[TN] = blockidn*4;
41
+ int *header = lut + pid1 * (TM/BLOCK) * (TN/BLOCK) * 4;
42
+ int z = *(header + 0);
43
+ int i[TM] = *(header + 1 + offlutm);
44
+ int j[TN] = *(header + 2 + offlutn);
45
+ int AS1 = SDD_K / TZ;
46
+ int lockid = select(TZ > 1, 1, 0);
47
+ int offka = pid0 * AS1;
48
+ int offkb = pid0 * AS1;
49
+ int offmc = 0;
50
+ int offnc = 0;
51
+ int offpa = 0;
52
+ int offpb = 0;
53
+ int maxid = TZ;
54
+ int offhc = 0;
55
+ int offha = z;
56
+ int offhb = z;
57
+ int ram[TM] = i*BLOCK + ((0 ... TM) % BLOCK);
58
+ int rbn[TN] = j*BLOCK + ((0 ... TN) % BLOCK);
59
+ #else
60
+ // load LUT header
61
+ int *header = lut + pid0 * 6;
62
+ int offset = *(header + 0);
63
+ int AS1 = *(header + 1);
64
+ int column = *(header + 2);
65
+ int depth = *(header + 3);
66
+ int lockid = *(header + 4);
67
+ int maxid = *(header + 5);
68
+ int *pinc = lut + offset;
69
+ int offhc = depth;
70
+ #ifdef DSD
71
+ // output offset
72
+ int offnc = pid1 * TN;
73
+ int offmc = column * TM;
74
+ int offpc = 0;
75
+ // dense input offset
76
+ int offnb = pid1 * TN;
77
+ int offkb __multipleof(8) = *pinc;
78
+ int offpb = 0;
79
+ // sparse input offset
80
+ int offma = 0;
81
+ int offka = 0;
82
+ long offpa __multipleof(8) = *(pinc + 1);
83
+ offpa = offpa * BLOCK * BLOCK;
84
+ int offha = 0;
85
+ int offhb = depth;
86
+ #endif
87
+ #ifdef DDS
88
+ // output offset
89
+ int offmc = pid1 * TM;
90
+ int offnc = column * TN;
91
+ int offpc = 0;
92
+ // dense input offset
93
+ int offma = pid1 * TM;
94
+ int offka __multipleof(8) = *pinc;
95
+ int offpa = 0;
96
+ // sparse input offset
97
+ int offnb = 0;
98
+ int offkb = 0;
99
+ long offpb __multipleof(8) = *(pinc + 1);
100
+ offpb = offpb * BLOCK * BLOCK;
101
+ int offha = depth;
102
+ int offhb = 0;
103
+ #endif
104
+ int ram[TM] = offma + 0 ... TM;
105
+ int rbn[TN] = offnb + 0 ... TN;
106
+ #endif
107
+ // initialize a, b pointers
108
+ int rka[TK] = offka + 0 ... TK;
109
+ int rkb[TK] = offkb + 0 ... TK;
110
+ TYPE* pa[TM, TK] = A + pidz * stride_za + offha * stride_ha + offpa + ram[:, newaxis] * STRIDE_AM + rka[newaxis, :] * STRIDE_AK;
111
+ TYPE* pb[TK, TN] = B + pidz * stride_zb + offhb * stride_hb + offpb + rbn[newaxis, :] * STRIDE_BN + rkb[:, newaxis] * STRIDE_BK;
112
+ // pre-fetch
113
+ #ifdef DDS
114
+ bool checkam[TM, TK] = ram[:, newaxis] < DS0;
115
+ #else
116
+ bool checkam[TM, TK] = AS1 > 0;
117
+ #endif
118
+ #ifdef DSD
119
+ bool checkbn[TK, TN] = rbn[newaxis, :] < DS0;
120
+ #else
121
+ bool checkbn[TK, TN] = AS1 > 0;
122
+ #endif
123
+ TYPE a[TM, TK] = checkam ? *pa : 0;
124
+ TYPE b[TK, TN] = checkbn ? *pb : 0;
125
+
126
+ /* ---------------- */
127
+ /* Inner Loop */
128
+ /* ---------------- */
129
+ // create result tile
130
+ float acc[TM, TN] = 0;
131
+ int step = TK;
132
+ for(int k = AS1; k > 0; k -= step) {
133
+ acc += a @ b;
134
+ // update pointers
135
+ #ifdef SDD
136
+ int inc_a = TK * STRIDE_AK;
137
+ int inc_b = TK * STRIDE_BK;
138
+ #else
139
+ pinc += 2;
140
+ #ifdef DSD
141
+ int inc_b __multipleof(8) = *pinc;
142
+ int inc_a __multipleof(8) = *(pinc + 1);
143
+ inc_b = inc_b * STRIDE_BK;
144
+ #endif
145
+ #ifdef DDS
146
+ int inc_a __multipleof(8) = *pinc;
147
+ int inc_b __multipleof(8) = *(pinc + 1);
148
+ inc_a = inc_a * STRIDE_AK;
149
+ #endif
150
+ #endif
151
+ pa += inc_a;
152
+ pb += inc_b;
153
+ // pre-fetch
154
+ bool checkak[TM, TK] = k > TK;
155
+ bool checkbk[TK, TN] = k > TK;
156
+ bool checka[TM, TK] = checkam && checkak;
157
+ bool checkb[TK, TN] = checkbk && checkbn;
158
+ a = *?(checka)pa;
159
+ b = *?(checkb)pb;
160
+ }
161
+ TYPE c[TM, TN] = acc;
162
+
163
+ /* ---------------- */
164
+ /* Epilogue */
165
+ /* ---------------- */
166
+ // initialize c pointers
167
+ #ifdef SDD
168
+ bool checkc[TM, TN] = 1;
169
+ // rematerialize
170
+ int rr_blockidm[TM] = (0 ... TM) / BLOCK;
171
+ int rr_blockidn[TN] = (0 ... TN) / BLOCK;
172
+ int rr_offlutm[TM] = rr_blockidm*(TN/BLOCK)*4;
173
+ int rr_offlutn[TN] = rr_blockidn*4;
174
+ int off_bkid[TM, TN] = 3 + rr_offlutm[:, newaxis] + rr_offlutn[newaxis, :];
175
+ int bkid[TM, TN] = *(header + off_bkid);
176
+ long offpc[TM, TN] = bkid * BLOCK * BLOCK;
177
+ // range within blocks
178
+ int rcm[TM] = (0 ... TM) % BLOCK;
179
+ int rcn[TN] = (0 ... TN) % BLOCK;
180
+ #else
181
+ int rcm[TM] = offmc + 0 ... TM;
182
+ int rcn[TN] = offnc + 0 ... TN;
183
+ #ifdef DSD
184
+ bool checkc[TM, TN] = rcn[newaxis, :] < DS0;
185
+ #endif
186
+ #ifdef DDS
187
+ bool checkc[TM, TN] = rcm[:, newaxis] < DS0;
188
+ #endif
189
+ #endif
190
+ TYPE* pc[TM, TN] = C + offpc + offhc*stride_hc + pidz*stride_zc + rcm[:, newaxis]*STRIDE_CM + rcn[newaxis, :]*STRIDE_CN;
191
+ // write-back directly
192
+ if(lockid == 0) {
193
+ *?(checkc) pc = c;
194
+ }
195
+ // accumulate partial result using spin-locks
196
+ else {
197
+ int *plock = locks + get_program_id(2)*nlocks*get_num_programs(1) + get_program_id(1)*nlocks + lockid - 1;
198
+ int *pcount = plock + get_num_programs(2)*get_num_programs(1)*nlocks;
199
+ for(int repeat = 1; repeat == 1; repeat = atomic_cas(plock, 0, 1));
200
+ int count = *pcount;
201
+ if(count == 0)
202
+ *?(checkc) pc = c;
203
+ else
204
+ *?(checkc) pc = c + *?(checkc)pc;
205
+ atomic_xchg(pcount, (count + 1) % maxid);
206
+ atomic_xchg(plock, 0);
207
+ }
208
+ }
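The kernel above is compiled in three variants selected by the SDD/DSD/DDS macros: SDD writes a packed block-sparse output from two dense operands, while DSD and DDS read one block-sparse operand and write a dense output. The sketch below is a dense PyTorch reference for the SDD case only, under the assumption that the LUT enumerates the non-zero blocks of the layout; it is an illustration, not a drop-in for the kernel.

import torch

def sdd_reference(a, b, layout, block):
    # a: (M, K), b: (K, N), layout: (M // block, N // block) 0/1 block mask.
    out = torch.empty(int(layout.sum()), block, block, dtype=a.dtype)
    for k, (i, j) in enumerate(layout.nonzero().tolist()):
        out[k] = a[i * block:(i + 1) * block, :] @ b[:, j * block:(j + 1) * block]
    return out  # packed non-zero blocks, one (block x block) tile per LUT entry

a, b = torch.randn(64, 32), torch.randn(32, 64)
layout = (torch.rand(4, 4) > 0.5).long()
print(sdd_reference(a, b, layout, 16).shape)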
venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/trsrc/softmax_bwd.tr ADDED
@@ -0,0 +1,61 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ /*
7
+ DeepSpeed note, code taken & adapted from commit 9aa94789f13ada713af36cfd8cca2fc9a7f6b79a
8
+ https://github.com/ptillet/torch-blocksparse/blob/master/torch_blocksparse/softmax.py
9
+ */
10
+
11
+ __global__ void softmax_bwd(TYPE * X __readonly __noalias __aligned(16),
12
+ float scale,
13
+ TYPE* DX __readonly __noalias __aligned(16),
14
+ int* LUT,
15
+ int sizemax,
16
+ long stride_zx __multipleof(BLOCK),
17
+ long stride_zdx __multipleof(BLOCK)) {
18
+ int pidhm = get_program_id(0);
19
+ int pidz = get_program_id(1);
20
+
21
+ // create index ranges
22
+ int rxm = pidhm % BLOCK;
23
+ int rbm = pidhm / BLOCK;
24
+ int rxn[TN] = (0 ... TN) % BLOCK;
25
+ int rbn[TN] = (0 ... TN) / BLOCK;
26
+
27
+ // extract information from look-up table
28
+ int* header = LUT + rbm * 2;
29
+ int size = *(header + 0);
30
+ int offset = *(header + 1);
31
+
32
+ // bounds checking on lut
33
+ bool check[TN] = rbn < size;
34
+ int rbmn[TN] = check ? rbn : size - 1;
35
+
36
+ // initialize pointers to block-sparse input
37
+ long blockid[TN] = *(LUT + offset + rbmn*4);
38
+
39
+ TYPE* px[TN] = X + pidz * stride_zx
40
+ + blockid * BLOCK * BLOCK
41
+ + rxm * BLOCK
42
+ + rxn;
43
+
44
+ TYPE* pdx[TN] = DX + pidz * stride_zdx
45
+ + blockid * BLOCK * BLOCK
46
+ + rxm * BLOCK
47
+ + rxn;
48
+
49
+ // compute fused softmax backward
50
+ TYPE x[TN] = check ? *px : 0;
51
+ TYPE dx[TN] = check ? *pdx : 0;
52
+ float Fdx[TN] = dx;
53
+ float Fx[TN] = x;
54
+ float Fxdx[TN] = Fdx*Fx;
55
+ float Fxdxsum = Fxdx[+];
56
+ float Fy[TN] = Fx * (Fdx - Fxdxsum) * scale;
57
+ TYPE y[TN] = Fy;
58
+
59
+ // write-back
60
+ *? (check)pdx = y;
61
+ }
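Per block-sparse row, the lines computing Fxdxsum and Fy = Fx * (Fdx - Fxdxsum) * scale are the standard softmax backward. A dense reference sketch, assuming X holds the softmax probabilities and DX the incoming gradient:

import torch

def softmax_bwd_reference(y, dy, scale=1.0):
    # Per row: dX = y * (dy - sum_j y_j * dy_j) * scale
    return y * (dy - (y * dy).sum(dim=-1, keepdim=True)) * scale

y = torch.softmax(torch.randn(2, 8), dim=-1)
dy = torch.randn(2, 8)
print(softmax_bwd_reference(y, dy).shape)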
venv/lib/python3.10/site-packages/deepspeed/ops/sparse_attention/trsrc/softmax_fwd.tr ADDED
@@ -0,0 +1,143 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ /*
7
+ DeepSpeed note, code taken & adapted from commit 9aa94789f13ada713af36cfd8cca2fc9a7f6b79a
8
+ https://github.com/ptillet/torch-blocksparse/blob/master/torch_blocksparse/softmax.py
9
+ */
10
+
11
+ __global__ void softmax_fwd(TYPE *X __readonly __noalias __aligned(16),
12
+ float scale,
13
+ int *LUT __readonly __noalias __aligned(16),
14
+ TYPE *RPE __readonly __noalias __aligned(16),
15
+ TYPE *KP_M __readonly __noalias __aligned(16),
16
+ TYPE *ATTN_M __readonly __noalias __aligned(16),
17
+ int num_blocks,
18
+ int sizemax,
19
+ long stride_zx __multipleof(BLOCK),
20
+ long stride_zrpe __multipleof(BLOCK),
21
+ int stride_hrpe __multipleof(BLOCK),
22
+ int stride_srpe __multipleof(BLOCK),
23
+ int stride_zkpm __multipleof(BLOCK),
24
+ int stride_zattnm __multipleof(BLOCK)){
25
+ int pidhm = get_program_id(0);
26
+ int pidz = get_program_id(1);
27
+
28
+ // create index ranges
29
+ int rxm = pidhm % BLOCK;
30
+ int rbm = pidhm / BLOCK;
31
+ int rxn[TN] = (0 ... TN) % BLOCK;
32
+ int rbn[TN] = (0 ... TN) / BLOCK;
33
+
34
+ // extract information from look-up table
35
+ int* header = LUT + rbm * 2;
36
+ int size = *(header + 0);
37
+ int offset = *(header + 1);
38
+
39
+ bool check[TN] = rbn < size;
40
+ int rbmn[TN] = check ? rbn : size - 1;
41
+
42
+ // block id and column id
43
+ long blockid [TN] = *(LUT + offset + rbmn*4 + 0);
44
+ long columnid[TN] = *(LUT + offset + rbmn*4 + 1);
45
+ long rowid [TN] = *(LUT + offset + rbmn*4 + 2);
46
+ long headid [TN] = *(LUT + offset + rbmn*4 + 3);
47
+
48
+ // pointers to X
49
+ TYPE* px[TN] = X + pidz * stride_zx
50
+ + blockid * BLOCK * BLOCK
51
+ + rxm * BLOCK
52
+ + rxn;
53
+ #ifdef APPLY_RPE
54
+ // pointers to relative position embedding
55
+ TYPE* prpe[TN] = RPE + pidz * stride_zrpe
56
+ + headid * stride_hrpe
57
+ + columnid * BLOCK
58
+ + rowid * BLOCK * stride_srpe
59
+ + rxm * stride_srpe
60
+ + rxn;
61
+ #endif
62
+
63
+ #ifdef APPLY_KP_MASK
64
+ // pointers to key padding mask
65
+ TYPE* pkp_m[TN] = KP_M + pidz * stride_zkpm
66
+ + columnid * BLOCK
67
+ + rxn;
68
+ #endif
69
+
70
+ #ifdef APPLY_ATTN_MASK
71
+ // pointers to attention mask
72
+ TYPE* pattn_m[TN] = ATTN_M + columnid * BLOCK
73
+ + rowid * BLOCK * stride_zattnm
74
+ + rxm * stride_zattnm
75
+ + rxn;
76
+ #endif
77
+
78
+ // load input
79
+ TYPE x[TN] = check ? *px : -INFINITY;
80
+
81
+ #ifdef APPLY_RPE
82
+ // load relative position embedding
83
+ TYPE rpe[TN] = check ? *prpe : 0;
84
+ #endif
85
+
86
+ #ifdef APPLY_KP_MASK
87
+ // load key-padding mask
88
+ TYPE kp_m[TN] = check ? *pkp_m : -INFINITY;
89
+ #endif
90
+
91
+ #ifdef APPLY_ATTN_MASK
92
+ // load attention mask
93
+ TYPE attn_m[TN] = check ? *pattn_m : -INFINITY;
94
+ #endif
95
+
96
+ // compute softmax in float
97
+ #ifdef APPLY_RPE
98
+ float Frpe[TN] = rpe;
99
+ #endif
100
+
101
+ #ifdef APPLY_KP_MASK
102
+ float Fkp_m[TN] = kp_m;
103
+ #endif
104
+
105
+ #ifdef APPLY_ATTN_MASK
106
+ float Fattn_m[TN] = attn_m;
107
+ #endif
108
+
109
+ #ifdef KP_MASK_MUL
110
+ Fkp_m = (Fkp_m == 0) ? (float[TN])-INFINITY : 0;
111
+ #endif
112
+
113
+ #ifdef ATTN_MASK_MUL
114
+ Fattn_m = (Fattn_m == 0) ? (float[TN])-INFINITY : 0;
115
+ #endif
116
+
117
+ float Fx[TN] = x;
118
+
119
+ #ifdef APPLY_SCALE
120
+ Fx = Fx * scale; // apply scale
121
+ #endif
122
+
123
+ #ifdef APPLY_RPE
124
+ Fx = Fx + Frpe; // apply relative position embedding
125
+ #endif
126
+
127
+ #ifdef APPLY_KP_MASK
128
+ Fx = Fx + Fkp_m; // apply key padding mask
129
+ #endif
130
+
131
+ #ifdef APPLY_ATTN_MASK
132
+ Fx = Fx + Fattn_m; // apply attention mask
133
+ #endif
134
+
135
+ float Fxmax = Fx[max];
136
+ float Fy[TN] = exp(Fx - Fxmax);
137
+ float Fysum = (check ? Fy : 0)[+];
138
+
139
+ // write-back in half/float
140
+ TYPE y[TN] = Fy;
141
+ TYPE ysum = Fysum;
142
+ *?(check)px = y / ysum;
143
+ }
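Setting the block-sparse indexing aside, the kernel computes a numerically stable row softmax of scale * x plus the optional relative-position-embedding, key-padding-mask, and attention-mask terms. A dense reference sketch of that math (the optional tensors are assumed to be pre-broadcast additive terms):

import torch

def softmax_fwd_reference(x, scale=1.0, rpe=None, kp_mask=None, attn_mask=None):
    z = x * scale
    for extra in (rpe, kp_mask, attn_mask):
        if extra is not None:
            z = z + extra
    z = z - z.max(dim=-1, keepdim=True).values   # mirrors the Fxmax subtraction
    y = z.exp()
    return y / y.sum(dim=-1, keepdim=True)

print(softmax_fwd_reference(torch.randn(2, 8), scale=0.125).sum(dim=-1))  # rows sum to 1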
venv/lib/python3.10/site-packages/deepspeed/ops/transformer/__init__.py ADDED
@@ -0,0 +1,9 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .transformer import DeepSpeedTransformerLayer, DeepSpeedTransformerConfig
7
+ from .inference.config import DeepSpeedInferenceConfig
8
+ from ...model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
9
+ from .inference.moe_inference import DeepSpeedMoEInferenceConfig, DeepSpeedMoEInference
venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__init__.py ADDED
@@ -0,0 +1,8 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .config import DeepSpeedInferenceConfig
7
+ from ....model_implementations.transformers.ds_transformer import DeepSpeedTransformerInference
8
+ from .moe_inference import DeepSpeedMoEInferenceConfig, DeepSpeedMoEInference
venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (463 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/config.cpython-310.pyc ADDED
Binary file (5.02 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/diffusers_2d_transformer.cpython-310.pyc ADDED
Binary file (562 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/diffusers_transformer_block.cpython-310.pyc ADDED
Binary file (3.18 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/ds_mlp.cpython-310.pyc ADDED
Binary file (3.36 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/triton_ops.cpython-310.pyc ADDED
Binary file (3.1 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/bias_add.py ADDED
@@ -0,0 +1,26 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Optional
7
+ import torch
8
+ from deepspeed.ops.op_builder import SpatialInferenceBuilder
9
+
10
+ spatial_cuda_module = None
11
+
12
+
13
+ def nhwc_bias_add(activation: torch.Tensor,
14
+ bias: torch.Tensor,
15
+ other: Optional[torch.Tensor] = None,
16
+ other_bias: Optional[torch.Tensor] = None) -> torch.Tensor:
17
+ global spatial_cuda_module
18
+ if spatial_cuda_module is None:
19
+ spatial_cuda_module = SpatialInferenceBuilder().load()
20
+
21
+ if other is None:
22
+ return spatial_cuda_module.nhwc_bias_add(activation, bias)
23
+ elif other_bias is None:
24
+ return spatial_cuda_module.nhwc_bias_add_add(activation, bias, other)
25
+ else:
26
+ return spatial_cuda_module.nhwc_bias_add_bias_add(activation, bias, other, other_bias)
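The dispatch above picks one of three fused NHWC kernels depending on which optional arguments are present. The sketch below is a plain dense reference of the intended arithmetic, under the assumption that the biases are per-channel vectors broadcast over (N, H, W, C) activations:

import torch

def nhwc_bias_add_reference(activation, bias, other=None, other_bias=None):
    out = activation + bias                     # bias broadcast over the channel dim
    if other is not None:
        out = out + other
        if other_bias is not None:
            out = out + other_bias
    return out

x = torch.randn(1, 8, 8, 64)                    # (N, H, W, C)
print(nhwc_bias_add_reference(x, torch.zeros(64)).shape)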
venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/config.py ADDED
@@ -0,0 +1,134 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import json
7
+ import torch
8
+ from deepspeed.utils.types import ActivationFuncType, NormType
9
+
10
+
11
+ class TransformerConfig():
12
+
13
+ def __init__(self, hidden_size, intermediate_size, heads, num_hidden_layers):
14
+ self.layer_id = -1
15
+ self.hidden_size = hidden_size
16
+ self.intermediate_size = intermediate_size
17
+ self.heads = heads
18
+ self.num_hidden_layers = num_hidden_layers
19
+
20
+
21
+ class DeepSpeedInferenceConfig(TransformerConfig):
22
+ """Initialize the DeepSpeed Transformer Config.
23
+ Arguments:
24
+ hidden_size: The hidden size of the transformer layer
25
+ intermediate_size: The intermediate size of the feed-forward part of transformer layer
26
+ heads: The number of heads in the self-attention of the transformer layer
27
+ num_hidden_layers: The number of transformer layers
28
+ layer_norm_eps: The epsilon value for the layer norm
29
+ local_rank: Optional: The rank of GPU running the transformer kernel, it is not required
30
+ to use if the model already set the current device, otherwise need to set it
31
+ so that the transformer kernel can work on the right device
32
+ mp_size (optional): This argument is mainly used to create the parameters on the kernel side
33
+ using model-parallel architecture. If the client model already takes care of this, there is no
34
+ need to pass this argument.
35
+ pre_layer_norm: Select between Pre-LN or Post-LN transformer architecture
36
+ stochastic_mode: Enable for high performance, please note that this flag has some level of
37
+ non-determinism and can produce different results on different runs. However, we have seen
38
+ that by enabling it, the pretraining tasks such as BERT are not affected and can obtain
39
+ a high accuracy level. On the other hand, for the downstream tasks, such as fine-tuning, we recommend
40
+ to turn it off in order to be able to reproduce the same result through the regular kernel execution.
41
+
42
+ scale_attention: If true, both q and k are scaled by 1/sqrt(attention_heads) before attention computation.
43
+ return_tuple: if True, returns the transformer output as a tuple, otherwise returns as a tensor
44
+ bigscience_bloom: This flag is added temporarily for supporting the BLOOM-176B model architecture.
45
+ use_triton: This flag is to enable triton kernels in inference or not.
46
+ invert_mask: If True, the attention mask is inverted when passed to attention block.
47
+ """
48
+
49
+ def __init__(self,
50
+ hidden_size=-1,
51
+ intermediate_size=-1,
52
+ heads=-1,
53
+ num_hidden_layers=-1,
54
+ layer_norm_eps=1e-12,
55
+ local_rank=-1,
56
+ mp_size=1,
57
+ dtype=torch.float16,
58
+ pre_layer_norm=True,
59
+ norm_type=NormType.LayerNorm,
60
+ stochastic_mode=False,
61
+ scale_attention=True,
62
+ triangular_masking=True,
63
+ local_attention=False,
64
+ window_size=256,
65
+ rotary_dim=-1,
66
+ rotate_half=False,
67
+ rotate_every_two=True,
68
+ return_tuple=True,
69
+ mlp_after_attn=True,
70
+ mlp_act_func_type=ActivationFuncType.GELU,
71
+ training_mp_size=1,
72
+ bigscience_bloom=False,
73
+ max_out_tokens=1024,
74
+ min_out_tokens=1,
75
+ enable_qkv_quantization=False,
76
+ use_mup=False,
77
+ scale_attn_by_inverse_layer_idx=False,
78
+ return_single_tuple=False,
79
+ set_empty_params=False,
80
+ transposed_mode=False,
81
+ use_triton=False,
82
+ triton_autotune=False,
83
+ num_kv=-1,
84
+ rope_theta=10000,
85
+ invert_mask=True):
86
+ super(DeepSpeedInferenceConfig,
87
+ self).__init__(hidden_size, (intermediate_size if intermediate_size > 0 else 4 * hidden_size), heads,
88
+ num_hidden_layers)
89
+ self.dtype = dtype
90
+ self.pre_layer_norm = pre_layer_norm
91
+ self.norm_type = norm_type
92
+ self.local_rank = local_rank
93
+ self.stochastic_mode = stochastic_mode
94
+ self.epsilon = layer_norm_eps
95
+ self.mp_size = mp_size
96
+ self.scale_attention = scale_attention
97
+ self.triangular_masking = triangular_masking
98
+ self.local_attention = local_attention
99
+ self.window_size = window_size
100
+ self.rotary_dim = rotary_dim
101
+ self.rotate_half = rotate_half
102
+ self.rotate_every_two = rotate_every_two
103
+ self.return_tuple = return_tuple
104
+ self.mlp_after_attn = mlp_after_attn
105
+ self.mlp_act_func_type = mlp_act_func_type
106
+ self.specialized_mode = False
107
+ self.training_mp_size = training_mp_size
108
+ self.bigscience_bloom = bigscience_bloom
109
+ self.max_out_tokens = max_out_tokens
110
+ self.min_out_tokens = min_out_tokens
111
+ self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
112
+ self.enable_qkv_quantization = enable_qkv_quantization
113
+ self.use_mup = use_mup
114
+ self.return_single_tuple = return_single_tuple
115
+ self.set_empty_params = set_empty_params
116
+ self.transposed_mode = transposed_mode
117
+ self.use_triton = use_triton
118
+ self.triton_autotune = triton_autotune
119
+ self.num_kv = num_kv
120
+ self.rope_theta = rope_theta
121
+ self.invert_mask = invert_mask
122
+
123
+ @classmethod
124
+ def from_dict(cls, json_object):
125
+ config = DeepSpeedInferenceConfig()
126
+ for key, value in json_object.items():
127
+ config.__dict__[key] = value
128
+ return config
129
+
130
+ @classmethod
131
+ def from_json_file(cls, json_file):
132
+ with open(json_file, "r", encoding='utf-8') as reader:
133
+ text = reader.read()
134
+ return cls.from_dict(json.loads(text))
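Since from_dict simply copies entries into the config's __dict__, a config can be built either directly or from a plain dict/JSON. A minimal sketch with arbitrary values:

import torch
from deepspeed.ops.transformer.inference.config import DeepSpeedInferenceConfig

# Direct construction; unspecified fields keep the defaults listed above.
cfg = DeepSpeedInferenceConfig(hidden_size=1024, heads=16, dtype=torch.float16)

# Equivalent construction from a plain dict (what from_json_file does after json.loads).
cfg2 = DeepSpeedInferenceConfig.from_dict({"hidden_size": 1024, "heads": 16})
print(cfg.intermediate_size, cfg2.hidden_size)   # 4096 1024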
venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/diffusers_2d_transformer.py ADDED
@@ -0,0 +1,10 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+
7
+ class Diffusers2DTransformerConfig():
8
+
9
+ def __init__(self, int8_quantization=False):
10
+ self.int8_quantization = int8_quantization
venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/diffusers_attention.py ADDED
@@ -0,0 +1,196 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import math
7
+ import torch
8
+ from torch.autograd import Function
9
+ import torch.nn as nn
10
+ from packaging import version as pkg_version
11
+ from deepspeed.utils.logging import log_dist
12
+ from deepspeed.accelerator import get_accelerator
13
+ from deepspeed.ops.op_builder import InferenceBuilder
14
+
15
+ # Cuda modules will be imported if needed
16
+ inference_module = None
17
+ minus_inf = -10000.0
18
+ triton_flash_attn = None
19
+
20
+
21
+ def load_triton_flash_attn():
22
+ global triton_flash_attn
23
+ try:
24
+ import triton
25
+ except ImportError:
26
+ raise ImportError("Please install triton 2.0+ or `pip install deepspeed[sd]`")
27
+
28
+ if pkg_version.parse(triton.__version__) < pkg_version.parse("2.0"):
29
+ raise ImportError("Please install triton 2.0+ or `pip install deepspeed[sd]`")
30
+
31
+ from .triton_ops import triton_flash_attn
32
+
33
+
34
+ class DeepSpeedDiffusersAttentionFunction(Function):
35
+
36
+ @staticmethod
37
+ def forward(ctx, input, context, input_mask, config, attn_qkvw, attn_qw, attn_kw, attn_vw, attn_qkvb,
38
+ num_attention_heads_per_partition, norm_factor, hidden_size_per_partition, attn_ow, attn_ob,
39
+ do_out_bias, score_context_func, linear_func, triton_flash_attn_kernel, rope_theta):
40
+
41
+ def _transpose_for_context(x):
42
+ x = x.permute(0, 2, 1, 3)
43
+ new_x_layer_shape = x.size()[:-2] + \
44
+ (hidden_size_per_partition,)
45
+ return x.reshape(*new_x_layer_shape)
46
+
47
+ def _transpose_for_scores(x):
48
+ attention_head_size = x.shape[-1] // num_attention_heads_per_partition
49
+ new_x_shape = x.size()[:-1] + (num_attention_heads_per_partition, attention_head_size)
50
+ x = x.reshape(*new_x_shape)
51
+ x = x.permute(0, 2, 1, 3)
52
+ return x.contiguous()
53
+
54
+ def selfAttention_fp(input, context, input_mask):
55
+ if config.dtype in [torch.half, torch.float16] and input.dtype == torch.float32:
56
+ input = input.half()
57
+ head_size = input.shape[-1] // config.heads
58
+ do_flash_attn = (head_size <= 128)
59
+ scale = (1 / norm_factor) * (1 / norm_factor)
60
+ if do_flash_attn and context is None:
61
+ qkv_out = linear_func(input, attn_qkvw, attn_qkvb if attn_qkvb is not None else attn_qkvw, attn_qkvb
62
+ is not None, do_flash_attn, config.heads, False, rope_theta)
63
+
64
+ context_layer = triton_flash_attn_kernel(qkv_out[0], qkv_out[1], qkv_out[2], scale,
65
+ input.shape[-2] % 128 == 0)
66
+ context_layer = _transpose_for_context(context_layer[:, :, :, :head_size])
67
+
68
+ else:
69
+ do_flash_attn = False
70
+ if context is not None:
71
+ query = torch.matmul(input, attn_qw)
72
+ key = torch.matmul(context, attn_kw)
73
+ value = torch.matmul(context, attn_vw)
74
+ else:
75
+ qkv = torch.matmul(input, attn_qkvw)
76
+ query, key, value = qkv.chunk(3, dim=-1)
77
+ query = query.contiguous()
78
+ key = key.contiguous()
79
+ value = value.contiguous()
80
+ query, key, value = inference_module.pad_transform_fp16(query, key, value, config.heads, do_flash_attn)
81
+ attention_scores = (torch.matmul(query, key.transpose(-1, -2)) * scale).softmax(dim=-1)
82
+ context_layer = _transpose_for_context(torch.matmul(attention_scores, value))
83
+
84
+ output = linear_func(context_layer, attn_ow, attn_ob, do_out_bias, False, config.heads, False, rope_theta)
85
+ return output
86
+
87
+ output = selfAttention_fp(input, context, input_mask)
88
+
89
+ return output
90
+
91
+ @staticmethod
92
+ def backward(ctx, grad_output, grad_output1, grad_output2, grad_output3):
93
+ raise RuntimeError('You are running with DeepSpeed Inference mode. \
94
+ Please switch to Training mode for running backward!')
95
+
96
+
97
+ class DeepSpeedDiffusersAttention(nn.Module):
98
+ """Initialize the DeepSpeed Transformer Layer.
99
+ Arguments:
100
+ layer_id: The layer index starting from 0, e.g. if model has 24 transformer layers,
101
+ layer_id will be 0,1,2...23 when each layer object is instantiated
102
+ config: An object of DeepSpeedInferenceConfig
103
+ """
104
+ layer_id = 0
105
+
106
+ def __init__(
107
+ self,
108
+ config,
109
+ ):
110
+ super(DeepSpeedDiffusersAttention, self).__init__()
111
+
112
+ self.config = config
113
+ self.config.layer_id = DeepSpeedDiffusersAttention.layer_id
114
+ DeepSpeedDiffusersAttention.layer_id += 1
115
+ device = get_accelerator().current_device_name() if config.bigscience_bloom else 'cpu'
116
+ qkv_size_per_partition = (self.config.hidden_size // self.config.mp_size) * 3
117
+
118
+ data_type = self.config.dtype
119
+ data_type_fp = torch.half if self.config.dtype == torch.int8 else self.config.dtype
120
+ global inference_module
121
+ if inference_module is None:
122
+ builder = InferenceBuilder()
123
+ inference_module = builder.load()
124
+
125
+ if DeepSpeedDiffusersAttention.layer_id == 1:
126
+ log_dist(f"DeepSpeed-Attention config: {self.config.__dict__}", [0])
127
+
128
+ self.attn_qkvw = nn.Parameter(torch.empty(self.config.hidden_size,
129
+ qkv_size_per_partition,
130
+ dtype=data_type,
131
+ device=device),
132
+ requires_grad=False)
133
+ self.attn_kw = nn.Parameter(torch.empty(self.config.hidden_size,
134
+ self.config.hidden_size,
135
+ dtype=data_type,
136
+ device=device),
137
+ requires_grad=False)
138
+ self.attn_vw = nn.Parameter(torch.empty(self.config.hidden_size,
139
+ self.config.hidden_size,
140
+ dtype=data_type,
141
+ device=device),
142
+ requires_grad=False)
143
+ self.attn_qw = nn.Parameter(torch.empty(self.config.hidden_size,
144
+ self.config.hidden_size,
145
+ dtype=data_type,
146
+ device=device),
147
+ requires_grad=False)
148
+ self.attn_qkvb = nn.Parameter(torch.empty(qkv_size_per_partition, dtype=data_type_fp, device=device),
149
+ requires_grad=False)
150
+ out_size_per_partition = self.config.hidden_size // self.config.mp_size
151
+ self.attn_ow = nn.Parameter(torch.empty(out_size_per_partition,
152
+ self.config.hidden_size,
153
+ dtype=data_type,
154
+ device=device),
155
+ requires_grad=False)
156
+
157
+ self.attn_ob = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
158
+ requires_grad=False)
159
+ self.do_out_bias = True
160
+
161
+ if triton_flash_attn is None:
162
+ load_triton_flash_attn()
163
+ self.triton_flash_attn_kernel = triton_flash_attn()
164
+ self.num_attention_heads_per_partition = self.config.heads // self.config.mp_size
165
+ self.hidden_size_per_partition = self.config.hidden_size // self.config.mp_size
166
+ self.hidden_size_per_attention_head = self.config.hidden_size // self.config.heads
167
+
168
+ self.norm_factor = math.sqrt(math.sqrt(self.config.hidden_size // self.config.heads))
169
+
170
+ if self.config.scale_attn_by_inverse_layer_idx is True:
171
+ self.norm_factor *= math.sqrt(self.config.layer_id + 1)
172
+ # https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/gpt2/modeling_gpt2.py#L191
173
+
174
+ if self.config.dtype in [torch.float16, torch.int8]:
175
+ self.score_context_func = inference_module.softmax_context_fp16
176
+ self.linear_func = inference_module.linear_layer_fp16
177
+ self.allocate_workspace = inference_module.allocate_workspace_fp16
178
+ else:
179
+ self.score_context_func = inference_module.softmax_context_fp32
180
+ self.linear_func = inference_module.linear_layer_fp32
181
+ self.allocate_workspace = inference_module.allocate_workspace_fp32
182
+
183
+ def forward(self, input, context=None, input_mask=None):
184
+ if self.config.layer_id == 0:
185
+ self.allocate_workspace(self.config.hidden_size, self.config.heads,
186
+ input.size()[1],
187
+ input.size()[0], DeepSpeedDiffusersAttention.layer_id, self.config.mp_size, False,
188
+ 0, self.config.max_out_tokens, self.config.min_out_tokens)
189
+ output = DeepSpeedDiffusersAttentionFunction.apply(input, context, input_mask, self.config, self.attn_qkvw,
190
+ self.attn_qw, self.attn_kw, self.attn_vw, self.attn_qkvb,
191
+ self.num_attention_heads_per_partition, self.norm_factor,
192
+ self.hidden_size_per_partition, self.attn_ow, self.attn_ob,
193
+ self.do_out_bias, self.score_context_func, self.linear_func,
194
+ self.triton_flash_attn_kernel, self.config.rope_theta)
195
+
196
+ return output
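One detail worth spelling out: norm_factor above is a double square root because selfAttention_fp applies scale = (1 / norm_factor) ** 2 to the score matrix, which recovers the usual 1 / sqrt(head_dim) attention scaling. A quick numeric check with arbitrary sizes:

import math

hidden_size, heads = 1024, 16          # illustrative values only
head_dim = hidden_size // heads
norm_factor = math.sqrt(math.sqrt(head_dim))
scale = (1 / norm_factor) * (1 / norm_factor)
assert abs(scale - 1 / math.sqrt(head_dim)) < 1e-12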
venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/diffusers_transformer_block.py ADDED
@@ -0,0 +1,104 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+
9
+ from deepspeed import module_inject
10
+ from .diffusers_attention import DeepSpeedDiffusersAttention
11
+ from .bias_add import nhwc_bias_add
12
+ from .diffusers_2d_transformer import Diffusers2DTransformerConfig
13
+ from deepspeed.ops.op_builder import InferenceBuilder, SpatialInferenceBuilder
14
+ from deepspeed.utils.types import ActivationFuncType
15
+
16
+ # Ops will be loaded on demand
17
+ transformer_cuda_module = None
18
+ spatial_cuda_module = None
19
+
20
+
21
+ def load_transformer_module():
22
+ global transformer_cuda_module
23
+ if transformer_cuda_module is None:
24
+ transformer_cuda_module = InferenceBuilder().load()
25
+ return transformer_cuda_module
26
+
27
+
28
+ def load_spatial_module():
29
+ global spatial_cuda_module
30
+ if spatial_cuda_module is None:
31
+ spatial_cuda_module = SpatialInferenceBuilder().load()
32
+ return spatial_cuda_module
33
+
34
+
35
+ class DeepSpeedDiffusersTransformerBlock(nn.Module):
36
+
37
+ def __init__(self, equivalent_module: nn.Module, config: Diffusers2DTransformerConfig):
38
+ super(DeepSpeedDiffusersTransformerBlock, self).__init__()
39
+ self.quantizer = module_inject.GroupQuantizer(q_int8=config.int8_quantization)
40
+ # Ensure ops are built by the time we start running
41
+ self.config = config
42
+
43
+ self.ff1_w = self.quantizer.quantize(
44
+ nn.Parameter(equivalent_module.ff.net[0].proj.weight.data, requires_grad=False))
45
+ self.ff1_b = nn.Parameter(equivalent_module.ff.net[0].proj.bias.data, requires_grad=False)
46
+ self.ff2_w = self.quantizer.quantize(nn.Parameter(equivalent_module.ff.net[2].weight.data,
47
+ requires_grad=False))
48
+ self.ff2_b = nn.Parameter(equivalent_module.ff.net[2].bias.data, requires_grad=False)
49
+
50
+ self.norm1_g = nn.Parameter(equivalent_module.norm1.weight.data, requires_grad=False)
51
+ self.norm1_b = nn.Parameter(equivalent_module.norm1.bias.data, requires_grad=False)
52
+ self.norm1_eps = equivalent_module.norm1.eps
53
+
54
+ self.norm2_g = nn.Parameter(equivalent_module.norm2.weight.data, requires_grad=False)
55
+ self.norm2_b = nn.Parameter(equivalent_module.norm2.bias.data, requires_grad=False)
56
+ self.norm2_eps = equivalent_module.norm2.eps
57
+
58
+ self.norm3_g = nn.Parameter(equivalent_module.norm3.weight.data, requires_grad=False)
59
+ self.norm3_b = nn.Parameter(equivalent_module.norm3.bias.data, requires_grad=False)
60
+ self.norm3_eps = equivalent_module.norm3.eps
61
+
62
+ self.attn_1 = equivalent_module.attn1
63
+ self.attn_2 = equivalent_module.attn2
64
+
65
+ # Pull the bias in if we can
66
+ if isinstance(self.attn_1, DeepSpeedDiffusersAttention):
67
+ self.attn_1.do_out_bias = False
68
+ self.attn_1_bias = self.attn_1.attn_ob
69
+ else:
70
+ self.attn_1_bias = nn.Parameter(torch.zeros_like(self.norm2_g), requires_grad=False)
71
+
72
+ # Pull the bias in if we can
73
+ if isinstance(self.attn_2, DeepSpeedDiffusersAttention):
74
+ self.attn_2.do_out_bias = False
75
+ self.attn_2_bias = self.attn_2.attn_ob
76
+ else:
77
+ self.attn_2_bias = nn.Parameter(torch.zeros_like(self.norm3_g), requires_grad=False)
78
+
79
+ self.transformer_cuda_module = load_transformer_module()
80
+ load_spatial_module()
81
+
82
+ def forward(self, hidden_states, context=None, timestep=None, **kwargs):
83
+ # In v0.12.0 of diffusers, several new kwargs were added. Capturing
84
+ # those with kwargs to maintain backward compatibility
85
+
86
+ # In v0.11.0 of diffusers, the kwarg was changed from 'context' to 'encoder_hidden_states'
87
+ # This is so we can support older and newer versions of diffusers
88
+ if "encoder_hidden_states" in kwargs and kwargs["encoder_hidden_states"] is not None:
89
+ context = kwargs["encoder_hidden_states"]
90
+
91
+ out_norm_1 = self.transformer_cuda_module.layer_norm(hidden_states, self.norm1_g, self.norm1_b, self.norm1_eps)
92
+ out_attn_1 = self.attn_1(out_norm_1)
93
+
94
+ out_norm_2, out_attn_1 = self.transformer_cuda_module.layer_norm_residual_store_pre_ln_res(
95
+ out_attn_1, self.attn_1_bias, hidden_states, self.norm2_g, self.norm2_b, self.norm2_eps)
96
+ out_attn_2 = self.attn_2(out_norm_2, context=context)
97
+ out_norm_3, out_attn_2 = self.transformer_cuda_module.layer_norm_residual_store_pre_ln_res(
98
+ out_attn_2, self.attn_2_bias, out_attn_1, self.norm3_g, self.norm3_b, self.norm3_eps)
99
+
100
+ out_ff1 = nn.functional.linear(out_norm_3, self.ff1_w)
101
+ out_geglu = self.transformer_cuda_module.gated_activation(out_ff1, self.ff1_b, ActivationFuncType.GATED_GELU)
102
+
103
+ out_ff2 = nn.functional.linear(out_geglu, self.ff2_w)
104
+ return nhwc_bias_add(out_ff2, self.ff2_b, other=out_attn_2)
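The feed-forward path above (ff1 linear, gated_activation with GATED_GELU, ff2 linear, fused bias/residual add) corresponds to the GEGLU block it replaces. The sketch below is a dense reference under the assumption that gated_activation adds the bias, splits the projection in half along the channel dimension, and gates one half with GELU of the other; the residual handling done by nhwc_bias_add is omitted.

import torch
import torch.nn.functional as F

def geglu_ff_reference(x, ff1_w, ff1_b, ff2_w, ff2_b):
    # proj -> split -> gelu-gate -> second linear; residual add omitted.
    h = F.linear(x, ff1_w) + ff1_b
    a, gate = h.chunk(2, dim=-1)
    return F.linear(a * F.gelu(gate), ff2_w) + ff2_b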
venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/ds_attention.py ADDED
@@ -0,0 +1,290 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import math
7
+ import torch
8
+ import torch.nn as nn
9
+ from deepspeed import comm as dist
10
+ from deepspeed.accelerator import get_accelerator
11
+ from .op_binding import LinearOp, VectorMatMulOp, SoftmaxContextOp, QKVGemmOp, SoftmaxOp
12
+
13
+ minus_inf = -10000.0
14
+
15
+
16
+ class DeepSpeedSelfAttention(nn.Module):
17
+ num_layers = 0
18
+ _qkv_buffers = []
19
+
20
+ def __init__(self, config, mp_group=None, q_scales=None, q_groups=1, merge_count=1):
21
+ super(DeepSpeedSelfAttention, self).__init__()
22
+ self.config = config
23
+ data_type = self.config.dtype
24
+ data_type_fp = torch.half if self.config.dtype == torch.int8 else self.config.dtype
25
+ self.config.layer_id = DeepSpeedSelfAttention.num_layers
26
+ DeepSpeedSelfAttention.num_layers = DeepSpeedSelfAttention.num_layers + 1
27
+ device = get_accelerator().current_device_name() #if config.bigscience_bloom else 'cpu'
28
+ if self.config.set_empty_params:
29
+ self.attn_qw = None
30
+ self.attn_qb = None
31
+ self.attn_kw = None
32
+ self.attn_kb = None
33
+ self.attn_vw = None
34
+ self.attn_vb = None
35
+ self.attn_qkvw = None
36
+ self.attn_qkvb = None
37
+ self.attn_ow = None
38
+ self.attn_ob = None
39
+ else:
40
+ qkv_size_per_partition = (self.config.hidden_size // self.config.mp_size) * 3 if config.num_kv < 0 else \
41
+ ((self.config.heads + self.config.num_kv * 2) // self.config.mp_size) * (self.config.hidden_size // self.config.heads)
42
+ self.attn_qkvw = nn.Parameter(torch.empty(self.config.hidden_size,
43
+ qkv_size_per_partition,
44
+ dtype=data_type,
45
+ device=device),
46
+ requires_grad=False)
47
+ self.attn_qkvb = nn.Parameter(torch.empty(qkv_size_per_partition, dtype=data_type_fp, device=device),
48
+ requires_grad=False)
49
+ out_size_per_partition = self.config.hidden_size // self.config.mp_size
50
+ self.attn_ow = nn.Parameter(torch.empty(out_size_per_partition,
51
+ self.config.hidden_size,
52
+ dtype=data_type,
53
+ device=device),
54
+ requires_grad=False)
55
+
56
+ self.attn_ob = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
57
+ requires_grad=False)
58
+
59
+ self.num_attention_heads_per_partition = self.config.heads // self.config.mp_size
60
+ self.num_kv_partition = self.config.num_kv // self.config.mp_size
61
+ self.hidden_size_per_partition = self.config.hidden_size // self.config.mp_size
62
+ self.hidden_size_per_attention_head = self.config.hidden_size // self.config.heads
63
+
64
+ self.mp_group = mp_group
65
+
66
+ # used for quantization
67
+ self.q_scales = q_scales
68
+ self.q_groups = q_groups
69
+ self.merge_count = int(math.log2(merge_count))
70
+
71
+ self.norm_factor = math.sqrt(self.config.hidden_size // self.config.heads)
72
+ if not config.use_mup:
73
+ self.norm_factor = math.sqrt(self.norm_factor)
74
+
75
+ if self.config.scale_attn_by_inverse_layer_idx is True:
76
+ self.norm_factor *= math.sqrt(self.config.layer_id + 1)
77
+ # https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/gpt2/modeling_gpt2.py#L191
78
+
79
+ self.qkv_func = QKVGemmOp(config)
80
+ self.score_context_func = SoftmaxContextOp(config)
81
+ self.linear_func = LinearOp(config)
82
+ self.vector_matmul_func = VectorMatMulOp(config)
83
+ if len(DeepSpeedSelfAttention._qkv_buffers) == 0:
84
+ DeepSpeedSelfAttention._qkv_buffers = [
85
+ torch.empty(self.hidden_size_per_partition * 3,
86
+ self.config.hidden_size,
87
+ dtype=data_type_fp,
88
+ device=device),
89
+ torch.empty(self.hidden_size_per_partition * 3, dtype=data_type_fp, device=device)
90
+ ]
91
+
92
+ def compute_attention(self, qkv_out, input_mask, layer_past, alibi):
93
+ if isinstance(qkv_out, list) or isinstance(qkv_out, tuple):
94
+ qkv_out = qkv_out[0]
95
+
96
+ no_masking = input_mask is None
97
+
98
+ if no_masking:
99
+ input_mask = torch.empty(1)
100
+
101
+ attn_key_value = self.score_context_func(
102
+ query_key_value=qkv_out,
103
+ attn_mask=((1 - input_mask).to(qkv_out.dtype) *
104
+ minus_inf) if input_mask.dtype == torch.int64 else input_mask,
105
+ heads=self.num_attention_heads_per_partition,
106
+ num_kv=self.num_kv_partition,
107
+ norm_factor=(1 / self.norm_factor if self.config.scale_attention else 1.0),
108
+ no_masking=no_masking,
109
+ layer_id=self.config.layer_id,
110
+ num_layers=DeepSpeedSelfAttention.num_layers,
111
+ alibi=alibi)
112
+
113
+ context_layer, key_layer, value_layer = attn_key_value
114
+ return context_layer, key_layer, value_layer
115
+
116
+ def _merge_qkv(self):
117
+ qvkw = DeepSpeedSelfAttention._qkv_buffers[0]
118
+ qvkw[:self.hidden_size_per_partition, :] = self.attn_qw # type: ignore
119
+ qvkw[self.hidden_size_per_partition:2 * self.hidden_size_per_partition, :] = self.attn_kw # type: ignore
120
+ qvkw[2 * self.hidden_size_per_partition:, :] = self.attn_vw # type: ignore
121
+ if self.attn_qb is not None:
122
+ qvkb = DeepSpeedSelfAttention._qkv_buffers[1]
123
+ qvkb[:self.hidden_size_per_partition] = self.attn_qb
124
+ qvkb[self.hidden_size_per_partition:2 * self.hidden_size_per_partition] = self.attn_kb # type: ignore
125
+ qvkb[2 * self.hidden_size_per_partition:] = self.attn_vb # type: ignore
126
+ return DeepSpeedSelfAttention._qkv_buffers
127
+
128
+ def forward(self,
129
+ input,
130
+ input_mask,
131
+ head_mask=None,
132
+ layer_past=None,
133
+ get_present=False,
134
+ encoder_hidden_states=None,
135
+ encoder_attention_mask=None,
136
+ output_attentions=False,
137
+ norm_w=None,
138
+ norm_b=None,
139
+ alibi=None):
140
+ if self.attn_qkvw is None:
141
+ self._attn_qkvw, self._attn_qkvb = self._merge_qkv()
142
+ else:
143
+ self._attn_qkvw = self.attn_qkvw
144
+ self._attn_qkvb = self.attn_qkvb
145
+ if not self.config.pre_layer_norm:
146
+ qkv_out = self.linear_func(input=input,
147
+ weight=self._attn_qkvw,
148
+ bias=self._attn_qkvb,
149
+ add_bias=self.attn_qkvb is not None,
150
+ do_flash_attn=False,
151
+ num_heads=self.num_attention_heads_per_partition,
152
+ num_layers=DeepSpeedSelfAttention.num_layers)
153
+ else:
154
+ qkv_out = self.qkv_func(input=input,
155
+ weight=self._attn_qkvw,
156
+ bias=self._attn_qkvb,
157
+ gamma=norm_w,
158
+ beta=norm_b)
159
+
160
+ context_layer, key_layer, value_layer = self.compute_attention(qkv_out=qkv_out,
161
+ input_mask=input_mask,
162
+ layer_past=layer_past,
163
+ alibi=alibi)
164
+
165
+ output = self.vector_matmul_func(input=context_layer, weight=self.attn_ow)
166
+ inp_norm = qkv_out[-1]
167
+
168
+ if self.config.mlp_after_attn and self.mp_group is not None and dist.get_world_size(group=self.mp_group) > 1:
169
+ dist.all_reduce(output, group=self.mp_group)
170
+ return (output, key_layer, value_layer, context_layer, inp_norm)
171
+
172
+
173
+ class BloomSelfAttention(DeepSpeedSelfAttention):
174
+
175
+ def __init__(self, *args, **kwargs):
176
+ super(BloomSelfAttention, self).__init__(*args, **kwargs)
177
+ self.softmax_func = SoftmaxOp(self.config)
178
+
179
+ ########### This part is taken/modified from the HF modeling_bloom.py ################
180
+ # Reference: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py
181
+
182
+ def _transpose_for_context(self, x):
183
+ x = x.permute(0, 2, 1, 3).contiguous()
184
+ new_x_layer_shape = x.size()[:-2] + \
185
+ (self.hidden_size_per_partition,)
186
+ return x.view(*new_x_layer_shape).contiguous()
187
+
188
+ def _split_tensor_along_last_dim(self, tensor, num_partitions, contiguous_split_chunks=True):
189
+ """Split a tensor along its last dimension.
190
+
191
+ Args:
192
+ tensor: ([`torch.tensor`], *required*):
193
+ input tensor to split
194
+ num_partitions ([`int`], *required*):
195
+ number of partitions to split the tensor
196
+ contiguous_split_chunks ([`bool`], *optional*, default=`False`)::
197
+ If True, make each chunk contiguous in memory.
198
+ """
199
+ # Get the size and dimension.
200
+ last_dim = tensor.dim() - 1
201
+ numerator, denominator = tensor.size()[last_dim], num_partitions
202
+ if not (numerator % denominator == 0):
203
+ raise ValueError(f"{numerator} is not divisible by {denominator}")
204
+ last_dim_size = numerator // denominator
205
+ # Split.
206
+ tensor_list = torch.split(tensor, last_dim_size, dim=last_dim)
207
+ # Note: torch.split does not create contiguous tensors by default.
208
+ if contiguous_split_chunks:
209
+ return tuple(chunk.contiguous() for chunk in tensor_list)
210
+
211
+ return tensor_list
212
+
213
+ def compute_attention(self, qkv_out, input_mask, layer_past, alibi):
214
+ if isinstance(qkv_out, list) or isinstance(qkv_out, tuple):
215
+ qkv_out = qkv_out[0]
216
+
217
+ no_masking = input_mask is None
218
+
219
+ if no_masking:
220
+ input_mask = torch.empty(1)
221
+
222
+ mixed_x_layer = qkv_out
223
+ alibi = alibi.to(get_accelerator().current_device_name())
224
+ head_dim = self.hidden_size_per_partition // self.num_attention_heads_per_partition
225
+ new_tensor_shape = mixed_x_layer.size()[:-1] + (self.num_attention_heads_per_partition, 3 * head_dim)
226
+ mixed_x_layer = mixed_x_layer.view(*new_tensor_shape)
227
+
228
+ query_layer, key_layer, value_layer = self._split_tensor_along_last_dim(mixed_x_layer, 3)
229
+
230
+ # [batch_size, head_dim, q_length, k_length]
231
+ output_size = (query_layer.size(0), query_layer.size(2), query_layer.size(1), key_layer.size(1))
232
+ # [batch_size, q_length, num_heads, head_dim] -> [q_length, batch_size * num_heads, head_dim]
233
+ query_layer = query_layer.transpose(1, 2).reshape(output_size[0] * output_size[1], output_size[2], -1)
234
+ # [batch_size, k_length, num_heads, head_dim] -> [k_length, batch_size * num_heads, head_dim]
235
+ key_layer = key_layer.transpose(1, 2).reshape(output_size[0] * output_size[1], output_size[3],
236
+ -1).transpose(-1, -2)
237
+ value_layer = value_layer.transpose(1, 2).reshape(output_size[0] * output_size[1], output_size[3], -1)
238
+ if layer_past is not None:
239
+ past_key, past_value = layer_past
240
+ # concatenate along seq_length dimension -> [batch_size, qk_length, num_heads, head_dim]
241
+ key_layer = torch.cat((past_key.type_as(key_layer), key_layer), dim=-1)
242
+ value_layer = torch.cat((past_value.type_as(value_layer), value_layer), dim=-2)
243
+
244
+ presents = (key_layer, value_layer)
245
+ # Raw attention scores. [batch_size * num_heads, q_length, k_length]
246
+ matmul_result = torch.matmul(query_layer, key_layer)
247
+ # change view to [batch_size, num_heads, q_length, k_length]
248
+ attention_scores = matmul_result.view(output_size[0], output_size[1], output_size[2], -1)
249
+
250
+ offset = dist.get_rank() * self.num_attention_heads_per_partition if dist.is_initialized() else 0
251
+ target_dtype = torch.float16 if self.config.dtype == torch.int8 else self.config.dtype
252
+
253
+ # When using the hybrid engine with BLOOM, input_mask needs to be converted from torch.bool -> torch.int64
254
+ if input_mask.dtype == torch.bool:
255
+ input_mask = input_mask.long()
256
+
257
+ # Invert input_mask per transformer implementation (eg, in BLOOM, it's already inverted)
258
+ if self.config.invert_mask:
259
+ input_mask = 1 - input_mask
260
+
261
+ attention_probs = self.softmax_func(attn_scores=attention_scores,
262
+ attn_mask=input_mask.to(target_dtype) * minus_inf,
263
+ alibi=alibi,
264
+ triangular=(self.config.triangular_masking
265
+ and (attention_scores.shape[-2] > 1)),
266
+ recompute=False,
267
+ local_attention=False,
268
+ window_size=1,
269
+ async_op=False,
270
+ layer_scale=1 / (self.norm_factor * self.norm_factor),
271
+ head_offset=offset)
272
+
273
+ # change view [batch_size x num_heads, q_length, k_length]
274
+ attention_probs_reshaped = attention_probs.view(*matmul_result.shape)
275
+
276
+ # matmul: [batch_size * num_heads, q_length, head_dim]
277
+ context_layer = torch.bmm(attention_probs_reshaped, value_layer)
278
+
279
+ # change view [batch_size, num_heads, q_length, head_dim]
280
+ context_layer = context_layer.view(
281
+ context_layer.size(0) // self.num_attention_heads_per_partition, self.num_attention_heads_per_partition,
282
+ context_layer.size(1), context_layer.shape[-1])
283
+
284
+ context_layer = self._transpose_for_context(context_layer)
285
+ key_layer = presents[0]
286
+ value_layer = presents[1]
287
+
288
+ return context_layer, key_layer, value_layer
289
+
290
+ ###################### End of HF modeling_bloom addition ########################
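The BLOOM-specific compute_attention above is mostly shape bookkeeping around the fused softmax kernel. A minimal, self-contained sketch of that bookkeeping (illustrative sizes only; the real kernel also applies alibi, the attention mask and the layer scale inside softmax_func):

import torch

batch, q_len, heads, head_dim = 2, 4, 8, 16
qkv = torch.randn(batch, q_len, heads * 3 * head_dim)        # fused QKV projection output

# [batch, q_len, heads, 3 * head_dim] -> three [batch, q_len, heads, head_dim] tensors
qkv = qkv.view(batch, q_len, heads, 3 * head_dim)
query, key, value = torch.split(qkv, head_dim, dim=-1)

# collapse batch and heads so one batched matmul covers every head
query = query.transpose(1, 2).reshape(batch * heads, q_len, head_dim)
key = key.transpose(1, 2).reshape(batch * heads, q_len, head_dim).transpose(-1, -2)
value = value.transpose(1, 2).reshape(batch * heads, q_len, head_dim)

scores = torch.matmul(query, key) / head_dim**0.5            # [batch * heads, q_len, k_len]
probs = torch.softmax(scores, dim=-1)
context = torch.bmm(probs, value)                            # [batch * heads, q_len, head_dim]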
venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/ds_mlp.py ADDED
@@ -0,0 +1,124 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import math
7
+ import torch
8
+ import torch.nn as nn
9
+ from deepspeed import comm as dist
10
+ from deepspeed.utils.types import GATED_ACTIVATION_TYPES
11
+ from deepspeed.accelerator import get_accelerator
12
+ from .op_binding import MLPGemmOp, VectorMatMulOp, GELUGemmOp, ResidualAddOp
13
+
14
+
15
+ class DeepSpeedMLP(nn.Module):
16
+ _inter_w_buffers = []
17
+
18
+ def __init__(self, config, mp_group=None, q_scales=None, q_groups=1, merge_count=1, mlp_extra_grouping=False):
19
+ super(DeepSpeedMLP, self).__init__()
20
+
21
+ self.config = config
22
+
23
+ data_type = torch.int8 if self.config.dtype == torch.int8 else self.config.dtype
24
+ data_type_fp = torch.half if self.config.dtype == torch.int8 else self.config.dtype
25
+ device = get_accelerator().current_device_name()
26
+
27
+ proj_factor = 2 if self.config.mlp_act_func_type in GATED_ACTIVATION_TYPES else 1
28
+ self.config.intermediate_size = self.config.intermediate_size if self.config.intermediate_size > 0 else 4 * self.config.hidden_size
29
+ self.intm_w_sz_per_partition = self.config.intermediate_size * proj_factor // self.config.mp_size
30
+ self.intm_o_sz_per_partition = self.config.intermediate_size // self.config.mp_size
31
+
32
+ if self.config.set_empty_params:
33
+ self.attn_nw = None
34
+ self.attn_nb = None
35
+ self.inter_w = None
36
+ self.inter_b = None
37
+ self.inter_up_w = None
38
+ self.inter_up_b = None
39
+ self.inter_gate_w = None
40
+ self.inter_gate_b = None
41
+ self.output_w = None
42
+ self.output_b = None
43
+ else:
44
+ self.attn_nw = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
45
+ requires_grad=False)
46
+ self.attn_nb = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
47
+ requires_grad=False)
48
+
49
+ self.inter_w = nn.Parameter(torch.empty(self.config.hidden_size,
50
+ self.intm_w_sz_per_partition,
51
+ dtype=data_type,
52
+ device=device),
53
+ requires_grad=False)
54
+ self.inter_b = nn.Parameter(torch.empty(self.intm_w_sz_per_partition, dtype=data_type_fp, device=device),
55
+ requires_grad=False)
56
+ self.output_w = nn.Parameter(torch.empty(self.intm_o_sz_per_partition,
57
+ self.config.hidden_size,
58
+ dtype=data_type,
59
+ device=device),
60
+ requires_grad=False)
61
+ self.output_b = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device),
62
+ requires_grad=False)
63
+
64
+ # used for quantization
65
+ self.q_scales = q_scales
66
+ self.q_groups = q_groups * 2 if mlp_extra_grouping else q_groups
67
+ self.merge_count = int(math.log2(merge_count))
68
+ self.mp_group = mp_group
69
+
70
+ self.mlp_gemm_func = MLPGemmOp(config)
71
+ self.vector_matmul_func = VectorMatMulOp(config)
72
+ self.fused_gemm_gelu = GELUGemmOp(config)
73
+ self.residual_add_func = ResidualAddOp(config)
74
+
75
+ if len(DeepSpeedMLP._inter_w_buffers) == 0:
76
+ DeepSpeedMLP._inter_w_buffers = [
77
+ torch.empty(self.intm_w_sz_per_partition, self.config.hidden_size, dtype=data_type, device=device),
78
+ torch.empty(self.intm_w_sz_per_partition, dtype=data_type_fp, device=device)
79
+ ]
80
+
81
+ def _merge_inter_w(self):
82
+ inter_w = DeepSpeedMLP._inter_w_buffers[0]
83
+ inter_w[:self.intm_w_sz_per_partition // 2, :] = self.inter_up_w # type: ignore
84
+ inter_w[self.intm_w_sz_per_partition // 2:, :] = self.inter_gate_w # type: ignore
85
+ if self.inter_up_b is not None:
86
+ inter_b = DeepSpeedMLP._inter_w_buffers[1]
87
+ inter_b[:self.intm_w_sz_per_partition // 2] = self.inter_up_b # type: ignore
88
+ inter_b[self.intm_w_sz_per_partition // 2:] = self.inter_gate_b # type: ignore
89
+ return DeepSpeedMLP._inter_w_buffers
90
+
91
+ def forward(self, input, residual, residual_norm, bias):
92
+ if self.inter_w is None:
93
+ self._inter_w, self._inter_b = self._merge_inter_w()
94
+ else:
95
+ self._inter_w = self.inter_w
96
+ self._inter_b = self.inter_b
97
+
98
+ residual_add = None
99
+ if self.attn_nw is None:
100
+ output = self.fused_gemm_gelu(input=residual_norm,
101
+ weight=self._inter_w,
102
+ bias=self._inter_b,
103
+ weight_out=self.output_w)
104
+ else:
105
+ output, residual_add = self.mlp_gemm_func(input=input,
106
+ residual=residual,
107
+ weight_interm=self._inter_w,
108
+ weight_out=self.output_w,
109
+ input_bias=bias,
110
+ bias=self._inter_b,
111
+ gamma=self.attn_nw,
112
+ beta=self.attn_nb)
113
+
114
+ residual = self.residual_add_func(hidden_state=output,
115
+ residual=residual,
116
+ add_bias=bias is not None,
117
+ attention_output=input,
118
+ attention_bias=bias if bias is not None else self.output_b,
119
+ final_bias=self.output_b,
120
+ residual_add=residual_add)
121
+ if self.mp_group is not None and dist.get_world_size(group=self.mp_group) > 1:
122
+ dist.all_reduce(residual, group=self.mp_group)
123
+
124
+ return residual
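Ignoring quantization, tensor parallelism and the fused residual/bias handling, the pre-LN path of DeepSpeedMLP.forward above is roughly the following pure-PyTorch computation (a hedged sketch; reference_mlp and its argument names are illustrative, not the module's API):

import torch
import torch.nn.functional as F

def reference_mlp(attn_output, residual, attn_nw, attn_nb, inter_w, inter_b, output_w, output_b):
    # layer norm over the attention output (gamma/beta correspond to attn_nw/attn_nb)
    x = F.layer_norm(attn_output, attn_output.shape[-1:], attn_nw, attn_nb)
    # intermediate projection + GELU (inter_w is stored as [hidden, intermediate])
    x = F.gelu(x @ inter_w + inter_b)
    # output projection back to hidden size, then residual add
    return x @ output_w + output_b + residual

h = 8
out = reference_mlp(torch.randn(2, 4, h), torch.randn(2, 4, h),
                    torch.ones(h), torch.zeros(h),
                    torch.randn(h, 4 * h), torch.zeros(4 * h),
                    torch.randn(4 * h, h), torch.zeros(h))

When mp_group spans more than one rank, the real module additionally all-reduces the result so every rank ends up with the full output.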
venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/moe_inference.py ADDED
@@ -0,0 +1,365 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import json
7
+ import math
8
+ import torch
9
+ from torch.autograd import Function
10
+ # accelerator modules will be imported if needed
11
+ inference_module = None
12
+ specialized_mode = None
13
+ import torch.nn as nn
14
+ from .ds_attention import DeepSpeedSelfAttention
15
+ from .config import DeepSpeedInferenceConfig
16
+ from ....moe.sharded_moe import TopKGate
17
+ from deepspeed import comm as dist
18
+ from deepspeed.accelerator import get_accelerator
19
+ from deepspeed.ops.op_builder import InferenceBuilder
20
+
21
+
22
+ class DeepSpeedMoEInferenceConfig(DeepSpeedInferenceConfig):
23
+ """Initialize the DeepSpeed Transformer Config.
24
+ Arguments:
25
+ hidden_size: The hidden size of the transformer layer
26
+ intermediate_size: The intermediate size of the feed-forward part of transformer layer
27
+ heads: The number of heads in the self-attention of the transformer layer
28
+ num_hidden_layers: The number of transformer layers
29
+ layer_norm_eps: The epsilon value for the layer norm
30
+ local_rank: Optional: The rank of the GPU running the transformer kernel. It is not required
32
+ if the model has already set the current device; otherwise it needs to be set
33
+ so that the transformer kernel can run on the right device
33
+ mp_size (optional): This argument is mainly used to create the parameters on the kernel side
34
+ using model-parallel architecture. If the client model already takes care of this, there is no
35
+ need to pass this argument.
36
+ fp16: Enable half-precision computation
37
+ bf16: Enable bf16 floating point computation
38
+ pre_layer_norm: Select between Pre-LN or Post-LN transformer architecture
39
+ stochastic_mode: Enable for high performance, please note that this flag has some level of
40
+ non-determinism and can produce different results on different runs. However, we have seen
41
+ that by enabling it, the pretraining tasks such as BERT are not affected and can obtain
42
+ a high accuracy level. On the other hand, for the downstream tasks, such as fine-tuning, we recommend
43
+ to turn it off in order to be able to reproduce the same result through the regular kernel execution.
44
+
45
+ scale_attention: If true, both q and k are scaled by 1/sqrt(attention_heads) before attention computation.
46
+ return_tuple: if True, returns the transformer output as a tuple, otherwise returns as a tensor
47
+ """
48
+
49
+ def __init__(self,
50
+ hidden_size=-1,
51
+ intermediate_size=-1,
52
+ heads=-1,
53
+ num_hidden_layers=-1,
54
+ layer_norm_eps=1e-12,
55
+ local_rank=-1,
56
+ mp_size=1,
57
+ fp16=False,
58
+ bf16=False,
59
+ q_int8=False,
60
+ pre_layer_norm=True,
61
+ stochastic_mode=False,
62
+ scale_attention=True,
63
+ triangular_masking=True,
64
+ local_attention=False,
65
+ window_size=256,
66
+ return_tuple=True,
67
+ moe_experts=1,
68
+ global_experts=1,
69
+ k=1,
70
+ capacity_factor=1.,
71
+ eval_capacity_factor=1.,
72
+ min_capacity=1,
73
+ noisy_gate_policy=None,
74
+ drop_tokens=True,
75
+ use_rts=False,
76
+ mlp_type='standard',
77
+ scale_attn_by_inverse_layer_idx=False):
78
+ super(DeepSpeedMoEInferenceConfig,
79
+ self).__init__(hidden_size, (intermediate_size if intermediate_size > 0 else 4 * hidden_size), heads,
80
+ num_hidden_layers, layer_norm_eps, local_rank, mp_size, fp16, bf16, q_int8,
81
+ pre_layer_norm, stochastic_mode, scale_attention, triangular_masking, local_attention,
82
+ window_size, return_tuple)
83
+ self.moe_experts = moe_experts
84
+ self.k = k
85
+ self.capacity_factor = capacity_factor
86
+ self.eval_capacity_factor = eval_capacity_factor
87
+ self.min_capacity = min_capacity
88
+ self.noisy_gate_policy = noisy_gate_policy
89
+ self.drop_tokens = drop_tokens
90
+ self.use_rts = use_rts
91
+ self.global_experts = global_experts
92
+ self.mlp_type = mlp_type
93
+ self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
94
+
95
+ @classmethod
96
+ def from_dict(cls, json_object):
97
+ config = DeepSpeedInferenceConfig()
98
+ for key, value in json_object.items():
99
+ config.__dict__[key] = value
100
+ return config
101
+
102
+ @classmethod
103
+ def from_json_file(cls, json_file):
104
+ with open(json_file, "r", encoding='utf-8') as reader:
105
+ text = reader.read()
106
+ return cls.from_dict(json.loads(text))
107
+
108
+
109
+ class DeepSpeedMLPFunction(Function):
110
+
111
+ @staticmethod
112
+ def forward(ctx, input, inter_w, inter_b, config, output_b, output_w, q_scales, q_groups, merge_count, mp_group,
113
+ async_op):
114
+ if config.q_int8:
115
+ intermediate = inference_module.fused_gemm_gelu_int8(input, inter_w, inter_b, config.epsilon, q_scales[2],
116
+ (q_groups * (2**merge_count)), config.pre_layer_norm)
117
+ output = inference_module.vector_matmul_int8(intermediate, output_w, q_scales[3], q_groups, (merge_count))
118
+ else:
119
+ mlp_gemm_func = inference_module.fused_gemm_gelu_fp16 if config.fp16 else \
120
+ inference_module.fused_gemm_gelu_fp32
121
+
122
+ output = mlp_gemm_func(input, inter_w, inter_b, output_w, config.epsilon, config.pre_layer_norm, async_op)
123
+ if mp_group is not None and dist.get_world_size(group=mp_group) > 1:
124
+ dist.all_reduce(output, group=mp_group, async_op=async_op)
125
+
126
+ return output + output_b
127
+
128
+ @staticmethod
129
+ def backward(ctx, grad_output):
130
+ raise RuntimeError('You are running with DeepSpeed Inference mode. \
131
+ Please switch to Training mode for running backward!')
132
+
133
+
134
+ class DeepSpeedMoEMLP(nn.Module):
135
+
136
+ def __init__(self, config, q_scales=None, q_groups=1, merge_count=1, mlp_extra_grouping=False, mp_group=None):
137
+ super(DeepSpeedMoEMLP, self).__init__()
138
+
139
+ self.config = config
140
+ self.attn_nw = nn.Parameter(torch.Tensor(self.config.hidden_size))
141
+ self.attn_nb = nn.Parameter(torch.Tensor(self.config.hidden_size))
142
+ interm_size = self.config.intermediate_size // (1 if mp_group is None else dist.get_world_size(group=mp_group))
143
+ self.inter_w = nn.Parameter(torch.Tensor(self.config.hidden_size, interm_size))
144
+ self.inter_b = nn.Parameter(torch.Tensor(interm_size))
145
+ self.output_w = nn.Parameter(torch.Tensor((interm_size), self.config.hidden_size))
146
+ self.output_b = nn.Parameter(torch.Tensor(self.config.hidden_size))
147
+
148
+ # used for quantization
149
+ self.q_scales = q_scales
150
+ self.q_groups = q_groups * 2 if mlp_extra_grouping else q_groups
151
+ self.merge_count = int(math.log2(merge_count))
152
+ self.mp_group = mp_group
153
+
154
+ def forward(self, input, async_op=False):
155
+ return DeepSpeedMLPFunction.apply(input, self.inter_w, self.inter_b, self.config, self.output_b, self.output_w,
156
+ self.q_scales, self.q_groups, self.merge_count, self.mp_group, async_op)
157
+
158
+
159
+ class DeepSpeedMoEInference(nn.Module):
160
+ """Initialize the DeepSpeed MoE Transformer Layer.
161
+ Arguments:
162
+ layer_id: The layer index starting from 0, e.g. if model has 24 transformer layers,
163
+ layer_id will be 0,1,2...23 when each layer object is instantiated
164
+ config: An object of DeepSpeedInferenceConfig
165
+ mp_group: Model parallelism group initialized on the modeling side.
166
+ quantize_scales: This argument groups all the layers' scales used for quantization
167
+ quantize_groups: Number of groups used for quantizing the model
168
+ merge_count: The number of model-parallel checkpoints merged before running inference.
168
+ We use this argument to control the quantization scale for the model parameters when a
169
+ quantization grouping larger than 1 is used.
171
+ mlp_extra_grouping: This flag indicates that twice as many quantization groups are used for the MLP part
172
+ of a Transformer layer. We use this feature to reduce the convergence impact of quantization
173
+ for specific downstream tasks.
174
+ """
175
+ layer_id = 0
176
+
177
+ def __init__(self,
178
+ config,
179
+ mp_group=None,
180
+ ep_group=None,
181
+ expert_mp_group=None,
182
+ quantize_scales=None,
183
+ quantize_groups=1,
184
+ merge_count=1,
185
+ mlp_extra_grouping=False):
186
+ super(DeepSpeedMoEInference, self).__init__()
187
+
188
+ self.config = config
189
+ self.config.layer_id = DeepSpeedMoEInference.layer_id
190
+ global inference_module
191
+ global specialized_mode
192
+ if inference_module is None:
193
+ specialized_mode = False
194
+ # InferenceSpecializedBuilder is not among DeepSpeed provided builder yet, so we infer by builder name string
195
+ builder = get_accelerator().create_op_builder("InferenceSpecializedBuilder")
196
+ if builder is not None and builder.is_compatible():
197
+ inference_module = builder.load()
198
+ specialized_mode = True
199
+ else:
200
+ inference_module = InferenceBuilder().load()
201
+ self.config.specialized_mode = specialized_mode
202
+ assert self.config.dtype != torch.bfloat16, "DeepSpeed MoE Transformer Inference not yet tested for bfloat support"
203
+
204
+ DeepSpeedMoEInference.layer_id += 1
205
+ self.attention = DeepSpeedSelfAttention(self.config, mp_group, quantize_scales, quantize_groups, merge_count)
206
+ self.attn_nw = nn.Parameter(torch.Tensor(self.config.hidden_size))
207
+ self.attn_nb = nn.Parameter(torch.Tensor(self.config.hidden_size))
208
+
209
+ self.norm_w = nn.Parameter(torch.Tensor(self.config.hidden_size))
210
+ self.norm_b = nn.Parameter(torch.Tensor(self.config.hidden_size))
211
+
212
+ if config.mlp_type == 'residual':
213
+ self.res_mlp = DeepSpeedMoEMLP(config, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping,
214
+ mp_group)
215
+ self.res_coef = nn.Parameter(torch.Tensor(self.config.hidden_size, 2))
216
+ self.coef_func = inference_module.softmax_fp16 if self.config.dtype in [torch.float16, torch.int8] else \
217
+ inference_module.softmax_fp32
218
+ self.vector_matmul_func = inference_module.vector_matmul_fp16 if self.config.dtype == torch.float16 else \
219
+ inference_module.vector_matmul_fp32
220
+
221
+ config.mp_size = 1
222
+ self.mlp = nn.ModuleList(
223
+ DeepSpeedMoEMLP(config, quantize_scales, quantize_groups, merge_count, mlp_extra_grouping, expert_mp_group)
224
+ for i in range(self.config.moe_experts))
225
+
226
+ self.moe_gate = TopKGate(self.config.hidden_size, self.config.global_experts, self.config.k,
227
+ self.config.capacity_factor, self.config.eval_capacity_factor,
228
+ self.config.min_capacity, self.config.noisy_gate_policy, self.config.drop_tokens,
229
+ self.config.use_rts, ep_group)
230
+
231
+ self.ep_group = ep_group
232
+ self.mp_group = mp_group
233
+ self.expert_mp_group = expert_mp_group
234
+
235
+ print("DeepSpeed MoE Transformer Inference config is ", self.config.__dict__)
236
+
237
+ self.bias_residual_func = inference_module.bias_residual_fp16 if self.config.dtype in [torch.float16, torch.int8] else \
238
+ inference_module.bias_residual_fp32
239
+ self.ds_layernorm = inference_module.layer_norm_fp16 if self.config.dtype in [torch.float16, torch.int8] else \
240
+ inference_module.layer_norm_fp32
241
+ self.einsum_sec_sm_ecm = inference_module.einsum_sec_sm_ecm_fp16 if self.config.dtype in [torch.float16, torch.int8] else \
242
+ inference_module.einsum_sec_sm_ecm_fp32
243
+
244
+ def res_coef_func(self, inp, async_op):
245
+ inp = self.vector_matmul_func(inp, self.res_coef, async_op)
246
+ return self.coef_func(inp, torch.empty(1), False, False, False, 256, async_op)
247
+
248
+ def moe_gate_einsum(self, attention_output):
249
+ _, combined_weights, dispatch_mask, _ = self.moe_gate(
250
+ attention_output.view(-1, self.config.hidden_size),
251
+ None,
252
+ )
253
+ dispatched_attention = self.einsum_sec_sm_ecm(dispatch_mask.type_as(attention_output),
254
+ attention_output.view(-1, self.config.hidden_size))
255
+ return dispatched_attention, combined_weights
256
+
257
+ def expert_exec(self, dispatched_input):
258
+ dispatched_input = dispatched_input.reshape(self.config.global_experts // self.config.moe_experts,
259
+ self.config.moe_experts, -1, self.config.hidden_size)
260
+
261
+ chunks = dispatched_input.chunk(self.config.moe_experts, dim=1)
262
+ expert_outputs = torch.empty((
263
+ self.config.moe_experts,
264
+ chunks[0].shape[0],
265
+ ) + chunks[0].shape[2:],
266
+ dtype=dispatched_input.dtype,
267
+ device=dispatched_input.device)
268
+ for chunk, expert in zip(chunks, range(len(self.mlp))):
269
+ expert_outputs[expert] = self.mlp[expert](chunk.view(-1, dispatched_input.shape[-2],
270
+ dispatched_input.shape[-1]))
271
+ return expert_outputs
272
+
273
+ def _alltoall(self, dispatched_attention):
274
+ if dist.get_world_size(group=self.ep_group) > 1:
275
+ dispatched_input = torch.empty_like(dispatched_attention)
276
+ dist.all_to_all_single(dispatched_input, dispatched_attention, group=self.ep_group)
277
+ return dispatched_input
278
+ else:
279
+ return dispatched_attention
280
+
281
+ def scale_expert_output(self, attention_output, expert_output, combined_weights):
282
+ combined_output = torch.matmul(
283
+ combined_weights.type_as(attention_output).reshape(combined_weights.shape[0], -1),
284
+ expert_output.reshape(-1, expert_output.shape[-1]))
285
+ return combined_output.reshape(attention_output.shape)
286
+
287
+ def forward(self,
288
+ input,
289
+ input_mask=None,
290
+ attention_mask=None,
291
+ head_mask=None,
292
+ layer_past=None,
293
+ get_key_value=False,
294
+ get_present=False,
295
+ encoder_output=None,
296
+ enc_dec_attn_mask=None,
297
+ encoder_hidden_states=None,
298
+ encoder_attention_mask=None,
299
+ use_cache=False,
300
+ output_attentions=False):
301
+ get_present = (get_present or get_key_value or use_cache)
302
+ input_mask = input_mask if attention_mask is None else attention_mask
303
+ input_type = input.dtype
304
+
305
+ if (self.config.dtype in [torch.float16, torch.int8]) and input_type == torch.float:
306
+ input = input.half()
307
+
308
+ with torch.no_grad():
309
+ attention_output = self.attention(input, input_mask, head_mask, layer_past, get_present,
310
+ encoder_hidden_states, encoder_attention_mask, output_attentions,
311
+ self.norm_w, self.norm_b)
312
+
313
+ if get_present:
314
+ attention_output, p_key, p_value = attention_output[0:3]
315
+ presents = (p_key, p_value)
316
+ elif output_attentions:
317
+ attention_output, _, _, context_output = attention_output[0:4]
318
+ else:
319
+ attention_output = attention_output[0]
320
+
321
+ residual_add = attention_output + self.attention.attn_ob
322
+ attention_output = self.ds_layernorm(residual_add, self.attn_nw, self.attn_nb, self.config.epsilon)
323
+
324
+ if self.config.mlp_type == 'residual':
325
+ res_mlp_out = self.res_mlp(attention_output, async_op=True)
326
+ res_coef_out = self.res_coef_func(attention_output, async_op=True)
327
+
328
+ if self.expert_mp_group is not None:
329
+ world_size = dist.get_world_size(group=self.expert_mp_group)
330
+ gather_buffer = torch.zeros(world_size * attention_output.numel(),
331
+ dtype=attention_output.dtype,
332
+ device=attention_output.device)
333
+ dist.all_gather_into_tensor(gather_buffer, attention_output, group=self.expert_mp_group)
334
+ attention_output = gather_buffer.view(-1, *attention_output.size()[1:])
335
+
336
+ ############## MoE Gating + Experts ###############
337
+ dispatched_attention, combined_weights = self.moe_gate_einsum(attention_output)
338
+ dispatched_input = self._alltoall(dispatched_attention)
339
+ expert_outputs = self.expert_exec(dispatched_input)
340
+ expert_output = self._alltoall(expert_outputs)
341
+ output = self.scale_expert_output(attention_output, expert_output, combined_weights)
342
+ ################################################
343
+
344
+ if self.expert_mp_group is not None:
345
+ output = output.split(output.shape[0] // dist.get_world_size(group=self.expert_mp_group),
346
+ dim=0)[dist.get_rank(group=self.expert_mp_group)]
347
+
348
+ if self.config.mlp_type == 'residual':
349
+ inference_module.moe_res_matmul(res_mlp_out, res_coef_out, output)
350
+
351
+ output = self.bias_residual_func(output, residual_add, torch.empty(1))
352
+
353
+ if not self.config.pre_layer_norm:
354
+ output = self.ds_layernorm(output, self.norm_w, self.norm_b, self.config.epsilon)
355
+
356
+ if input_type != output.dtype:
357
+ output = output.to(input_type)
358
+
359
+ if get_present:
360
+ output = (output, presents)
361
+
362
+ if self.config.return_tuple:
363
+ return output if type(output) is tuple else (output, )
364
+ else:
365
+ return output
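The forward pass above strings together gating, dispatch, expert execution and combination across the expert-parallel group. A single-process, dense top-1 sketch of that pipeline (illustrative only: the dense einsums stand in for the fused einsum_sec_sm_ecm kernel, the toy slot assignment ignores capacity overflow, and the all-to-all exchanges are omitted):

import torch

tokens, hidden, experts, capacity = 8, 16, 4, 4
x = torch.randn(tokens, hidden)                          # flattened attention output [s, m]
expert_ffns = [torch.nn.Sequential(torch.nn.Linear(hidden, 4 * hidden),
                                   torch.nn.GELU(),
                                   torch.nn.Linear(4 * hidden, hidden))
               for _ in range(experts)]

# top-1 gating: pick one expert and one capacity slot per token
logits = torch.randn(tokens, experts)
top1 = logits.argmax(dim=-1)
slot = torch.arange(tokens) % capacity
dispatch_mask = torch.zeros(tokens, experts, capacity)
dispatch_mask[torch.arange(tokens), top1, slot] = 1.0
gate_prob = torch.softmax(logits, dim=-1).gather(1, top1[:, None])
combine_weights = dispatch_mask * gate_prob[:, :, None]

# "sec,sm->ecm": scatter tokens into per-expert buffers, run each expert, then gather back
dispatched = torch.einsum("sec,sm->ecm", dispatch_mask, x)         # [experts, capacity, hidden]
expert_out = torch.stack([ffn(dispatched[e]) for e, ffn in enumerate(expert_ffns)])
output = torch.einsum("sec,ecm->sm", combine_weights, expert_out)  # back to [tokens, hidden]

In the real module, dispatched and expert_out are additionally exchanged across the ep_group with all-to-all so that each rank only executes its local experts.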
venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__init__.py ADDED
@@ -0,0 +1,13 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .linear import LinearOp
7
+ from .vector_matmul import VectorMatMulOp
8
+ from .softmax_context import SoftmaxContextOp
9
+ from .qkv_gemm import QKVGemmOp
10
+ from .softmax import SoftmaxOp
11
+ from .mlp_gemm import MLPGemmOp
12
+ from .gelu_gemm import GELUGemmOp
13
+ from .residual_add import ResidualAddOp
venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (580 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/base.cpython-310.pyc ADDED
Binary file (821 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/gelu_gemm.cpython-310.pyc ADDED
Binary file (1.78 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/linear.cpython-310.pyc ADDED
Binary file (2.66 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/mlp_gemm.cpython-310.pyc ADDED
Binary file (2.99 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/qkv_gemm.cpython-310.pyc ADDED
Binary file (3.59 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/op_binding/__pycache__/residual_add.cpython-310.pyc ADDED
Binary file (1.93 kB). View file