applied-ai-018 commited on
Commit
4429f2c
·
verified ·
1 Parent(s): a1797ca

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adagrad/cpu_adagrad.cpp +256 -0
  2. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/cpu_adam.cpp +16 -0
  3. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/cpu_adam_impl.cpp +312 -0
  4. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/fused_adam_frontend.cpp +25 -0
  5. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/multi_tensor_adam.cu +203 -0
  6. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/multi_tensor_apply.cuh +132 -0
  7. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_common.cpp +342 -0
  8. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_common.h +38 -0
  9. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_types.cpp +76 -0
  10. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_types.h +59 -0
  11. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_utils.cpp +126 -0
  12. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_utils.h +79 -0
  13. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_aio_thread.cpp +104 -0
  14. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_aio_thread.h +59 -0
  15. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_pin_tensor.cpp +45 -0
  16. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_pin_tensor.h +27 -0
  17. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio.cpp +125 -0
  18. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio.h +31 -0
  19. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio_handle.cpp +298 -0
  20. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio_handle.h +77 -0
  21. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_copy.cpp +135 -0
  22. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_copy.h +46 -0
  23. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/py_ds_aio.cpp +46 -0
  24. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_test/single_process_config.json +29 -0
  25. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/adam/fused_adam.cpp +48 -0
  26. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/ccl.cpp +344 -0
  27. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/shm.cpp +686 -0
  28. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/shm.h +12 -0
  29. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/shm_interface.cpp +120 -0
  30. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/lion/fused_lion.cpp +43 -0
  31. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_grad_bias.h +250 -0
  32. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_pipelined.h +592 -0
  33. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_rescale_output.h +251 -0
  34. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_thread_apply_logsumexp.h +168 -0
  35. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma.h +119 -0
  36. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma_base.h +181 -0
  37. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma_multistage.h +714 -0
  38. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma_pipelined.h +388 -0
  39. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/find_default_mma.h +191 -0
  40. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/mma_accum_lambda_iterator.h +347 -0
  41. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/mma_from_smem.h +1939 -0
  42. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/epilogue_predicated_tile_iterator.h +691 -0
  43. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/make_residual_last.h +91 -0
  44. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/predicated_tile_access_iterator_residual_last.h +1964 -0
  45. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/predicated_tile_iterator_atomic.h +886 -0
  46. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/predicated_tile_iterator_residual_last.h +1938 -0
  47. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/transpose_warp_iterator.h +57 -0
  48. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/warp_iterator_from_smem.h +269 -0
  49. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/transform/bias_broadcast.h +148 -0
  50. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/transform/tile_smem_loader.h +93 -0
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adagrad/cpu_adagrad.cpp ADDED
@@ -0,0 +1,256 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "cpu_adagrad.h"
7
+ #include <torch/extension.h>
8
+ #include <iostream>
9
+ #include <memory>
10
+ #include <type_traits>
11
+ #include <unordered_map>
12
+ #if defined(__ENABLE_CUDA__)
13
+ #include <cuda_runtime_api.h>
14
+ #include "cublas_v2.h"
15
+ #include "cuda.h"
16
+ #include "curand.h"
17
+ #include "custom_cuda_layers.h"
18
+ #endif
19
+
20
+ static std::unordered_map<int, std::shared_ptr<void>> s_optimizers;
21
+
22
+ // C++ interface
23
+
24
+ void Adagrad_Optimizer::Step_1(float* _params,
25
+ float* grads,
26
+ float* _exp_avg_sq,
27
+ size_t _param_size,
28
+ ds_half_precision_t* dev_params,
29
+ bool half_precision)
30
+ {
31
+ size_t rounded_size = 0;
32
+ #if defined(__AVX512__) or defined(__AVX256__)
33
+ Step_AVX<1>(
34
+ &rounded_size, _params, grads, _exp_avg_sq, _param_size, dev_params, half_precision);
35
+ #endif
36
+ if (_param_size > rounded_size) {
37
+ float step_size = -1 * _alpha;
38
+ ds_half_precision_t* grads_cast_h;
39
+ ds_half_precision_t* params_cast_h;
40
+ if (half_precision) {
41
+ grads_cast_h = reinterpret_cast<ds_half_precision_t*>(grads);
42
+ params_cast_h = reinterpret_cast<ds_half_precision_t*>(_params);
43
+ }
44
+ for (size_t t = rounded_size; t < _param_size; t += TILE) {
45
+ size_t copy_size = TILE;
46
+ if ((t + TILE) > _param_size) copy_size = _param_size - t;
47
+ size_t offset = copy_size + t;
48
+ #if defined(__ENABLE_CUDA__)
49
+ if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); }
50
+ #elif defined(__ENABLE_CANN__)
51
+ if ((t / TILE) >= 2) { aclrtSynchronizeStream(_streams[_buf_index].stream()); }
52
+ #endif
53
+ #pragma omp parallel for
54
+ for (size_t k = t; k < offset; k++) {
55
+ float grad = half_precision ? (float)grads_cast_h[k] : grads[k];
56
+ float param = half_precision ? (float)params_cast_h[k] : _params[k];
57
+ float momentum = grads[k];
58
+ float variance = _exp_avg_sq[k];
59
+ if (_weight_decay > 0) { grad = param * _weight_decay + grad; }
60
+
61
+ variance += grad * grad;
62
+
63
+ grad = sqrt(variance);
64
+ grad += _eps;
65
+ grad = momentum / grad;
66
+ param = grad * step_size + param;
67
+ #if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__)
68
+ if (dev_params) _doubled_buffer[_buf_index][k - t] = param;
69
+ #endif
70
+ if (half_precision)
71
+ params_cast_h[k] = (ds_half_precision_t)param;
72
+ else
73
+ _params[k] = param;
74
+ // STORE UPDATE TERM TO GRAD'S MEMORY
75
+ grads[k] = grad * step_size;
76
+ _exp_avg_sq[k] = variance;
77
+ }
78
+ #if defined(__ENABLE_CUDA__)
79
+ if (dev_params) {
80
+ launch_param_update(
81
+ _doubled_buffer[_buf_index], dev_params + t, (copy_size), _streams[_buf_index]);
82
+ _buf_index = !_buf_index;
83
+ }
84
+ #elif defined(__ENABLE_CANN__)
85
+ if (dev_params) {
86
+ size_t memcpy_size = copy_size * sizeof(_doubled_buffer[_buf_index][0]);
87
+ aclrtMemcpy(dev_params + t,
88
+ memcpy_size,
89
+ _doubled_buffer[_buf_index],
90
+ memcpy_size,
91
+ aclrtMemcpyKind::ACL_MEMCPY_HOST_TO_DEVICE);
92
+
93
+ _buf_index = !_buf_index;
94
+ }
95
+ #endif
96
+ }
97
+ }
98
+ }
99
+
100
+ void Adagrad_Optimizer::Step_4(float* _params,
101
+ float* grads,
102
+ float* _exp_avg_sq,
103
+ size_t _param_size,
104
+ ds_half_precision_t* dev_params,
105
+ bool half_precision)
106
+ {
107
+ size_t rounded_size = 0;
108
+ #if defined(__AVX512__) or defined(__AVX256__)
109
+ Step_AVX<4>(
110
+ &rounded_size, _params, grads, _exp_avg_sq, _param_size, dev_params, half_precision);
111
+ #endif
112
+ if (_param_size > rounded_size)
113
+ Step_1((_params + rounded_size),
114
+ (grads + rounded_size),
115
+ (_exp_avg_sq + rounded_size),
116
+ (_param_size - rounded_size),
117
+ (dev_params != nullptr ? (dev_params + rounded_size) : dev_params),
118
+ half_precision);
119
+ }
120
+
121
+ int create_adagrad_optimizer(int optimizer_id,
122
+ float alpha = 1e-2,
123
+ float eps = 1e-8,
124
+ float weight_decay = 0,
125
+ bool should_log = false)
126
+ {
127
+ auto opt = std::make_shared<Adagrad_Optimizer>(alpha, eps, weight_decay);
128
+
129
+ s_optimizers[optimizer_id] = opt;
130
+
131
+ if (should_log) {
132
+ std::string avx_type = "";
133
+ #if defined(__AVX512__)
134
+ avx_type = "AVX512";
135
+ #else
136
+ #if defined(__AVX256__)
137
+ avx_type = "AVX2";
138
+ #else
139
+ avx_type = "scalar";
140
+ #endif
141
+ #endif
142
+
143
+ printf("Adagrad Optimizer #%d is created with %s arithmetic capability.\n",
144
+ optimizer_id,
145
+ avx_type.c_str());
146
+ printf("Config: alpha=%f, weight_decay=%f\n", alpha, weight_decay);
147
+ }
148
+
149
+ return 0;
150
+ }
151
+
152
+ void Adagrad_Optimizer::Step_8(float* _params,
153
+ float* grads,
154
+ float* _exp_avg_sq,
155
+ size_t _param_size,
156
+ ds_half_precision_t* dev_params,
157
+ bool half_precision)
158
+ {
159
+ size_t rounded_size = 0;
160
+ #if defined(__AVX512__) or defined(__AVX256__)
161
+ Step_AVX<8>(
162
+ &rounded_size, _params, grads, _exp_avg_sq, _param_size, dev_params, half_precision);
163
+ #endif
164
+ if (_param_size > rounded_size)
165
+ Step_4((_params + rounded_size),
166
+ (grads + rounded_size),
167
+ (_exp_avg_sq + rounded_size),
168
+ (_param_size - rounded_size),
169
+ (dev_params != nullptr ? (dev_params + rounded_size) : dev_params),
170
+ half_precision);
171
+ }
172
+
173
+ int ds_adagrad_step(int optimizer_id,
174
+ size_t step,
175
+ float lr,
176
+ float epsilon,
177
+ float weight_decay,
178
+ torch::Tensor& params,
179
+ torch::Tensor& grads,
180
+ torch::Tensor& exp_avg_sq)
181
+ {
182
+ auto params_c = params.contiguous();
183
+ auto grads_c = grads.contiguous();
184
+ auto exp_avg_sq_c = exp_avg_sq.contiguous();
185
+
186
+ float* params_ptr = (float*)params_c.data_ptr();
187
+ float* grads_ptr = (float*)grads_c.data_ptr();
188
+ float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr();
189
+
190
+ std::shared_ptr<Adagrad_Optimizer> opt =
191
+ std::static_pointer_cast<Adagrad_Optimizer>(s_optimizers[optimizer_id]);
192
+ opt->IncrementStep(step);
193
+ opt->update_state(lr, epsilon, weight_decay);
194
+ opt->Step_8(params_ptr, grads_ptr, exp_avg_sq_ptr, params_c.numel());
195
+
196
+ #if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__)
197
+ opt->SynchronizeStreams();
198
+ #endif
199
+ return 0;
200
+ }
201
+
202
+ int ds_adagrad_step_plus_copy(int optimizer_id,
203
+ size_t step,
204
+ float lr,
205
+ float epsilon,
206
+ float weight_decay,
207
+ torch::Tensor& params,
208
+ torch::Tensor& grads,
209
+ torch::Tensor& exp_avg_sq,
210
+ torch::Tensor& gpu_params)
211
+ {
212
+ #if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__)
213
+ auto params_c = params.contiguous();
214
+ auto gpu_params_c = gpu_params.contiguous();
215
+ auto exp_avg_sq_c = exp_avg_sq.contiguous();
216
+ auto grads_c = grads.contiguous();
217
+
218
+ float* params_ptr = (float*)params_c.data_ptr();
219
+ float* grads_ptr = (float*)grads_c.data_ptr();
220
+ ds_half_precision_t* gpu_params_ptr = (ds_half_precision_t*)gpu_params_c.data_ptr();
221
+ float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr();
222
+
223
+ std::shared_ptr<Adagrad_Optimizer> opt =
224
+ std::static_pointer_cast<Adagrad_Optimizer>(s_optimizers[optimizer_id]);
225
+ opt->IncrementStep(step);
226
+ opt->update_state(lr, epsilon, weight_decay);
227
+ opt->Step_8(params_ptr,
228
+ grads_ptr,
229
+ exp_avg_sq_ptr,
230
+ params_c.numel(),
231
+ gpu_params_ptr,
232
+ (params.options().dtype() == at::kHalf));
233
+
234
+ opt->SynchronizeStreams();
235
+ #else
236
+ assert(false);
237
+ #endif
238
+ return 0;
239
+ }
240
+
241
+ int destroy_adagrad_optimizer(int optimizer_id)
242
+ {
243
+ s_optimizers.erase(optimizer_id);
244
+
245
+ return 0;
246
+ }
247
+
248
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
249
+ {
250
+ m.def("adagrad_update", &ds_adagrad_step, "DeepSpeed CPU Adagrad update (C++)");
251
+ m.def("adagrad_update_copy",
252
+ &ds_adagrad_step_plus_copy,
253
+ "DeepSpeed CPU Adagrad update and param copy (C++)");
254
+ m.def("create_adagrad", &create_adagrad_optimizer, "DeepSpeed CPU Adagrad (C++)");
255
+ m.def("destroy_adagrad", &destroy_adagrad_optimizer, "DeepSpeed CPU Adagrad destroy (C++)");
256
+ }
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/cpu_adam.cpp ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "cpu_adam.h"
7
+
8
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
9
+ {
10
+ m.def("adam_update", &ds_adam_step, "DeepSpeed CPU Adam update (C++)");
11
+ m.def("adam_update_copy",
12
+ &ds_adam_step_plus_copy,
13
+ "DeepSpeed CPU Adam update and param copy (C++)");
14
+ m.def("create_adam", &create_adam_optimizer, "DeepSpeed CPU Adam (C++)");
15
+ m.def("destroy_adam", &destroy_adam_optimizer, "DeepSpeed CPU Adam destroy (C++)");
16
+ }
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/cpu_adam_impl.cpp ADDED
@@ -0,0 +1,312 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include <torch/extension.h>
7
+ #include <cassert>
8
+ #include <iostream>
9
+ #include <memory>
10
+ #include <type_traits>
11
+ #include <unordered_map>
12
+ #include "cpu_adam.h"
13
+
14
+ #if defined(__ENABLE_CUDA__)
15
+ #include <cuda_runtime_api.h>
16
+ #include "cublas_v2.h"
17
+ #include "cuda.h"
18
+ #include "curand.h"
19
+ #include "custom_cuda_layers.h"
20
+ #endif
21
+
22
+ static std::unordered_map<int, std::shared_ptr<void>> s_optimizers;
23
+
24
+ // C++ interface
25
+
26
+ void Adam_Optimizer::Step_1(float* _params,
27
+ float* grads,
28
+ float* _exp_avg,
29
+ float* _exp_avg_sq,
30
+ size_t _param_size,
31
+ ds_half_precision_t* dev_params,
32
+ bool half_precision)
33
+ {
34
+ size_t rounded_size = 0;
35
+ #if defined(__AVX512__) or defined(__AVX256__)
36
+ Step_AVX<1>(&rounded_size,
37
+ _params,
38
+ grads,
39
+ _exp_avg,
40
+ _exp_avg_sq,
41
+ _param_size,
42
+ dev_params,
43
+ half_precision);
44
+ #endif
45
+ if (_param_size > rounded_size) {
46
+ float betta1_minus1 = 1 - _betta1;
47
+ float betta2_minus1 = 1 - _betta2;
48
+
49
+ float step_size = -1 * _alpha / _bias_correction1;
50
+ float w_decay = -1 * _alpha * _weight_decay;
51
+ ds_half_precision_t* grads_cast_h;
52
+ ds_half_precision_t* params_cast_h;
53
+ if (half_precision) {
54
+ grads_cast_h = reinterpret_cast<ds_half_precision_t*>(grads);
55
+ params_cast_h = reinterpret_cast<ds_half_precision_t*>(_params);
56
+ }
57
+
58
+ for (size_t t = rounded_size; t < _param_size; t += TILE) {
59
+ size_t copy_size = TILE;
60
+ if ((t + TILE) > _param_size) copy_size = _param_size - t;
61
+ size_t offset = copy_size + t;
62
+ #if defined(__ENABLE_CUDA__)
63
+ if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); }
64
+ #elif defined(__ENABLE_CANN__)
65
+ if ((t / TILE) >= 2) { aclrtSynchronizeStream(_streams[_buf_index].stream()); }
66
+ #endif
67
+ #pragma omp parallel for
68
+ for (size_t k = t; k < offset; k++) {
69
+ float grad = half_precision ? (float)grads_cast_h[k] : grads[k];
70
+ float param = half_precision ? (float)params_cast_h[k] : _params[k];
71
+ float momentum = _exp_avg[k];
72
+ float variance = _exp_avg_sq[k];
73
+ if (_weight_decay > 0 && !_adamw_mode) { grad = param * _weight_decay + grad; }
74
+ momentum = momentum * _betta1;
75
+ momentum = grad * betta1_minus1 + momentum;
76
+
77
+ variance = variance * _betta2;
78
+ grad = grad * grad;
79
+ variance = grad * betta2_minus1 + variance;
80
+
81
+ grad = sqrt(variance);
82
+ grad = grad * _bias_correction2 + _eps;
83
+ grad = momentum / grad;
84
+ if (_weight_decay > 0 && _adamw_mode) { param += w_decay * param; }
85
+ param = grad * step_size + param;
86
+ #if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__)
87
+ if (dev_params) _doubled_buffer[_buf_index][k - t] = param;
88
+ #endif
89
+ if (half_precision)
90
+ params_cast_h[k] = (ds_half_precision_t)param;
91
+ else
92
+ _params[k] = param;
93
+ _exp_avg[k] = momentum;
94
+ _exp_avg_sq[k] = variance;
95
+ }
96
+ #if defined(__ENABLE_CUDA__)
97
+ if (dev_params) {
98
+ launch_param_update(
99
+ _doubled_buffer[_buf_index], dev_params + t, (copy_size), _streams[_buf_index]);
100
+
101
+ _buf_index = !_buf_index;
102
+ }
103
+ #elif defined(__ENABLE_CANN__)
104
+ if (dev_params) {
105
+ size_t memcpy_size = copy_size * sizeof(_doubled_buffer[_buf_index][0]);
106
+ aclrtMemcpy(dev_params + t,
107
+ memcpy_size,
108
+ _doubled_buffer[_buf_index],
109
+ memcpy_size,
110
+ aclrtMemcpyKind::ACL_MEMCPY_HOST_TO_DEVICE);
111
+
112
+ _buf_index = !_buf_index;
113
+ }
114
+ #endif
115
+ }
116
+ }
117
+ }
118
+
119
+ void Adam_Optimizer::Step_4(float* _params,
120
+ float* grads,
121
+ float* _exp_avg,
122
+ float* _exp_avg_sq,
123
+ size_t _param_size,
124
+ ds_half_precision_t* dev_params,
125
+ bool half_precision)
126
+ {
127
+ size_t rounded_size = 0;
128
+ #if defined(__AVX512__) or defined(__AVX256__)
129
+ Step_AVX<4>(&rounded_size,
130
+ _params,
131
+ grads,
132
+ _exp_avg,
133
+ _exp_avg_sq,
134
+ _param_size,
135
+ dev_params,
136
+ half_precision);
137
+ #endif
138
+ if (_param_size > rounded_size)
139
+ Step_1((_params + rounded_size),
140
+ (grads + rounded_size),
141
+ (_exp_avg + rounded_size),
142
+ (_exp_avg_sq + rounded_size),
143
+ (_param_size - rounded_size),
144
+ (dev_params != nullptr ? (dev_params + rounded_size) : dev_params),
145
+ half_precision);
146
+ }
147
+
148
+ int create_adam_optimizer(int optimizer_id,
149
+ float alpha,
150
+ float betta1,
151
+ float betta2,
152
+ float eps,
153
+ float weight_decay,
154
+ bool adamw_mode,
155
+ bool should_log)
156
+ {
157
+ auto opt =
158
+ std::make_shared<Adam_Optimizer>(alpha, betta1, betta2, eps, weight_decay, adamw_mode);
159
+
160
+ s_optimizers[optimizer_id] = opt;
161
+
162
+ if (should_log) {
163
+ std::string avx_type = "";
164
+ #if defined(__AVX512__)
165
+ avx_type = "AVX512";
166
+ #else
167
+ #if defined(__AVX256__)
168
+ avx_type = "AVX2";
169
+ #else
170
+ avx_type = "scalar";
171
+ #endif
172
+ #endif
173
+
174
+ printf("Adam Optimizer #%d is created with %s arithmetic capability.\n",
175
+ optimizer_id,
176
+ avx_type.c_str());
177
+ printf("Config: alpha=%f, betas=(%f, %f), weight_decay=%f, adam_w=%d\n",
178
+ alpha,
179
+ betta1,
180
+ betta2,
181
+ weight_decay,
182
+ (int)adamw_mode);
183
+ }
184
+
185
+ return 0;
186
+ }
187
+
188
+ void Adam_Optimizer::Step_8(float* _params,
189
+ float* grads,
190
+ float* _exp_avg,
191
+ float* _exp_avg_sq,
192
+ size_t _param_size,
193
+ ds_half_precision_t* dev_params,
194
+ bool half_precision)
195
+ {
196
+ size_t rounded_size = 0;
197
+ #if defined(__AVX512__) or defined(__AVX256__)
198
+ Step_AVX<8>(&rounded_size,
199
+ _params,
200
+ grads,
201
+ _exp_avg,
202
+ _exp_avg_sq,
203
+ _param_size,
204
+ dev_params,
205
+ half_precision);
206
+ #endif
207
+ if (_param_size > rounded_size)
208
+ Step_4((_params + rounded_size),
209
+ (grads + rounded_size),
210
+ (_exp_avg + rounded_size),
211
+ (_exp_avg_sq + rounded_size),
212
+ (_param_size - rounded_size),
213
+ (dev_params != nullptr ? (dev_params + rounded_size) : dev_params),
214
+ half_precision);
215
+ }
216
+
217
+ int ds_adam_step(int optimizer_id,
218
+ size_t step,
219
+ float lr,
220
+ float beta1,
221
+ float beta2,
222
+ float epsilon,
223
+ float weight_decay,
224
+ bool bias_correction,
225
+ torch::Tensor& params,
226
+ torch::Tensor& grads,
227
+ torch::Tensor& exp_avg,
228
+ torch::Tensor& exp_avg_sq)
229
+ {
230
+ auto params_c = params.contiguous();
231
+ auto grads_c = grads.contiguous();
232
+ auto exp_avg_c = exp_avg.contiguous();
233
+ auto exp_avg_sq_c = exp_avg_sq.contiguous();
234
+
235
+ // assert(params.options().dtype() == grads.options().dtype());
236
+
237
+ float* params_ptr = (float*)params_c.data_ptr();
238
+ float* grads_ptr = (float*)grads_c.data_ptr();
239
+ float* exp_avg_ptr = (float*)exp_avg_c.data_ptr();
240
+ float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr();
241
+
242
+ std::shared_ptr<Adam_Optimizer> opt =
243
+ std::static_pointer_cast<Adam_Optimizer>(s_optimizers[optimizer_id]);
244
+ opt->IncrementStep(step, beta1, beta2);
245
+ opt->update_state(lr, epsilon, weight_decay, bias_correction);
246
+
247
+ opt->Step_8(params_ptr,
248
+ grads_ptr,
249
+ exp_avg_ptr,
250
+ exp_avg_sq_ptr,
251
+ params_c.numel(),
252
+ nullptr,
253
+ (params.options().dtype() == at::kHalf));
254
+
255
+ #if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__)
256
+ opt->SynchronizeStreams();
257
+ #endif
258
+ return 0;
259
+ }
260
+
261
+ int ds_adam_step_plus_copy(int optimizer_id,
262
+ size_t step,
263
+ float lr,
264
+ float beta1,
265
+ float beta2,
266
+ float epsilon,
267
+ float weight_decay,
268
+ bool bias_correction,
269
+ torch::Tensor& params,
270
+ torch::Tensor& grads,
271
+ torch::Tensor& exp_avg,
272
+ torch::Tensor& exp_avg_sq,
273
+ torch::Tensor& device_params)
274
+ {
275
+ #if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__)
276
+ auto params_c = params.contiguous();
277
+ auto device_params_c = device_params.contiguous();
278
+ auto exp_avg_c = exp_avg.contiguous();
279
+ auto exp_avg_sq_c = exp_avg_sq.contiguous();
280
+ auto grads_c = grads.contiguous();
281
+
282
+ float* params_ptr = (float*)params_c.data_ptr();
283
+ float* grads_ptr = (float*)grads_c.data_ptr();
284
+ ds_half_precision_t* device_params_ptr = (ds_half_precision_t*)device_params_c.data_ptr();
285
+ float* exp_avg_ptr = (float*)exp_avg_c.data_ptr();
286
+ float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr();
287
+
288
+ std::shared_ptr<Adam_Optimizer> opt =
289
+ std::static_pointer_cast<Adam_Optimizer>(s_optimizers[optimizer_id]);
290
+ opt->IncrementStep(step, beta1, beta2);
291
+ opt->update_state(lr, epsilon, weight_decay, bias_correction);
292
+ opt->Step_8(params_ptr,
293
+ grads_ptr,
294
+ exp_avg_ptr,
295
+ exp_avg_sq_ptr,
296
+ params_c.numel(),
297
+ device_params_ptr,
298
+ (params.options().dtype() == at::kHalf));
299
+
300
+ opt->SynchronizeStreams();
301
+ #else
302
+ assert(false);
303
+ #endif
304
+ return 0;
305
+ }
306
+
307
+ int destroy_adam_optimizer(int optimizer_id)
308
+ {
309
+ s_optimizers.erase(optimizer_id);
310
+
311
+ return 0;
312
+ }
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/fused_adam_frontend.cpp ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include <torch/extension.h>
7
+
8
+ void multi_tensor_adam_cuda(int chunk_size,
9
+ at::Tensor noop_flag,
10
+ std::vector<std::vector<at::Tensor>> tensor_lists,
11
+ const float lr,
12
+ const float beta1,
13
+ const float beta2,
14
+ const float epsilon,
15
+ const int step,
16
+ const int mode,
17
+ const int bias_correction,
18
+ const float weight_decay);
19
+
20
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
21
+ {
22
+ m.def("multi_tensor_adam",
23
+ &multi_tensor_adam_cuda,
24
+ "Compute and apply gradient update to parameters for Adam optimizer");
25
+ }
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/multi_tensor_adam.cu ADDED
@@ -0,0 +1,203 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ /*
7
+ Copyright NVIDIA/apex
8
+ This file is adapted from fused adam in NVIDIA/apex, commit a109f85
9
+ */
10
+
11
+ #include <ATen/ATen.h>
12
+ #include <ATen/AccumulateType.h>
13
+ #include <ATen/cuda/CUDAContext.h>
14
+ #include <ATen/cuda/Exceptions.h>
15
+ // Another possibility:
16
+ // #include <torch/all.h>
17
+
18
+ #include <assert.h>
19
+
20
+ #include "multi_tensor_apply.cuh"
21
+ #include "type_shim.h"
22
+
23
+ #define BLOCK_SIZE 512
24
+ #define ILP 4
25
+
26
+ typedef enum : int {
27
+ ADAM_MODE_0 = 0, // L2 regularization mode
28
+ ADAM_MODE_1 = 1 // Decoupled weight decay mode(AdamW)
29
+ } adamMode_t;
30
+
31
+ using MATH_T = float;
32
+
33
+ template <typename T, typename index_t>
34
+ struct AdamFunctor {
35
+ __device__ __forceinline__ void operator()(int chunk_size,
36
+ volatile int* noop_gmem,
37
+ TensorListMetadata<4>& tl,
38
+ const float beta1,
39
+ const float beta2,
40
+ const float beta1_correction,
41
+ const float beta2_correction,
42
+ const float epsilon,
43
+ const float lr,
44
+ adamMode_t mode,
45
+ const float decay)
46
+ {
47
+ // I'd like this kernel to propagate infs/nans.
48
+ // if(*noop_gmem == 1)
49
+ // return;
50
+
51
+ index_t tensor_loc = tl.block_to_tensor[blockIdx.x];
52
+
53
+ // potentially use to pass in list of scalar
54
+ // int tensor_num = tl.start_tensor_this_launch + tensor_loc;
55
+
56
+ index_t chunk_idx = tl.block_to_chunk[blockIdx.x];
57
+ index_t n = tl.sizes[tensor_loc];
58
+
59
+ T* g = (T*)tl.addresses[0][tensor_loc];
60
+ g += chunk_idx * chunk_size;
61
+
62
+ T* p = (T*)tl.addresses[1][tensor_loc];
63
+ p += chunk_idx * chunk_size;
64
+
65
+ T* m = (T*)tl.addresses[2][tensor_loc];
66
+ m += chunk_idx * chunk_size;
67
+
68
+ T* v = (T*)tl.addresses[3][tensor_loc];
69
+ v += chunk_idx * chunk_size;
70
+
71
+ n -= chunk_idx * chunk_size;
72
+
73
+ // see note in multi_tensor_scale_kernel.cu
74
+ for (index_t i_start = 0; i_start < n && i_start < chunk_size;
75
+ i_start += blockDim.x * ILP) {
76
+ MATH_T r_g[ILP];
77
+ MATH_T r_p[ILP];
78
+ MATH_T r_m[ILP];
79
+ MATH_T r_v[ILP];
80
+ #pragma unroll
81
+ for (int ii = 0; ii < ILP; ii++) {
82
+ int i = i_start + threadIdx.x + ii * blockDim.x;
83
+ if (i < n && i < chunk_size) {
84
+ r_g[ii] = g[i];
85
+ r_p[ii] = p[i];
86
+ r_m[ii] = m[i];
87
+ r_v[ii] = v[i];
88
+ } else {
89
+ r_g[ii] = MATH_T(0);
90
+ r_p[ii] = MATH_T(0);
91
+ r_m[ii] = MATH_T(0);
92
+ r_v[ii] = MATH_T(0);
93
+ }
94
+ }
95
+ #pragma unroll
96
+ for (int ii = 0; ii < ILP; ii++) {
97
+ if (mode == ADAM_MODE_0) { // L2
98
+ r_g[ii] = r_g[ii] + (decay * r_p[ii]);
99
+ r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii];
100
+ r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii];
101
+ MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
102
+ MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
103
+ MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
104
+ MATH_T update = next_m_unbiased / denom;
105
+ r_p[ii] = r_p[ii] - (lr * update);
106
+ } else { // weight decay
107
+ r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii];
108
+ r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii];
109
+ MATH_T next_m_unbiased = r_m[ii] / beta1_correction;
110
+ MATH_T next_v_unbiased = r_v[ii] / beta2_correction;
111
+ MATH_T denom = sqrtf(next_v_unbiased) + epsilon;
112
+ MATH_T update = (next_m_unbiased / denom) + (decay * r_p[ii]);
113
+ r_p[ii] = r_p[ii] - (lr * update);
114
+ }
115
+ }
116
+ #pragma unroll
117
+ for (int ii = 0; ii < ILP; ii++) {
118
+ int i = i_start + threadIdx.x + ii * blockDim.x;
119
+ if (i < n && i < chunk_size) {
120
+ p[i] = r_p[ii];
121
+ m[i] = r_m[ii];
122
+ v[i] = r_v[ii];
123
+ }
124
+ }
125
+ }
126
+ }
127
+ };
128
+
129
+ void multi_tensor_adam_cuda(int chunk_size,
130
+ at::Tensor noop_flag,
131
+ std::vector<std::vector<at::Tensor>> tensor_lists,
132
+ const float lr,
133
+ const float beta1,
134
+ const float beta2,
135
+ const float epsilon,
136
+ const int step,
137
+ const int mode,
138
+ const int bias_correction,
139
+ const float weight_decay)
140
+ {
141
+ using namespace at;
142
+
143
+ // Handle bias correction mode
144
+ float bias_correction1 = 1.0f, bias_correction2 = 1.0f;
145
+ if (bias_correction == 1) {
146
+ bias_correction1 = 1 - std::pow(beta1, step);
147
+ bias_correction2 = 1 - std::pow(beta2, step);
148
+ }
149
+
150
+ size_t max_size = 0;
151
+ bool requires_64bit_indexing = false;
152
+ for (auto it = tensor_lists.begin(); it != tensor_lists.end(); it++) {
153
+ for (auto it2 = it->begin(); it2 != it->end(); it2++) {
154
+ if (it2->numel() > max_size) {
155
+ max_size = it2->numel();
156
+ if (max_size >= INT_MAX) {
157
+ requires_64bit_indexing = true;
158
+ break;
159
+ }
160
+ }
161
+ }
162
+ if (requires_64bit_indexing) { break; }
163
+ }
164
+
165
+ // Assume single type across p,g,m1,m2 now
166
+ if (requires_64bit_indexing) {
167
+ DISPATCH_DOUBLE_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(),
168
+ 0,
169
+ "adam",
170
+ multi_tensor_apply<4>((int64_t)BLOCK_SIZE,
171
+ (int64_t)chunk_size,
172
+ noop_flag,
173
+ tensor_lists,
174
+ AdamFunctor<scalar_t_0, int64_t>(),
175
+ beta1,
176
+ beta2,
177
+ bias_correction1,
178
+ bias_correction2,
179
+ epsilon,
180
+ lr,
181
+ (adamMode_t)mode,
182
+ weight_decay);)
183
+ } else {
184
+ DISPATCH_DOUBLE_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(),
185
+ 0,
186
+ "adam",
187
+ multi_tensor_apply<4>(BLOCK_SIZE,
188
+ chunk_size,
189
+ noop_flag,
190
+ tensor_lists,
191
+ AdamFunctor<scalar_t_0, int32_t>(),
192
+ beta1,
193
+ beta2,
194
+ bias_correction1,
195
+ bias_correction2,
196
+ epsilon,
197
+ lr,
198
+ (adamMode_t)mode,
199
+ weight_decay);)
200
+ }
201
+
202
+ AT_CUDA_CHECK(cudaGetLastError());
203
+ }
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/multi_tensor_apply.cuh ADDED
@@ -0,0 +1,132 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ /*
7
+ Copyright NVIDIA/apex
8
+ This file is adapted from fused adam in NVIDIA/apex, commit a109f85
9
+ */
10
+
11
+ #include <ATen/ATen.h>
12
+ #include <ATen/AccumulateType.h>
13
+ #include <ATen/cuda/CUDAContext.h>
14
+ #include <ATen/cuda/Exceptions.h>
15
+ #include <c10/cuda/CUDAGuard.h>
16
+ #include "compat.h"
17
+
18
+ #include <assert.h>
19
+
20
+ // #include <iostream>
21
+
22
+ // This header is the one-stop shop for all your multi-tensor apply needs.
23
+
24
+ // TODO: Kernel arg size limit may be <4KB for some other cards (ie Jetson)
25
+ constexpr int depth_to_max_tensors[5] = {110, 64, 48, 36, 30};
26
+ constexpr int depth_to_max_blocks[5] = {320, 320, 320, 320, 320};
27
+
28
+ template <int n>
29
+ struct TensorListMetadata {
30
+ void* addresses[n][depth_to_max_tensors[n - 1]];
31
+ int sizes[depth_to_max_tensors[n - 1]];
32
+ unsigned char block_to_tensor[depth_to_max_blocks[n - 1]];
33
+ int block_to_chunk[depth_to_max_blocks[n - 1]]; // I fear this needs to be a full int.
34
+ int start_tensor_this_launch;
35
+ };
36
+
37
+ template <typename T, typename U, typename... ArgTypes>
38
+ __global__ void multi_tensor_apply_kernel(int64_t chunk_size,
39
+ volatile int* noop_flag,
40
+ T tl,
41
+ U callable,
42
+ ArgTypes... args)
43
+ {
44
+ // Hand the chunk information to the user-supplied functor to process however it likes.
45
+ callable(chunk_size, noop_flag, tl, args...);
46
+ }
47
+
48
+ template <int depth, typename T, typename... ArgTypes>
49
+ void multi_tensor_apply(int64_t block_size,
50
+ int64_t chunk_size,
51
+ const at::Tensor& noop_flag,
52
+ const std::vector<std::vector<at::Tensor>>& tensor_lists,
53
+ T callable,
54
+ ArgTypes... args)
55
+ {
56
+ TORCH_CHECK(tensor_lists.size() == depth, "tensor_lists.size() != depth");
57
+ int len0 = tensor_lists[0].size();
58
+ TORCH_CHECK(len0 > 0, "tensor_lists[0].size() is not > 0");
59
+ auto ref_device = tensor_lists[0][0].device();
60
+ TORCH_CHECK(ref_device.type() == at::kCUDA, "expected input to be on cuda");
61
+ for (int l = 0; l < tensor_lists.size(); l++) // No range-based for because I need indices
62
+ {
63
+ TORCH_CHECK(tensor_lists[l].size() == len0, "Size mismatch among tensor lists");
64
+ for (int t = 0; t < tensor_lists[l].size(); t++) {
65
+ // TODO: Print which tensor fails.
66
+ bool contiguous_memory = tensor_lists[l][t].is_contiguous();
67
+ #ifdef VERSION_GE_1_5
68
+ contiguous_memory = (contiguous_memory ||
69
+ tensor_lists[l][t].is_contiguous(at::MemoryFormat::ChannelsLast));
70
+ #endif
71
+ TORCH_CHECK(contiguous_memory, "A tensor was not contiguous.");
72
+ TORCH_CHECK(tensor_lists[l][t].device() == ref_device,
73
+ "A tensor was not on the same device as the first tensor");
74
+ TORCH_CHECK(tensor_lists[l][t].numel() == tensor_lists[0][t].numel(), "Size mismatch");
75
+ }
76
+ }
77
+
78
+ int ntensors = tensor_lists[0].size();
79
+
80
+ TensorListMetadata<depth> tl;
81
+
82
+ const at::cuda::OptionalCUDAGuard device_guard(device_of(tensor_lists[0][0]));
83
+ auto stream = at::cuda::getCurrentCUDAStream();
84
+
85
+ tl.start_tensor_this_launch = 0;
86
+ int loc_block_info = 0;
87
+ int loc_tensor_info = 0;
88
+ for (int t = 0; t < ntensors; t++) {
89
+ tl.sizes[loc_tensor_info] = tensor_lists[0][t].numel();
90
+ for (int d = 0; d < depth; d++)
91
+ tl.addresses[d][loc_tensor_info] = tensor_lists[d][t].data_ptr();
92
+ loc_tensor_info++;
93
+
94
+ auto chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1) / chunk_size;
95
+
96
+ for (auto chunk = 0; chunk < chunks_this_tensor; chunk++) {
97
+ // std::cout << chunks_this_tensor << std::endl;
98
+ tl.block_to_tensor[loc_block_info] = loc_tensor_info - 1;
99
+ tl.block_to_chunk[loc_block_info] = chunk;
100
+ loc_block_info++;
101
+
102
+ bool tensors_full = (loc_tensor_info == depth_to_max_tensors[depth - 1] &&
103
+ chunk == chunks_this_tensor - 1);
104
+ bool blocks_full = (loc_block_info == depth_to_max_blocks[depth - 1]);
105
+ bool last_chunk = (t == ntensors - 1 && chunk == chunks_this_tensor - 1);
106
+ if (tensors_full || blocks_full || last_chunk) {
107
+ // using accscalar_t = acc_type<scalar_t, true>;
108
+ multi_tensor_apply_kernel<<<loc_block_info, block_size, 0, stream>>>(
109
+ chunk_size, noop_flag.DATA_PTR<int>(), tl, callable, args...);
110
+
111
+ AT_CUDA_CHECK(cudaGetLastError());
112
+
113
+ // Reset. The control flow possibilities here make my brain hurt.
114
+ loc_block_info = 0;
115
+ if (chunk == chunks_this_tensor - 1) {
116
+ // std::cout << "Hit case 1 " << cond1 << " " << cond2 << " " << cond3 <<
117
+ // std::endl;
118
+ loc_tensor_info = 0;
119
+ tl.start_tensor_this_launch = t + 1;
120
+ } else {
121
+ // std::cout << "Hit case 2 " << cond1 << " " << cond2 << " " << cond3 <<
122
+ // std::endl;
123
+ tl.sizes[0] = tl.sizes[loc_tensor_info - 1];
124
+ for (int d = 0; d < depth; d++)
125
+ tl.addresses[d][0] = tl.addresses[d][loc_tensor_info - 1];
126
+ loc_tensor_info = 1;
127
+ tl.start_tensor_this_launch = t;
128
+ }
129
+ }
130
+ }
131
+ }
132
+ }
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_common.cpp ADDED
@@ -0,0 +1,342 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ /*
7
+ Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
8
+ */
9
+
10
+ #include <assert.h>
11
+ #include <stdlib.h>
12
+ #include <string.h>
13
+
14
+ #include <fcntl.h>
15
+ #include <libaio.h>
16
+ #include <sys/mman.h>
17
+ #include <sys/stat.h>
18
+ #include <sys/types.h>
19
+ #include <time.h>
20
+ #include <unistd.h>
21
+
22
+ #include <algorithm>
23
+ #include <chrono>
24
+ #include <cmath>
25
+ #include <cstring>
26
+ #include <fstream>
27
+ #include <iostream>
28
+ #include <limits>
29
+ #include <numeric>
30
+ #include <string>
31
+ #include <vector>
32
+
33
+ #include "deepspeed_aio_common.h"
34
+
35
+ using namespace std;
36
+ using namespace std::chrono;
37
+
38
+ #define DEBUG_DS_AIO_PERF 0
39
+ #define DEBUG_DS_AIO_SUBMIT_PERF 0
40
+
41
+ static const std::string c_library_name = "deepspeed_aio";
42
+
43
+ static void _report_aio_statistics(const char* tag,
44
+ const std::vector<std::chrono::duration<double>>& latencies)
45
+ __attribute__((unused));
46
+
47
+ static void _report_aio_statistics(const char* tag,
48
+ const std::vector<std::chrono::duration<double>>& latencies)
49
+ {
50
+ std::vector<double> lat_usec;
51
+ for (auto& lat : latencies) { lat_usec.push_back(lat.count() * 1e6); }
52
+ const auto min_lat = *(std::min_element(lat_usec.begin(), lat_usec.end()));
53
+ const auto max_lat = *(std::max_element(lat_usec.begin(), lat_usec.end()));
54
+ const auto avg_lat = std::accumulate(lat_usec.begin(), lat_usec.end(), 0) / lat_usec.size();
55
+
56
+ std::cout << c_library_name << ": latency statistics(usec) " << tag
57
+ << " min/max/avg = " << min_lat << " " << max_lat << " " << avg_lat << std::endl;
58
+ }
59
+
60
+ static void _get_aio_latencies(std::vector<std::chrono::duration<double>>& raw_latencies,
61
+ struct deepspeed_aio_latency_t& summary_latencies)
62
+ {
63
+ std::vector<double> lat_usec;
64
+ for (auto& lat : raw_latencies) { lat_usec.push_back(lat.count() * 1e6); }
65
+ summary_latencies._min_usec = *(std::min_element(lat_usec.begin(), lat_usec.end()));
66
+ summary_latencies._max_usec = *(std::max_element(lat_usec.begin(), lat_usec.end()));
67
+ summary_latencies._avg_usec =
68
+ std::accumulate(lat_usec.begin(), lat_usec.end(), 0) / lat_usec.size();
69
+ }
70
+
71
+ static void _do_io_submit_singles(const long long int n_iocbs,
72
+ const long long int iocb_index,
73
+ std::unique_ptr<aio_context>& aio_ctxt,
74
+ std::vector<std::chrono::duration<double>>& submit_times)
75
+ {
76
+ for (auto i = 0; i < n_iocbs; ++i) {
77
+ const auto st = std::chrono::high_resolution_clock::now();
78
+ const auto submit_ret = io_submit(aio_ctxt->_io_ctxt, 1, aio_ctxt->_iocbs.data() + i);
79
+ submit_times.push_back(std::chrono::high_resolution_clock::now() - st);
80
+ #if DEBUG_DS_AIO_SUBMIT_PERF
81
+ printf("submit(usec) %f io_index=%lld buf=%p len=%lu off=%llu \n",
82
+ submit_times.back().count() * 1e6,
83
+ iocb_index,
84
+ aio_ctxt->_iocbs[i]->u.c.buf,
85
+ aio_ctxt->_iocbs[i]->u.c.nbytes,
86
+ aio_ctxt->_iocbs[i]->u.c.offset);
87
+ #endif
88
+ assert(submit_ret > 0);
89
+ }
90
+ }
91
+
92
+ static void _do_io_submit_block(const long long int n_iocbs,
93
+ const long long int iocb_index,
94
+ std::unique_ptr<aio_context>& aio_ctxt,
95
+ std::vector<std::chrono::duration<double>>& submit_times)
96
+ {
97
+ const auto st = std::chrono::high_resolution_clock::now();
98
+ const auto submit_ret = io_submit(aio_ctxt->_io_ctxt, n_iocbs, aio_ctxt->_iocbs.data());
99
+ submit_times.push_back(std::chrono::high_resolution_clock::now() - st);
100
+ #if DEBUG_DS_AIO_SUBMIT_PERF
101
+ printf("submit(usec) %f io_index=%lld nr=%lld buf=%p len=%lu off=%llu \n",
102
+ submit_times.back().count() * 1e6,
103
+ iocb_index,
104
+ n_iocbs,
105
+ aio_ctxt->_iocbs[0]->u.c.buf,
106
+ aio_ctxt->_iocbs[0]->u.c.nbytes,
107
+ aio_ctxt->_iocbs[0]->u.c.offset);
108
+ #endif
109
+ assert(submit_ret > 0);
110
+ }
111
+
112
+ static int _do_io_complete(const long long int min_completes,
113
+ const long long int max_completes,
114
+ std::unique_ptr<aio_context>& aio_ctxt,
115
+ std::vector<std::chrono::duration<double>>& reap_times)
116
+ {
117
+ const auto start_time = std::chrono::high_resolution_clock::now();
118
+ long long int n_completes = io_pgetevents(aio_ctxt->_io_ctxt,
119
+ min_completes,
120
+ max_completes,
121
+ aio_ctxt->_io_events.data(),
122
+ nullptr,
123
+ nullptr);
124
+ reap_times.push_back(std::chrono::high_resolution_clock::now() - start_time);
125
+ assert(n_completes >= min_completes);
126
+ return n_completes;
127
+ }
128
+
129
+ void do_aio_operation_sequential(const bool read_op,
130
+ std::unique_ptr<aio_context>& aio_ctxt,
131
+ std::unique_ptr<io_xfer_ctxt>& xfer_ctxt,
132
+ deepspeed_aio_config_t* config,
133
+ deepspeed_aio_perf_t* perf)
134
+ {
135
+ struct io_prep_context prep_ctxt(read_op, xfer_ctxt, aio_ctxt->_block_size, &aio_ctxt->_iocbs);
136
+
137
+ const auto num_io_blocks = static_cast<long long int>(
138
+ ceil(static_cast<double>(xfer_ctxt->_num_bytes) / aio_ctxt->_block_size));
139
+ #if DEBUG_DS_AIO_PERF
140
+ const auto io_op_name = std::string(read_op ? "read" : "write");
141
+ std::cout << c_library_name << ": start " << io_op_name << " " << xfer_ctxt->_num_bytes
142
+ << " bytes with " << num_io_blocks << " io blocks" << std::endl;
143
+ #endif
144
+
145
+ std::vector<std::chrono::duration<double>> submit_times;
146
+ std::vector<std::chrono::duration<double>> reap_times;
147
+ const auto max_queue_bytes =
148
+ static_cast<long long int>(aio_ctxt->_queue_depth * aio_ctxt->_block_size);
149
+
150
+ auto start = std::chrono::high_resolution_clock::now();
151
+ for (long long iocb_index = 0; iocb_index < num_io_blocks;
152
+ iocb_index += aio_ctxt->_queue_depth) {
153
+ const auto start_offset = iocb_index * aio_ctxt->_block_size;
154
+ const auto start_buffer = (char*)xfer_ctxt->_mem_buffer + start_offset;
155
+ const auto n_iocbs =
156
+ min(static_cast<long long>(aio_ctxt->_queue_depth), (num_io_blocks - iocb_index));
157
+ const auto num_bytes = min(max_queue_bytes, (xfer_ctxt->_num_bytes - start_offset));
158
+ prep_ctxt.prep_iocbs(n_iocbs, num_bytes, start_buffer, start_offset);
159
+
160
+ if (config->_single_submit) {
161
+ _do_io_submit_singles(n_iocbs, iocb_index, aio_ctxt, submit_times);
162
+ } else {
163
+ _do_io_submit_block(n_iocbs, iocb_index, aio_ctxt, submit_times);
164
+ }
165
+
166
+ _do_io_complete(n_iocbs, n_iocbs, aio_ctxt, reap_times);
167
+ }
168
+ const std::chrono::duration<double> elapsed = std::chrono::high_resolution_clock::now() - start;
169
+
170
+ if (perf) {
171
+ _get_aio_latencies(submit_times, perf->_submit);
172
+ _get_aio_latencies(reap_times, perf->_complete);
173
+ perf->_e2e_usec = elapsed.count() * 1e6;
174
+ perf->_e2e_rate_GB = (xfer_ctxt->_num_bytes / elapsed.count() / 1e9);
175
+ }
176
+
177
+ #if DEBUG_DS_AIO_PERF
178
+ _report_aio_statistics("submit", submit_times);
179
+ _report_aio_statistics("complete", reap_times);
180
+ #endif
181
+
182
+ #if DEBUG_DS_AIO_PERF
183
+ std::cout << c_library_name << ": runtime(usec) " << elapsed.count() * 1e6
184
+ << " rate(GB/sec) = " << (xfer_ctxt->_num_bytes / elapsed.count() / 1e9) << std::endl;
185
+ #endif
186
+
187
+ #if DEBUG_DS_AIO_PERF
188
+ std::cout << c_library_name << ": finish " << io_op_name << " " << xfer_ctxt->_num_bytes
189
+ << " bytes " << std::endl;
190
+ #endif
191
+ }
192
+
193
+ void do_aio_operation_overlap(const bool read_op,
194
+ std::unique_ptr<aio_context>& aio_ctxt,
195
+ std::unique_ptr<io_xfer_ctxt>& xfer_ctxt,
196
+ deepspeed_aio_config_t* config,
197
+ deepspeed_aio_perf_t* perf)
198
+ {
199
+ struct io_prep_generator io_gen(read_op, xfer_ctxt, aio_ctxt->_block_size);
200
+
201
+ #if DEBUG_DS_AIO_PERF
202
+ const auto io_op_name = std::string(read_op ? "read" : "write");
203
+ std::cout << c_library_name << ": start " << io_op_name << " " << xfer_ctxt->_num_bytes
204
+ << " bytes with " << io_gen._num_io_blocks << " io blocks" << std::endl;
205
+ #endif
206
+
207
+ std::vector<std::chrono::duration<double>> submit_times;
208
+ std::vector<std::chrono::duration<double>> reap_times;
209
+
210
+ auto request_iocbs = aio_ctxt->_queue_depth;
211
+ auto n_pending_iocbs = 0;
212
+ const auto min_completes = 1;
213
+ auto start = std::chrono::high_resolution_clock::now();
214
+ while (true) {
215
+ const auto n_iocbs = io_gen.prep_iocbs(request_iocbs - n_pending_iocbs, &aio_ctxt->_iocbs);
216
+ if (n_iocbs > 0) {
217
+ if (config->_single_submit) {
218
+ _do_io_submit_singles(
219
+ n_iocbs, (io_gen._next_iocb_index - n_iocbs), aio_ctxt, submit_times);
220
+ } else {
221
+ _do_io_submit_block(
222
+ n_iocbs, (io_gen._next_iocb_index - n_iocbs), aio_ctxt, submit_times);
223
+ }
224
+ }
225
+
226
+ n_pending_iocbs += n_iocbs;
227
+ assert(n_pending_iocbs <= aio_ctxt->_queue_depth);
228
+
229
+ if (n_pending_iocbs == 0) { break; }
230
+
231
+ const auto n_complete =
232
+ _do_io_complete(min_completes, n_pending_iocbs, aio_ctxt, reap_times);
233
+ n_pending_iocbs -= n_complete;
234
+ }
235
+
236
+ const std::chrono::duration<double> elapsed = std::chrono::high_resolution_clock::now() - start;
237
+
238
+ if (perf) {
239
+ _get_aio_latencies(submit_times, perf->_submit);
240
+ _get_aio_latencies(reap_times, perf->_complete);
241
+ perf->_e2e_usec = elapsed.count() * 1e6;
242
+ perf->_e2e_rate_GB = (xfer_ctxt->_num_bytes / elapsed.count() / 1e9);
243
+ }
244
+
245
+ #if DEBUG_DS_AIO_PERF
246
+ _report_aio_statistics("submit", submit_times);
247
+ _report_aio_statistics("complete", reap_times);
248
+ #endif
249
+
250
+ #if DEBUG_DS_AIO_PERF
251
+ std::cout << c_library_name << ": runtime(usec) " << elapsed.count() * 1e6
252
+ << " rate(GB/sec) = " << (xfer_ctxt->_num_bytes / elapsed.count() / 1e9) << std::endl;
253
+ #endif
254
+
255
+ #if DEBUG_DS_AIO_PERF
256
+ std::cout << c_library_name << ": finish " << io_op_name << " " << xfer_ctxt->_num_bytes
257
+ << " bytes " << std::endl;
258
+ #endif
259
+ }
260
+
261
+ void report_file_error(const char* filename, const std::string file_op, const int error_code)
262
+ {
263
+ std::string err_msg = file_op + std::string(" failed on ") + std::string(filename) +
264
+ " error = " + std::to_string(error_code);
265
+ std::cerr << c_library_name << ": " << err_msg << std::endl;
266
+ }
267
+
268
+ int open_file(const char* filename, const bool read_op)
269
+ {
270
+ const int flags = read_op ? (O_RDONLY | O_DIRECT) : (O_WRONLY | O_CREAT | O_DIRECT);
271
+ #if defined(__ENABLE_CANN__)
272
+ int* flags_ptr = (int*)&flags;
273
+ *flags_ptr = read_op ? (O_RDONLY) : (O_WRONLY | O_CREAT);
274
+ #endif
275
+ const int mode = 0600;
276
+ const auto fd = open(filename, flags, mode);
277
+ if (fd == -1) {
278
+ const auto error_code = errno;
279
+ const auto error_msg = read_op ? " open for read " : " open for write ";
280
+ report_file_error(filename, error_msg, error_code);
281
+ return -1;
282
+ }
283
+ return fd;
284
+ }
285
+
286
+ int regular_read(const char* filename, std::vector<char>& buffer)
287
+ {
288
+ long long int num_bytes;
289
+ const auto f_size = get_file_size(filename, num_bytes);
290
+ assert(f_size != -1);
291
+ buffer.resize(num_bytes);
292
+ const auto fd = open(filename, O_RDONLY, 0600);
293
+ assert(fd != -1);
294
+ long long int read_bytes = 0;
295
+ auto r = 0;
296
+ do {
297
+ const auto buffer_ptr = buffer.data() + read_bytes;
298
+ const auto bytes_to_read = num_bytes - read_bytes;
299
+ r = read(fd, buffer_ptr, bytes_to_read);
300
+ read_bytes += r;
301
+ } while (r > 0);
302
+
303
+ if (read_bytes != num_bytes) {
304
+ std::cerr << "read error "
305
+ << " read_bytes (read) = " << read_bytes << " num_bytes (fstat) = " << num_bytes
306
+ << std::endl;
307
+ }
308
+ assert(read_bytes == num_bytes);
309
+ close(fd);
310
+ return 0;
311
+ }
312
+
313
+ static bool _validate_buffer(const char* filename, void* aio_buffer, const long long int num_bytes)
314
+ {
315
+ std::vector<char> regular_buffer;
316
+ const auto reg_ret = regular_read(filename, regular_buffer);
317
+ assert(0 == reg_ret);
318
+ std::cout << "regular read of " << filename << " returned " << regular_buffer.size() << " bytes"
319
+ << std::endl;
320
+
321
+ if (static_cast<long long int>(regular_buffer.size()) != num_bytes) { return false; }
322
+
323
+ return (0 == memcmp(aio_buffer, regular_buffer.data(), regular_buffer.size()));
324
+ }
325
+
326
+ bool validate_aio_operation(const bool read_op,
327
+ const char* filename,
328
+ void* aio_buffer,
329
+ const long long int num_bytes)
330
+ {
331
+ const auto msg_suffix = std::string("deepspeed_aio_") +
332
+ std::string(read_op ? "read()" : "write()") +
333
+ std::string("using read()");
334
+
335
+ if (false == _validate_buffer(filename, aio_buffer, num_bytes)) {
336
+ std::cout << "Fail: correctness of " << msg_suffix << std::endl;
337
+ return false;
338
+ }
339
+
340
+ std::cout << "Pass: correctness of " << msg_suffix << std::endl;
341
+ return true;
342
+ }
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_common.h ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ /*
7
+ Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
8
+ */
9
+
10
+ #include <deepspeed_aio_utils.h>
11
+ #include <stdlib.h>
12
+ #include <memory>
13
+ #include <string>
14
+
15
+ using namespace std;
16
+
17
+ void do_aio_operation_sequential(const bool read_op,
18
+ std::unique_ptr<aio_context>& aio_ctxt,
19
+ std::unique_ptr<io_xfer_ctxt>& xfer_ctxt,
20
+ deepspeed_aio_config_t* config,
21
+ deepspeed_aio_perf_t* perf);
22
+
23
+ void do_aio_operation_overlap(const bool read_op,
24
+ std::unique_ptr<aio_context>& aio_ctxt,
25
+ std::unique_ptr<io_xfer_ctxt>& xfer_ctxt,
26
+ deepspeed_aio_config_t* config,
27
+ deepspeed_aio_perf_t* perf);
28
+
29
+ int open_file(const char* filename, const bool read_op);
30
+
31
+ void report_file_error(const char* filename, const std::string file_op, const int error_code);
32
+
33
+ int regular_read(const char* filename, std::vector<char>& buffer);
34
+
35
+ bool validate_aio_operation(const bool read_op,
36
+ const char* filename,
37
+ void* aio_buffer,
38
+ const long long int num_bytes);
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_types.cpp ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ /*
7
+ Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
8
+ */
9
+
10
+ #include <cmath>
11
+
12
+ #include "deepspeed_aio_utils.h"
13
+
14
+ using namespace std;
15
+
16
+ const int c_block_size = 128 * 1024;
17
+ const int c_io_queue_depth = 8;
18
+
19
+ deepspeed_aio_config_t::deepspeed_aio_config_t()
20
+ : _block_size(c_block_size),
21
+ _queue_depth(c_io_queue_depth),
22
+ _single_submit(false),
23
+ _overlap_events(false),
24
+ _lock_memory(false)
25
+ {
26
+ }
27
+
28
+ deepspeed_aio_config_t::deepspeed_aio_config_t(const int block_size,
29
+ const int queue_depth,
30
+ const bool single_submit,
31
+ const bool overlap_events,
32
+ const bool lock_memory)
33
+ : _block_size(block_size),
34
+ _queue_depth(queue_depth),
35
+ _single_submit(single_submit),
36
+ _overlap_events(overlap_events),
37
+ _lock_memory(lock_memory)
38
+ {
39
+ }
40
+
41
+ void deepspeed_aio_latency_t::dump(const std::string tag)
42
+ {
43
+ std::cout << tag << _min_usec << " " << _max_usec << " " << _avg_usec << " " << std::endl;
44
+ }
45
+
46
+ void deepspeed_aio_latency_t::accumulate(const struct deepspeed_aio_latency_t& other)
47
+ {
48
+ _min_usec += other._min_usec;
49
+ _max_usec += other._max_usec;
50
+ _avg_usec += other._avg_usec;
51
+ }
52
+
53
+ void deepspeed_aio_latency_t::scale(const float scaler)
54
+ {
55
+ _min_usec *= scaler;
56
+ _max_usec *= scaler;
57
+ _avg_usec *= scaler;
58
+ }
59
+
60
+ aio_context::aio_context(const int block_size, const int queue_depth)
61
+ {
62
+ _block_size = block_size;
63
+ _queue_depth = queue_depth;
64
+ for (auto i = 0; i < queue_depth; ++i) {
65
+ _iocbs.push_back((struct iocb*)calloc(1, sizeof(struct iocb)));
66
+ }
67
+ _io_events.resize(queue_depth);
68
+ io_queue_init(queue_depth, &_io_ctxt);
69
+ }
70
+
71
+ aio_context::~aio_context()
72
+ {
73
+ for (auto& iocb : _iocbs) { free(iocb); }
74
+ _io_events.resize(0);
75
+ io_queue_release(_io_ctxt);
76
+ }
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_types.h ADDED
@@ -0,0 +1,59 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ /*
+ Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
+ */
+
+ #include <libaio.h>
+ #include <stdlib.h>
+
+ #include <string>
+ #include <vector>
+
+ using namespace std;
+
+ struct deepspeed_aio_latency_t {
+ double _min_usec;
+ double _max_usec;
+ double _avg_usec;
+
+ void dump(const std::string tag);
+ void accumulate(const deepspeed_aio_latency_t&);
+ void scale(const float value);
+ };
+
+ struct deepspeed_aio_perf_t {
+ deepspeed_aio_latency_t _submit;
+ deepspeed_aio_latency_t _complete;
+ double _e2e_usec;
+ double _e2e_rate_GB;
+ };
+
+ struct deepspeed_aio_config_t {
+ const int _block_size;
+ const int _queue_depth;
+ const bool _single_submit;
+ const bool _overlap_events;
+ const bool _lock_memory;
+
+ deepspeed_aio_config_t();
+ deepspeed_aio_config_t(const int block_size,
+ const int queue_depth,
+ const bool single_submit,
+ const bool overlap_events,
+ const bool lock_memory);
+ };
+
+ struct aio_context {
+ io_context_t _io_ctxt;
+ std::vector<struct io_event> _io_events;
+ std::vector<struct iocb*> _iocbs;
+ int _block_size;
+ int _queue_depth;
+
+ aio_context(const int block_size, const int queue_depth);
+ ~aio_context();
+ };
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_utils.cpp ADDED
@@ -0,0 +1,126 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ /*
+ Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
+ */
+
+ #include <cmath>
+ #include <iostream>
+
+ #include "deepspeed_aio_utils.h"
+
+ using namespace std;
+
+ const int c_block_size = 128 * 1024;
+ const int c_io_queue_depth = 8;
+
+ io_xfer_ctxt::io_xfer_ctxt(const int fd,
+ const long long int file_offset,
+ const long long int num_bytes,
+ const void* buffer)
+ : _fd(fd), _base_offset(file_offset), _mem_buffer(buffer), _num_bytes(num_bytes)
+ {
+ }
+
+ io_prep_context::io_prep_context(const bool read_op,
+ const std::unique_ptr<io_xfer_ctxt>& xfer_ctxt,
+ const size_t block_size,
+ const std::vector<struct iocb*>* iocbs)
+ : _read_op(read_op), _xfer_ctxt(xfer_ctxt), _block_size(block_size), _iocbs(iocbs)
+ {
+ }
+
+ void io_prep_context::prep_iocbs(const int n_iocbs,
+ const size_t num_bytes,
+ const void* start_buffer,
+ const long long int start_offset)
+ {
+ assert(static_cast<size_t>(n_iocbs) <= _iocbs->size());
+ for (auto i = 0; i < n_iocbs; ++i) {
+ const auto shift = i * _block_size;
+ const auto xfer_buffer = (char*)start_buffer + _xfer_ctxt->_base_offset + shift;
+ const auto xfer_offset = _xfer_ctxt->_base_offset + start_offset + shift;
+ auto byte_count = _block_size;
+ if ((shift + _block_size) > num_bytes) { byte_count = num_bytes - shift; }
+
+ if (_read_op) {
+ io_prep_pread(_iocbs->at(i), _xfer_ctxt->_fd, xfer_buffer, byte_count, xfer_offset);
+ } else {
+ io_prep_pwrite(_iocbs->at(i), _xfer_ctxt->_fd, xfer_buffer, byte_count, xfer_offset);
+ }
+ }
+ }
+
+ io_prep_generator::io_prep_generator(const bool read_op,
+ const std::unique_ptr<io_xfer_ctxt>& xfer_ctxt,
+ const size_t block_size)
+ : _read_op(read_op),
+ _xfer_ctxt(xfer_ctxt),
+ _block_size(block_size),
+ _remaining_bytes(xfer_ctxt->_num_bytes),
+ _next_iocb_index(0)
+ {
+ _num_io_blocks =
+ static_cast<long long int>(ceil(static_cast<double>(xfer_ctxt->_num_bytes) / block_size));
+ _remaining_io_blocks = _num_io_blocks;
+ }
+
+ int io_prep_generator::prep_iocbs(const int n_iocbs, std::vector<struct iocb*>* iocbs)
+ {
+ if ((_remaining_bytes) == 0 || (_remaining_io_blocks == 0)) {
+ assert(static_cast<long long int>(_remaining_bytes) == _remaining_io_blocks);
+ return 0;
+ }
+
+ assert(static_cast<size_t>(n_iocbs) <= iocbs->size());
+
+ auto actual_n_iocbs = min(static_cast<long long int>(n_iocbs), _remaining_io_blocks);
+ for (auto i = 0; i < actual_n_iocbs; ++i, ++_next_iocb_index) {
+ const auto xfer_offset = _xfer_ctxt->_base_offset + (_next_iocb_index * _block_size);
+ const auto xfer_buffer = (char*)_xfer_ctxt->_mem_buffer + xfer_offset;
+ const auto num_bytes = min(static_cast<long long int>(_block_size), _remaining_bytes);
+
+ if (_read_op) {
+ io_prep_pread(iocbs->at(i), _xfer_ctxt->_fd, xfer_buffer, num_bytes, xfer_offset);
+ } else {
+ io_prep_pwrite(iocbs->at(i), _xfer_ctxt->_fd, xfer_buffer, num_bytes, xfer_offset);
+ }
+ _remaining_bytes -= num_bytes;
+ }
+ _remaining_io_blocks -= actual_n_iocbs;
+
+ return actual_n_iocbs;
+ }
+
+ int get_file_size(const char* filename, long long int& size)
+ {
+ struct stat st;
+ if (stat(filename, &st) == -1) { return -1; }
+ size = st.st_size;
+ return 0;
+ }
+
+ void* ds_page_aligned_alloc(const size_t size, const bool lock)
+ {
+ void* ptr;
+ int retval;
+
+ retval = posix_memalign(&ptr, (size_t)sysconf(_SC_PAGESIZE), size);
+ if (retval) { return nullptr; }
+
+ if (lock == false) { return ptr; }
+
+ auto mlock_ret = mlock(ptr, size);
+ if (mlock_ret != 0) {
+ auto mlock_error = errno;
+ std::cerr << "mlock failed to allocate " << size << " bytes with error no " << mlock_error
+ << " msg " << strerror(mlock_error) << std::endl;
+ free(ptr);
+ return nullptr;
+ }
+
+ return ptr;
+ }
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_utils.h ADDED
@@ -0,0 +1,79 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ /*
+ Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
+ */
+
+ #pragma once
+
+ #include <assert.h>
+ #include <stdlib.h>
+ #include <string.h>
+
+ #include <fcntl.h>
+ #include <libaio.h>
+ #include <sys/mman.h>
+ #include <sys/stat.h>
+ #include <sys/types.h>
+ #include <unistd.h>
+
+ #include <deepspeed_aio_types.h>
+ #include <cstring>
+ #include <fstream>
+ #include <iostream>
+ #include <memory>
+ #include <string>
+ #include <vector>
+
+ struct io_xfer_ctxt {
+ const int _fd;
+ const long long int _base_offset;
+ const void* _mem_buffer;
+ const long long int _num_bytes;
+
+ io_xfer_ctxt(const int fd,
+ const long long int file_offset,
+ const long long int num_bytes,
+ const void* buffer);
+ };
+
+ struct io_prep_context {
+ const bool _read_op;
+ const std::unique_ptr<io_xfer_ctxt>& _xfer_ctxt;
+ const size_t _block_size;
+ const std::vector<struct iocb*>* _iocbs;
+
+ io_prep_context(const bool read_op,
+ const std::unique_ptr<io_xfer_ctxt>& xfer_ctxt,
+ const size_t block_size,
+ const std::vector<struct iocb*>* iocbs);
+
+ void prep_iocbs(const int n_iocbs,
+ const size_t num_bytes,
+ const void* start_buffer,
+ const long long int start_offset);
+ };
+
+ struct io_prep_generator {
+ const bool _read_op;
+ const std::unique_ptr<io_xfer_ctxt>& _xfer_ctxt;
+ const size_t _block_size;
+
+ long long int _remaining_bytes;
+ long long int _num_io_blocks;
+ long long int _remaining_io_blocks;
+ long long int _next_iocb_index;
+
+ io_prep_generator(const bool read_op,
+ const std::unique_ptr<io_xfer_ctxt>& xfer_ctxt,
+ const size_t block_size);
+
+ int prep_iocbs(const int n_iocbs, std::vector<struct iocb*>* iocbs);
+ };
+
+ void* ds_page_aligned_alloc(const size_t size, const bool lock = false);
+
+ int get_file_size(const char* filename, long long int& size);
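For orientation, here is a minimal sketch (not part of this commit) of how the helpers above compose: io_prep_generator slices a transfer into block-sized iocbs that are submitted through the aio_context declared in deepspeed_aio_types.h. The file path, open flags, and the lack of error/partial-completion handling are illustrative assumptions; the production flow lives in do_aio_operation_sequential/overlap in deepspeed_aio_common.cpp.

#include <fcntl.h>
#include <libaio.h>
#include <unistd.h>
#include <memory>
#include "deepspeed_aio_utils.h"

void example_block_read(const char* filename)
{
    long long int num_bytes = 0;
    if (get_file_size(filename, num_bytes) != 0) { return; }

    const int fd = open(filename, O_RDONLY | O_DIRECT);
    if (fd == -1) { return; }

    // O_DIRECT needs an aligned buffer; lock defaults to false (unpinned).
    void* buffer = ds_page_aligned_alloc(num_bytes);

    auto ctxt = std::make_unique<aio_context>(128 * 1024, 8);
    auto xfer = std::make_unique<io_xfer_ctxt>(fd, 0, num_bytes, buffer);
    io_prep_generator gen(true /*read*/, xfer, ctxt->_block_size);

    int n;
    while ((n = gen.prep_iocbs(ctxt->_queue_depth, &ctxt->_iocbs)) > 0) {
        // Submit up to queue_depth block-sized reads and wait for all of them.
        io_submit(ctxt->_io_ctxt, n, ctxt->_iocbs.data());
        io_getevents(ctxt->_io_ctxt, n, n, ctxt->_io_events.data(), nullptr);
    }

    close(fd);
    free(buffer);
}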
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_aio_thread.cpp ADDED
@@ -0,0 +1,104 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ /*
+ Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
+ */
+
+ #include "deepspeed_aio_thread.h"
+
+ #if defined(__ENABLE_CANN__)
+ #include "torch_npu/csrc/framework/utils/OpAdapter.h"
+ #include "torch_npu/csrc/framework/utils/UtilForOpAdapter.h"
+ #endif
+
+ using namespace std;
+
+ io_op_desc_t::io_op_desc_t(const bool read_op,
+ const torch::Tensor& buffer,
+ const int fd,
+ const char* filename,
+ const long long int num_bytes,
+ const bool validate)
+ : _read_op(read_op),
+ _buffer(buffer),
+ _fd(fd),
+ _filename(filename),
+ _num_bytes(num_bytes),
+ _validate(validate)
+ {
+ _cpu_buffer = (_buffer.is_cuda() || _buffer.is_xpu()
+ #if defined(__ENABLE_CANN__)
+ || torch_npu::utils::is_npu(_buffer)
+ #endif
+ )
+ ? _buffer.to(torch::kCPU).pin_memory()
+ : _buffer;
+ _contiguous_buffer = _cpu_buffer.contiguous();
+ }
+
+ char* io_op_desc_t::data_ptr() const { return (char*)_contiguous_buffer.data_ptr(); }
+
+ void io_op_desc_t::fini()
+ {
+ if (_read_op && _buffer.is_cuda()) { _buffer.copy_(_cpu_buffer.to(torch::kCUDA)); }
+ if (_read_op && _buffer.is_xpu()) { _buffer.copy_(_cpu_buffer.to(torch::kXPU)); }
+ #if defined(__ENABLE_CANN__)
+ if (_read_op && torch_npu::utils::is_npu(_buffer)) {
+ auto device = at::Device("npu:0");
+ _buffer.copy_(_cpu_buffer.to(device));
+ }
+ #endif
+ }
+
+ deepspeed_aio_thread_t::deepspeed_aio_thread_t(const int tid, deepspeed_aio_config_t& aio_config)
+ : _tid(tid),
+ _aio_config(aio_config),
+ _aio_ctxt(new aio_context(aio_config._block_size, aio_config._queue_depth)),
+ _time_to_exit(false)
+ {
+ }
+
+ deepspeed_aio_thread_t::~deepspeed_aio_thread_t() {}
+
+ void deepspeed_aio_thread_t::run()
+ {
+ while (true) {
+ std::shared_ptr<struct io_op_desc_t> next_io_op = nullptr;
+
+ {
+ std::unique_lock<std::mutex> lock(_work_sync._mutex);
+ _work_sync._cond_var.wait(lock,
+ [this] { return (!_work_queue.empty() || _time_to_exit); });
+ if (!_work_queue.empty()) {
+ next_io_op = _work_queue.front();
+ _work_queue.pop();
+ }
+ }
+
+ if (next_io_op) {
+ const auto base_offset = next_io_op->_num_bytes * _tid;
+
+ std::unique_ptr<io_xfer_ctxt> xfer_ctxt(new io_xfer_ctxt(
+ next_io_op->_fd, base_offset, next_io_op->_num_bytes, next_io_op->data_ptr()));
+
+ if (_aio_config._overlap_events) {
+ do_aio_operation_overlap(
+ next_io_op->_read_op, _aio_ctxt, xfer_ctxt, &_aio_config, nullptr);
+ } else {
+ do_aio_operation_sequential(
+ next_io_op->_read_op, _aio_ctxt, xfer_ctxt, &_aio_config, nullptr);
+ }
+
+ {
+ std::lock_guard<std::mutex> lock(_complete_sync._mutex);
+ _complete_queue.push(next_io_op);
+ }
+ _complete_sync._cond_var.notify_one();
+ }
+
+ if (_time_to_exit) { break; }
+ }
+ }
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_aio_thread.h ADDED
@@ -0,0 +1,59 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ /*
+ Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
+ */
+
+ #include <condition_variable>
+ #include <memory>
+ #include <queue>
+ #include "deepspeed_py_aio.h"
+
+ struct io_op_desc_t {
+ const bool _read_op;
+ torch::Tensor _buffer;
+ int _fd;
+ const std::string _filename;
+ const long long int _num_bytes;
+ torch::Tensor _cpu_buffer;
+ torch::Tensor _contiguous_buffer;
+ const bool _validate;
+
+ io_op_desc_t(const bool read_op,
+ const torch::Tensor& buffer,
+ const int fd,
+ const char* filename,
+ const long long int num_bytes,
+ const bool validate);
+
+ char* data_ptr() const;
+ void fini();
+ };
+
+ struct thread_sync_t {
+ std::mutex _mutex;
+ std::condition_variable _cond_var;
+ };
+
+ struct deepspeed_aio_thread_t {
+ const int _tid;
+ deepspeed_aio_config_t& _aio_config;
+
+ std::unique_ptr<struct aio_context> _aio_ctxt;
+ std::queue<std::shared_ptr<struct io_op_desc_t>> _work_queue;
+ std::queue<std::shared_ptr<struct io_op_desc_t>> _complete_queue;
+
+ bool _time_to_exit;
+
+ struct thread_sync_t _work_sync;
+ struct thread_sync_t _complete_sync;
+
+ deepspeed_aio_thread_t(const int tid, deepspeed_aio_config_t& aio_config);
+
+ ~deepspeed_aio_thread_t();
+
+ void run();
+ };
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_pin_tensor.cpp ADDED
@@ -0,0 +1,45 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ /*
+ Functionality for managing CPU tensors occupying page-locked memory.
+ */
+
+ #include "deepspeed_pin_tensor.h"
+
+ using namespace std;
+
+ deepspeed_pin_tensor_t::~deepspeed_pin_tensor_t()
+ {
+ for (auto iter = _locked_tensors.begin(); iter != _locked_tensors.end(); ++iter) {
+ munlock(iter->first, iter->second);
+ }
+ _locked_tensors.clear();
+ }
+
+ torch::Tensor deepspeed_pin_tensor_t::alloc(const size_t num_elem, const at::ScalarType& elem_type)
+ {
+ const auto num_bytes = num_elem * elementSize(elem_type);
+ auto pinned_buffer = ds_page_aligned_alloc(num_bytes, true);
+ assert(nullptr != pinned_buffer);
+
+ _locked_tensors[pinned_buffer] = num_bytes;
+
+ auto options = torch::TensorOptions().dtype(elem_type).device(torch::kCPU);
+
+ return at::from_blob(pinned_buffer, static_cast<long int>(num_bytes), options);
+ }
+
+ bool deepspeed_pin_tensor_t::free(torch::Tensor& locked_tensor)
+ {
+ auto addr = locked_tensor.data_ptr();
+ if (_locked_tensors.find(addr) != _locked_tensors.end()) {
+ munlock(addr, _locked_tensors[addr]);
+ _locked_tensors.erase(addr);
+ return true;
+ }
+
+ return false;
+ }
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_pin_tensor.h ADDED
@@ -0,0 +1,27 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ /*
+ Functionality for managing CPU tensors occupying page-locked memory.
+ TODO: Implement a full-featured manager that
+ 1. Avoid page-locked memory leaks
+ 2. Minimize page-locked memory usage by reducing internal fragmentation
+ Functionality for managing CPU tensors occupying page-locked memory.
+ */
+
+ #include <map>
+ #include "deepspeed_py_aio.h"
+
+ struct deepspeed_pin_tensor_t {
+ std::map<void*, size_t> _locked_tensors;
+
+ deepspeed_pin_tensor_t() = default;
+
+ ~deepspeed_pin_tensor_t();
+
+ torch::Tensor alloc(const size_t num_elem, const at::ScalarType& elem_type);
+
+ bool free(torch::Tensor& locked_tensor);
+ };
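A small usage sketch (not part of this commit) of the pinned-tensor manager above; the element count and dtype are arbitrary assumptions.

#include "deepspeed_pin_tensor.h"

void example_pinned_buffer()
{
    deepspeed_pin_tensor_t mgr;

    // Page-locked float buffer; the manager records the allocation so its
    // destructor can munlock anything that is not freed explicitly.
    torch::Tensor pinned = mgr.alloc(1 << 20, torch::kFloat);

    // ... use `pinned` as the CPU side of aio reads/writes ...

    mgr.free(pinned);  // returns false if this manager did not allocate the tensor
}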
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio.cpp ADDED
@@ -0,0 +1,125 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ /*
+ Copyright 2020 The Microsoft DeepSpeed Team
+ Licensed under the MIT license.
+
+ Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
+ */
+
+ #include <assert.h>
+ #include <stdlib.h>
+ #include <string.h>
+
+ #include <fcntl.h>
+ #include <sys/mman.h>
+ #include <sys/stat.h>
+ #include <sys/types.h>
+ #include <unistd.h>
+
+ #include <cassert>
+ #include <chrono>
+ #include <cstring>
+ #include <fstream>
+ #include <iostream>
+ #include <memory>
+ #include <string>
+ #include <vector>
+
+ #include "deepspeed_py_aio.h"
+
+ using namespace std;
+ using namespace std::chrono;
+
+ #define DEBUG_DS_AIO_READ 0
+ #define DEBUG_DS_AIO_WRITE 0
+
+ static const std::string c_library_name = "deepspeed_aio";
+
+ int deepspeed_py_aio_write(const torch::Tensor& buffer,
+ const char* filename,
+ const int block_size,
+ const int queue_depth,
+ const bool single_submit,
+ const bool overlap_events,
+ const bool validate)
+ {
+ const auto start_time = std::chrono::high_resolution_clock::now();
+ deepspeed_aio_config_t config(block_size, queue_depth, single_submit, overlap_events, false);
+
+ const auto fd = open_file(filename, false);
+ if (fd == -1) { return -1; }
+
+ auto write_buffer = (char*)buffer.data_ptr();
+ const auto num_write_bytes = static_cast<long long int>(buffer.nbytes());
+ std::unique_ptr<io_xfer_ctxt> xfer_ctxt(new io_xfer_ctxt(fd, 0, num_write_bytes, write_buffer));
+ std::unique_ptr<aio_context> aio_ctxt(new aio_context(config._block_size, config._queue_depth));
+
+ if (config._overlap_events) {
+ do_aio_operation_overlap(false, aio_ctxt, xfer_ctxt, &config, nullptr);
+ } else {
+ do_aio_operation_sequential(false, aio_ctxt, xfer_ctxt, &config, nullptr);
+ }
+ const std::chrono::duration<double> aio_time =
+ std::chrono::high_resolution_clock::now() - start_time;
+
+ close(fd);
+
+ if (validate) { validate_aio_operation(false, filename, write_buffer, num_write_bytes); }
+
+ const std::chrono::duration<double> fn_time =
+ std::chrono::high_resolution_clock::now() - start_time;
+ std::cout << "Elapsed time(usec): "
+ << "aio = " << aio_time.count() * 1e6 << " call = " << fn_time.count() * 1e6
+ << std::endl;
+ return 0;
+ }
+
+ int deepspeed_py_aio_read(torch::Tensor& buffer,
+ const char* filename,
+ const int block_size,
+ const int queue_depth,
+ const bool single_submit,
+ const bool overlap_events,
+ const bool validate)
+ {
+ const auto start_time = std::chrono::high_resolution_clock::now();
+ long long num_file_bytes;
+ if (-1 == get_file_size(filename, num_file_bytes)) {
+ const auto error_code = errno;
+ report_file_error(filename, " fstat for read", error_code);
+ return -1;
+ }
+
+ deepspeed_aio_config_t config(block_size, queue_depth, single_submit, overlap_events, false);
+ const auto fd = open_file(filename, true);
+ if (fd == -1) { return -1; }
+
+ auto read_buffer = (char*)buffer.data_ptr();
+ assert(static_cast<long long int>(buffer.nbytes()) == num_file_bytes);
+
+ std::unique_ptr<io_xfer_ctxt> xfer_ctxt(new io_xfer_ctxt(fd, 0, num_file_bytes, read_buffer));
+ std::unique_ptr<aio_context> aio_ctxt(new aio_context(config._block_size, config._queue_depth));
+
+ if (config._overlap_events) {
+ do_aio_operation_overlap(true, aio_ctxt, xfer_ctxt, &config, nullptr);
+ } else {
+ do_aio_operation_sequential(true, aio_ctxt, xfer_ctxt, &config, nullptr);
+ }
+ const std::chrono::duration<double> aio_time =
+ std::chrono::high_resolution_clock::now() - start_time;
+
+ close(fd);
+
+ if (validate) { validate_aio_operation(true, filename, read_buffer, num_file_bytes); }
+
+ const std::chrono::duration<double> fn_time =
+ std::chrono::high_resolution_clock::now() - start_time;
+ std::cout << "Elapsed time(usec): "
+ << "aio = " << aio_time.count() * 1e6 << " call = " << fn_time.count() * 1e6
+ << std::endl;
+ return 0;
+ }
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio.h ADDED
@@ -0,0 +1,31 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ /*
+ Copyright 2020 The Microsoft DeepSpeed Team
+ Licensed under the MIT license.
+
+ Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
+ */
+
+ #include <deepspeed_aio_common.h>
+ #include <stdlib.h>
+ #include <torch/extension.h>
+
+ int deepspeed_py_aio_write(const torch::Tensor& buffer,
+ const char* filename,
+ const int block_size,
+ const int queue_depth,
+ const bool single_submit,
+ const bool overlap_events,
+ const bool validate);
+
+ int deepspeed_py_aio_read(torch::Tensor& buffer,
+ const char* filename,
+ const int block_size,
+ const int queue_depth,
+ const bool single_submit,
+ const bool overlap_events,
+ const bool validate);
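A hedged sketch (not part of this commit) of the two single-shot entry points declared above; the scratch-file path and tuning values are illustrative assumptions only.

#include "deepspeed_py_aio.h"

void example_single_shot_io()
{
    const char* path = "/tmp/ds_aio_example.bin";  // hypothetical scratch file
    auto src = torch::ones({1 << 20}, torch::kFloat);
    auto dst = torch::empty_like(src);

    // Arguments: buffer, filename, block_size, queue_depth, single_submit, overlap_events, validate.
    deepspeed_py_aio_write(src, path, 1024 * 1024, 8, false, true, false);
    deepspeed_py_aio_read(dst, path, 1024 * 1024, 8, false, true, false);
}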
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio_handle.cpp ADDED
@@ -0,0 +1,298 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ /*
+ Copyright 2020 The Microsoft DeepSpeed Team
+ Licensed under the MIT license.
+
+ Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
+ */
+
+ #include "deepspeed_py_aio_handle.h"
+
+ using namespace std;
+
+ static void _start_aio_thread(std::shared_ptr<struct deepspeed_aio_thread_t> ctxt) { ctxt->run(); }
+
+ deepspeed_aio_handle_t::deepspeed_aio_handle_t(const int block_size,
+ const int queue_depth,
+ const bool single_submit,
+ const bool overlap_events,
+ const int num_threads)
+ : _aio_ctxt(new aio_context(block_size, queue_depth)),
+ _single_submit(single_submit),
+ _overlap_events(overlap_events),
+ _num_threads(num_threads),
+ _aio_config(block_size, queue_depth, single_submit, overlap_events, false),
+ _num_pending_ops(0),
+ _pinned_tensor_mgr(new deepspeed_pin_tensor_t())
+ {
+ for (auto i = 0; i < num_threads; ++i) {
+ _thread_contexts.push_back(std::make_shared<deepspeed_aio_thread_t>(i, _aio_config));
+ }
+
+ for (auto& ctxt : _thread_contexts) {
+ _threads.push_back(std::thread(_start_aio_thread, ctxt));
+ }
+ }
+
+ deepspeed_aio_handle_t::~deepspeed_aio_handle_t()
+ {
+ _stop_threads();
+ for (auto& thr : _threads) { thr.join(); }
+ }
+
+ const int deepspeed_aio_handle_t::get_block_size() const
+ {
+ return _aio_ctxt ? _aio_ctxt->_block_size : -1;
+ }
+
+ const int deepspeed_aio_handle_t::get_queue_depth() const
+ {
+ return _aio_ctxt ? _aio_ctxt->_queue_depth : -1;
+ }
+
+ const bool deepspeed_aio_handle_t::get_single_submit() const { return _single_submit; }
+
+ const bool deepspeed_aio_handle_t::get_overlap_events() const { return _overlap_events; }
+
+ const int deepspeed_aio_handle_t::get_thread_count() const { return _num_threads; }
+
+ int deepspeed_aio_handle_t::read(torch::Tensor& buffer, const char* filename, const bool validate)
+ {
+ const auto start_time = std::chrono::high_resolution_clock::now();
+
+ assert(_aio_ctxt);
+
+ long long num_file_bytes;
+ if (-1 == get_file_size(filename, num_file_bytes)) {
+ const auto error_code = errno;
+ report_file_error(filename, " fstat for read", error_code);
+ return -1;
+ }
+ assert(static_cast<long long int>(buffer.nbytes()) == num_file_bytes);
+
+ const auto fd = open_file(filename, true);
+ if (fd == -1) { return -1; }
+
+ auto read_buffer = (char*)buffer.data_ptr();
+ std::unique_ptr<io_xfer_ctxt> xfer_ctxt(new io_xfer_ctxt(fd, 0, num_file_bytes, read_buffer));
+
+ if (_aio_config._overlap_events) {
+ do_aio_operation_overlap(true, _aio_ctxt, xfer_ctxt, &_aio_config, nullptr);
+ } else {
+ do_aio_operation_sequential(true, _aio_ctxt, xfer_ctxt, &_aio_config, nullptr);
+ }
+
+ close(fd);
+ const std::chrono::duration<double> aio_time =
+ std::chrono::high_resolution_clock::now() - start_time;
+
+ if (validate) { validate_aio_operation(true, filename, read_buffer, num_file_bytes); }
+ const std::chrono::duration<double> fn_time =
+ std::chrono::high_resolution_clock::now() - start_time;
+ std::cout << "Elapsed time(usec): "
+ << "aio = " << aio_time.count() * 1e6 << " call = " << fn_time.count() * 1e6
+ << std::endl;
+ return 0;
+ }
+
+ int deepspeed_aio_handle_t::write(const torch::Tensor& buffer,
+ const char* filename,
+ const bool validate)
+ {
+ assert(_aio_ctxt);
+
+ const auto start_time = std::chrono::high_resolution_clock::now();
+
+ const auto fd = open_file(filename, false);
+ if (fd == -1) { return -1; }
+
+ auto write_buffer = (char*)buffer.data_ptr();
+ const auto num_write_bytes = static_cast<long long int>(buffer.nbytes());
+ std::unique_ptr<io_xfer_ctxt> xfer_ctxt(new io_xfer_ctxt(fd, 0, num_write_bytes, write_buffer));
+
+ if (_aio_config._overlap_events) {
+ do_aio_operation_overlap(false, _aio_ctxt, xfer_ctxt, &_aio_config, nullptr);
+ } else {
+ do_aio_operation_sequential(false, _aio_ctxt, xfer_ctxt, &_aio_config, nullptr);
+ }
+ const std::chrono::duration<double> aio_time =
+ std::chrono::high_resolution_clock::now() - start_time;
+
+ close(fd);
+
+ if (validate) { validate_aio_operation(false, filename, write_buffer, num_write_bytes); }
+
+ const std::chrono::duration<double> fn_time =
+ std::chrono::high_resolution_clock::now() - start_time;
+ std::cout << "Elapsed time(usec): "
+ << "aio = " << aio_time.count() * 1e6 << " call = " << fn_time.count() * 1e6
+ << std::endl;
+ return 0;
+ }
+
+ void deepspeed_aio_handle_t::_schedule_aio_work(std::shared_ptr<struct io_op_desc_t> scheduled_op)
+ {
+ for (auto& ctxt : _thread_contexts) {
+ {
+ std::lock_guard<std::mutex> lock(ctxt->_work_sync._mutex);
+ ctxt->_work_queue.push(scheduled_op);
+ }
+ ctxt->_work_sync._cond_var.notify_one();
+ }
+ _num_pending_ops++;
+ }
+
+ std::shared_ptr<struct io_op_desc_t> deepspeed_aio_handle_t::_wait_for_aio_work()
+ {
+ std::shared_ptr<struct io_op_desc_t> completed_op = nullptr;
+ for (auto& ctxt : _thread_contexts) {
+ std::unique_lock<std::mutex> lock(ctxt->_complete_sync._mutex);
+ ctxt->_complete_sync._cond_var.wait(lock,
+ [ctxt] { return !ctxt->_complete_queue.empty(); });
+ completed_op = ctxt->_complete_queue.front();
+ ctxt->_complete_queue.pop();
+ }
+ return completed_op;
+ }
+
+ void deepspeed_aio_handle_t::_stop_threads()
+ {
+ assert(0 == _num_pending_ops);
+ for (auto& ctxt : _thread_contexts) {
+ {
+ std::lock_guard<std::mutex> lock(ctxt->_work_sync._mutex);
+ ctxt->_time_to_exit = true;
+ }
+ ctxt->_work_sync._cond_var.notify_one();
+ }
+ }
+
+ int deepspeed_aio_handle_t::wait()
+ {
+ assert(_num_pending_ops > 0);
+ auto num_completed_ops = 0;
+
+ while (_num_pending_ops > 0) {
+ auto completed_op = _wait_for_aio_work();
+
+ completed_op->fini();
+
+ close(completed_op->_fd);
+
+ if (completed_op->_validate) {
+ validate_aio_operation(completed_op->_read_op,
+ completed_op->_filename.c_str(),
+ completed_op->data_ptr(),
+ _num_threads * completed_op->_num_bytes);
+ }
+ --_num_pending_ops;
+ ++num_completed_ops;
+ }
+
+ return num_completed_ops;
+ }
+
+ bool deepspeed_aio_handle_t::_is_valid_parallel_aio_op(const bool read_op,
+ const long long int num_bytes)
+ {
+ const auto op_string = read_op ? "Read" : "Write";
+ if (num_bytes % get_thread_count()) {
+ std::cout << "deepspeed_aio failure: parallel " << op_string << " num_bytes = " << num_bytes
+ << " not divisible by thread count = " << get_thread_count() << std::endl;
+ return false;
+ }
+
+ return true;
+ }
+
+ int deepspeed_aio_handle_t::pread(const torch::Tensor& buffer,
+ const char* filename,
+ const bool validate,
+ const bool async)
+ {
+ long long num_file_bytes;
+ if (-1 == get_file_size(filename, num_file_bytes)) {
+ const auto error_code = errno;
+ report_file_error(filename, " fstat for read", error_code);
+ return -1;
+ }
+ const auto buffer_bytes = static_cast<long long int>(buffer.nbytes());
+ if (buffer_bytes != num_file_bytes) {
+ std::cout << filename << ": buffer nbytes != file bytes " << buffer_bytes
+ << " != " << num_file_bytes << std::endl;
+ }
+ assert(static_cast<long long int>(buffer.nbytes()) == num_file_bytes);
+ assert((num_file_bytes % _num_threads) == 0);
+
+ if (!_is_valid_parallel_aio_op(true, num_file_bytes)) { return -1; }
+
+ const auto fd = open_file(filename, true);
+ if (fd == -1) { return -1; }
+
+ auto scheduled_op = std::make_shared<io_op_desc_t>(
+ true, buffer, fd, filename, (num_file_bytes / _num_threads), validate);
+
+ _schedule_aio_work(scheduled_op);
+
+ if (async) { return 0; }
+
+ return wait();
+ }
+
+ int deepspeed_aio_handle_t::pwrite(const torch::Tensor& buffer,
+ const char* filename,
+ const bool validate,
+ const bool async)
+ {
+ const auto num_write_bytes = static_cast<long long int>(buffer.nbytes());
+ assert((num_write_bytes % _num_threads) == 0);
+
+ if (!_is_valid_parallel_aio_op(false, num_write_bytes)) { return -1; }
+
+ const auto fd = open_file(filename, false);
+ if (fd == -1) { return -1; }
+
+ auto scheduled_op = std::make_shared<io_op_desc_t>(
+ false, buffer, fd, filename, (num_write_bytes / _num_threads), validate);
+
+ _schedule_aio_work(scheduled_op);
+
+ if (async) { return 0; }
+
+ return wait();
+ }
+
+ int deepspeed_aio_handle_t::sync_pread(torch::Tensor& buffer, const char* filename)
+ {
+ return pread(buffer, filename, false, false);
+ }
+
+ int deepspeed_aio_handle_t::sync_pwrite(const torch::Tensor& buffer, const char* filename)
+ {
+ return pwrite(buffer, filename, false, false);
+ }
+
+ int deepspeed_aio_handle_t::async_pread(torch::Tensor& buffer, const char* filename)
+ {
+ return pread(buffer, filename, false, true);
+ }
+
+ int deepspeed_aio_handle_t::async_pwrite(const torch::Tensor& buffer, const char* filename)
+ {
+ return pwrite(buffer, filename, false, true);
+ }
+
+ at::Tensor deepspeed_aio_handle_t::new_cpu_locked_tensor(const size_t num_elem,
+ const torch::Tensor& example_tensor)
+ {
+ return _pinned_tensor_mgr->alloc(num_elem, example_tensor.scalar_type());
+ }
+
+ bool deepspeed_aio_handle_t::free_cpu_locked_tensor(torch::Tensor& locked_tensor)
+ {
+ return _pinned_tensor_mgr->free(locked_tensor);
+ }
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio_handle.h ADDED
@@ -0,0 +1,77 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ /*
+ Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
+ */
+
+ #include <condition_variable>
+ #include <memory>
+ #include "deepspeed_aio_thread.h"
+ #include "deepspeed_pin_tensor.h"
+
+ struct deepspeed_aio_handle_t {
+ std::unique_ptr<struct aio_context> _aio_ctxt;
+ const bool _single_submit;
+ const bool _overlap_events;
+ const int _num_threads;
+ deepspeed_aio_config_t _aio_config;
+
+ std::vector<std::shared_ptr<struct deepspeed_aio_thread_t>> _thread_contexts;
+ std::vector<std::thread> _threads;
+ int _num_pending_ops;
+ std::unique_ptr<struct deepspeed_pin_tensor_t> _pinned_tensor_mgr;
+
+ deepspeed_aio_handle_t(const int block_size,
+ const int queue_depth,
+ const bool single_submit,
+ const bool overlap_events,
+ const int num_threads);
+
+ ~deepspeed_aio_handle_t();
+
+ const int get_block_size() const;
+ const int get_queue_depth() const;
+ const bool get_single_submit() const;
+ const bool get_overlap_events() const;
+ const int get_thread_count() const;
+
+ int read(torch::Tensor& buffer, const char* filename, const bool validate);
+
+ int write(const torch::Tensor& buffer, const char* filename, const bool validate);
+
+ int pread(const torch::Tensor& buffer,
+ const char* filename,
+ const bool validate,
+ const bool async);
+
+ int pwrite(const torch::Tensor& buffer,
+ const char* filename,
+ const bool validate,
+ const bool async);
+
+ int sync_pread(torch::Tensor& buffer, const char* filename);
+
+ int sync_pwrite(const torch::Tensor& buffer, const char* filename);
+
+ int async_pread(torch::Tensor& buffer, const char* filename);
+
+ int async_pwrite(const torch::Tensor& buffer, const char* filename);
+
+ // TODO: Make API's args to be shape and dtype.
+ torch::Tensor new_cpu_locked_tensor(const size_t num_elem, const torch::Tensor& example_tensor);
+
+ bool free_cpu_locked_tensor(torch::Tensor&);
+
+ int wait();
+
+ void _stop_threads();
+
+ void _schedule_aio_work(std::shared_ptr<struct io_op_desc_t> scheduled_op);
+
+ std::shared_ptr<struct io_op_desc_t> _wait_for_aio_work();
+
+ bool _is_valid_parallel_aio_op(const bool read_op, const long long int num_bytes);
+ };
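A usage sketch (not part of this commit) of the handle API above, with illustrative parameter values and a hypothetical output path; real callers typically reach it through the Python bindings in py_ds_aio.cpp.

#include "deepspeed_py_aio_handle.h"

void example_handle_roundtrip()
{
    // Arguments: block_size, queue_depth, single_submit, overlap_events, num_threads.
    deepspeed_aio_handle_t handle(128 * 1024, 8, false, true, 2);

    // Pinned CPU buffer whose byte count divides evenly across the two I/O threads.
    auto example = torch::empty({0}, torch::kFloat);
    torch::Tensor buffer = handle.new_cpu_locked_tensor(1 << 20, example);

    handle.async_pwrite(buffer, "/tmp/ds_aio_handle_example.bin");  // hypothetical path
    handle.wait();  // blocks until the scheduled parallel write completes

    handle.free_cpu_locked_tensor(buffer);
}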
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_copy.cpp ADDED
@@ -0,0 +1,135 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ /*
+ Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
+ */
+
+ #include "deepspeed_py_copy.h"
+ #include <omp.h>
+
+ #define ROUND_DOWN(size, step) ((size) & ~((step)-1))
+
+ #if defined(__AVX512__) or defined(__AVX256__)
+ union AVX_Data {
+ #if defined(__AVX512__)
+ __m512 data;
+ #else
+ __m256 data;
+ #endif
+ };
+ #endif
+
+ static void helper_memcpy_1(float* dest, float* src, size_t param_size)
+ {
+ size_t rounded_size = 0;
+
+ #if defined(__AVX512__) or defined(__AVX256__)
+
+ rounded_size = ROUND_DOWN(param_size, SIMD_WIDTH);
+
+ for (size_t t = 0; t < rounded_size; t += TILE) {
+ size_t copy_size = TILE;
+ if ((t + TILE) > rounded_size) copy_size = rounded_size - t;
+ size_t offset = copy_size + t;
+ #pragma omp parallel for
+ for (size_t i = t; i < offset; i += SIMD_WIDTH) {
+ AVX_Data src_4;
+ src_4.data = SIMD_LOAD(src + i);
+
+ SIMD_STORE(dest + i, src_4.data);
+ }
+ }
+
+ #endif
+
+ if (param_size > rounded_size) {
+ #pragma omp parallel for
+ for (size_t k = rounded_size; k < param_size; k++) { dest[k] = src[k]; }
+ }
+ }
+
+ static void helper_memcpy_4(float* dest, float* src, size_t param_size)
+ {
+ size_t rounded_size = 0;
+
+ #if defined(__AVX512__) or defined(__AVX256__)
+
+ rounded_size = ROUND_DOWN(param_size, (SIMD_WIDTH << 2));
+
+ for (size_t t = 0; t < rounded_size; t += TILE) {
+ size_t copy_size = TILE;
+ if ((t + TILE) > rounded_size) copy_size = rounded_size - t;
+ size_t offset = copy_size + t;
+ #pragma omp parallel for
+ for (size_t i = t; i < offset; i += (SIMD_WIDTH << 2)) {
+ AVX_Data src_4[4];
+ src_4[0].data = SIMD_LOAD(src + i);
+ src_4[1].data = SIMD_LOAD(src + i + SIMD_WIDTH);
+ src_4[2].data = SIMD_LOAD(src + i + (SIMD_WIDTH << 1));
+ src_4[3].data = SIMD_LOAD(src + i + SIMD_WIDTH * 3);
+
+ SIMD_STORE(dest + i, src_4[0].data);
+ SIMD_STORE(dest + i + SIMD_WIDTH, src_4[1].data);
+ SIMD_STORE(dest + i + (SIMD_WIDTH << 1), src_4[2].data);
+ SIMD_STORE(dest + i + SIMD_WIDTH * 3, src_4[3].data);
+ }
+ }
+ #endif
+ if (param_size > rounded_size)
+ helper_memcpy_1((dest + rounded_size), (src + rounded_size), (param_size - rounded_size));
+ }
+
+ static void helper_mempcy_8(float* dest, float* src, size_t param_size)
+ {
+ size_t rounded_size = 0;
+
+ #if defined(__AVX512__) or defined(__AVX256__)
+
+ rounded_size = ROUND_DOWN(param_size, (SIMD_WIDTH << 2));
+
+ for (size_t t = 0; t < rounded_size; t += TILE) {
+ size_t copy_size = TILE;
+ if ((t + TILE) > rounded_size) copy_size = rounded_size - t;
+ size_t offset = copy_size + t;
+ #pragma omp parallel for
+ for (size_t i = t; i < offset; i += (SIMD_WIDTH << 3)) {
+ AVX_Data src_4[8];
+ src_4[0].data = SIMD_LOAD(src + i);
+ src_4[1].data = SIMD_LOAD(src + i + SIMD_WIDTH);
+ src_4[2].data = SIMD_LOAD(src + i + (SIMD_WIDTH << 1));
+ src_4[3].data = SIMD_LOAD(src + i + SIMD_WIDTH * 3);
+ src_4[4].data = SIMD_LOAD(src + i + (SIMD_WIDTH << 2));
+ src_4[5].data = SIMD_LOAD(src + i + SIMD_WIDTH * 5);
+ src_4[6].data = SIMD_LOAD(src + i + SIMD_WIDTH * 6);
+ src_4[7].data = SIMD_LOAD(src + i + SIMD_WIDTH * 7);
+
+ SIMD_STORE(dest + i, src_4[0].data);
+ SIMD_STORE(dest + i + SIMD_WIDTH, src_4[1].data);
+ SIMD_STORE(dest + i + (SIMD_WIDTH << 1), src_4[2].data);
+ SIMD_STORE(dest + i + SIMD_WIDTH * 3, src_4[3].data);
+ SIMD_STORE(dest + i + (SIMD_WIDTH << 2), src_4[4].data);
+ SIMD_STORE(dest + i + SIMD_WIDTH * 5, src_4[5].data);
+ SIMD_STORE(dest + i + SIMD_WIDTH * 6, src_4[6].data);
+ SIMD_STORE(dest + i + SIMD_WIDTH * 7, src_4[7].data);
+ }
+ }
+ #endif
+ if (param_size > rounded_size)
+ helper_memcpy_4((dest + rounded_size), (src + rounded_size), (param_size - rounded_size));
+ }
+
+ int deepspeed_py_memcpy(torch::Tensor& dest, const torch::Tensor& src)
+ {
+ auto dest_c = dest.contiguous();
+ auto src_c = src.contiguous();
+
+ float* dest_ptr = (float*)dest_c.data_ptr();
+ float* src_ptr = (float*)src_c.data_ptr();
+
+ helper_mempcy_8(dest_ptr, src_ptr, dest_c.size(0));
+
+ return 0;
+ }
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_copy.h ADDED
@@ -0,0 +1,46 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ /*
+ Copyright 2020 The Microsoft DeepSpeed Team
+ Licensed under the MIT license.
+
+ Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
+ */
+
+ #if (__x86_64__ || __i386__)
+ #include <cpuid.h>
+ #include <x86intrin.h>
+ #endif
+
+ #include <deepspeed_aio_common.h>
+ #include <stdlib.h>
+ #include <torch/extension.h>
+
+ #define TILE (1024 * 1024 * 1024)
+
+ #if defined(__AVX512__)
+ #define SIMD_STORE(a, d) _mm512_storeu_ps(a, d)
+ #define SIMD_LOAD(x) _mm512_loadu_ps(x)
+ #define SIMD_SET(x) _mm512_set1_ps(x)
+ #define SIMD_MUL(x, y) _mm512_mul_ps(x, y)
+ #define SIMD_FMA(x, y, c) _mm512_fmadd_ps(x, y, c)
+ #define SIMD_SQRT(x) _mm512_sqrt_ps(x)
+ #define SIMD_DIV(x, y) _mm512_div_ps(x, y)
+ #define SIMD_WIDTH 16
+ #else
+ #if defined(__AVX256__)
+ #define SIMD_STORE(a, d) _mm256_storeu_ps(a, d)
+ #define SIMD_LOAD(x) _mm256_loadu_ps(x)
+ #define SIMD_SET(x) _mm256_set1_ps(x)
+ #define SIMD_MUL(x, y) _mm256_mul_ps(x, y)
+ #define SIMD_FMA(x, y, c) _mm256_fmadd_ps(x, y, c)
+ #define SIMD_SQRT(x) _mm256_sqrt_ps(x)
+ #define SIMD_DIV(x, y) _mm256_div_ps(x, y)
+ #define SIMD_WIDTH 8
+ #endif
+ #endif
+
+ int deepspeed_py_memcpy(torch::Tensor& dest, const torch::Tensor& src);
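A short sketch (not part of this commit) of the copy entry point declared above; the tensor size is arbitrary, and both tensors are assumed to be contiguous float32 CPU tensors, which is what the implementation in deepspeed_py_copy.cpp expects.

#include "deepspeed_py_copy.h"

void example_simd_copy()
{
    auto src = torch::rand({1 << 22}, torch::kFloat);
    auto dst = torch::empty_like(src);

    // Copies dst.size(0) floats using the widest SIMD path compiled in
    // (__AVX512__ or __AVX256__), with a scalar loop for any tail elements.
    deepspeed_py_memcpy(dst, src);
}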
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/py_ds_aio.cpp ADDED
@@ -0,0 +1,46 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ /*
+ Functionality for swapping optimizer tensors to/from (NVMe) storage devices.
+ */
+
+ #include <torch/extension.h>
+ #include "deepspeed_py_aio_handle.h"
+ #include "deepspeed_py_copy.h"
+
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
+ {
+ m.def("aio_read", &deepspeed_py_aio_read, "DeepSpeed Asynchronous I/O Read");
+
+ m.def("aio_write", &deepspeed_py_aio_write, "DeepSpeed Asynchronous I/O Write");
+
+ m.def("deepspeed_memcpy", &deepspeed_py_memcpy, "DeepSpeed Memory Copy");
+
+ py::class_<deepspeed_aio_handle_t>(m, "aio_handle")
+ .def(py::init<const int, const int, const bool, const bool, const int>())
+
+ .def("get_block_size", &deepspeed_aio_handle_t::get_block_size)
+ .def("get_queue_depth", &deepspeed_aio_handle_t::get_queue_depth)
+ .def("get_single_submit", &deepspeed_aio_handle_t::get_single_submit)
+ .def("get_overlap_events", &deepspeed_aio_handle_t::get_overlap_events)
+ .def("get_thread_count", &deepspeed_aio_handle_t::get_thread_count)
+
+ .def("read", &deepspeed_aio_handle_t::read)
+ .def("write", &deepspeed_aio_handle_t::write)
+
+ .def("pread", &deepspeed_aio_handle_t::pread)
+ .def("pwrite", &deepspeed_aio_handle_t::pwrite)
+
+ .def("sync_pread", &deepspeed_aio_handle_t::sync_pread)
+ .def("sync_pwrite", &deepspeed_aio_handle_t::sync_pwrite)
+ .def("async_pread", &deepspeed_aio_handle_t::async_pread)
+ .def("async_pwrite", &deepspeed_aio_handle_t::async_pwrite)
+
+ .def("new_cpu_locked_tensor", &deepspeed_aio_handle_t::new_cpu_locked_tensor)
+ .def("free_cpu_locked_tensor", &deepspeed_aio_handle_t::free_cpu_locked_tensor)
+
+ .def("wait", &deepspeed_aio_handle_t::wait);
+ }
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_test/single_process_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+ "block_size": [
+ "128K",
+ "256K",
+ "1M"
+ ],
+ "queue_depth": [
+ 4,
+ 16,
+ 32
+ ],
+ "io_parallel": [
+ 1,
+ 2,
+ 4,
+ 8
+ ],
+ "single_submit": [
+ true,
+ false
+ ],
+ "overlap_events": [
+ true,
+ false
+ ],
+ "threads": [
+ 1
+ ]
+ }
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/adam/fused_adam.cpp ADDED
@@ -0,0 +1,48 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ #include "cpu_adam.h"
+
+ // C++ interface
+
+ void multi_tensor_adam(int chunk_size,
+ at::Tensor noop_flag,
+ std::vector<std::vector<at::Tensor>> tensor_lists, /*gpmv*/
+ const float lr,
+ const float beta1,
+ const float beta2,
+ const float epsilon,
+ const int step,
+ const int mode,
+ const int bias_correction,
+ const float weight_decay)
+ {
+ static bool initialized = false;
+ if (!initialized) {
+ create_adam_optimizer(0);
+ initialized = true;
+ }
+ for (int i = 0; i < tensor_lists[0].size(); i++) {
+ ds_adam_step(0,
+ step,
+ lr,
+ beta1,
+ beta2,
+ epsilon,
+ weight_decay,
+ bias_correction,
+ tensor_lists[1][i],
+ tensor_lists[0][i],
+ tensor_lists[2][i],
+ tensor_lists[3][i]);
+ }
+ }
+
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
+ {
+ m.def("multi_tensor_adam",
+ &multi_tensor_adam,
+ "Compute and apply gradient update to parameters for Adam optimizer");
+ }
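A sketch (not part of this commit) of invoking the wrapper above directly from C++; the tensor_lists layout [grads, params, exp_avg, exp_avg_sq] is inferred from the loop above, and every hyperparameter value is an illustrative assumption.

void example_cpu_fused_adam()
{
    auto param = torch::zeros({1024}, torch::kFloat);
    auto grad = torch::rand({1024}, torch::kFloat);
    auto exp_avg = torch::zeros_like(param);
    auto exp_avg_sq = torch::zeros_like(param);
    auto noop_flag = torch::zeros({1}, torch::kInt);

    // Assumed layout consumed by the loop above: [grads, params, exp_avg, exp_avg_sq].
    std::vector<std::vector<at::Tensor>> tensor_lists = {
        {grad}, {param}, {exp_avg}, {exp_avg_sq}};

    // chunk_size and noop_flag are unused by this CPU path but kept for API parity.
    multi_tensor_adam(2048, noop_flag, tensor_lists,
                      1e-3f, 0.9f, 0.999f, 1e-8f,
                      1 /*step*/, 0 /*mode*/, 1 /*bias_correction*/, 0.0f /*weight_decay*/);
}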
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/ccl.cpp ADDED
@@ -0,0 +1,344 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include <torch/extension.h>
7
+
8
+ #include <oneapi/ccl.hpp>
9
+ #include "shm.h"
10
+
11
+ // #define DO_PROFILE
12
+ #ifdef DO_PROFILE
13
+ #include <cfloat>
14
+ #include <chrono>
15
+ #endif
16
+
17
+ // Communication settings
18
+ static int world_rank = -1;
19
+ static int world_size = -1;
20
+
21
+ static std::set<int> _comm_ids;
22
+ static std::set<int> _colors;
23
+ static std::vector<ccl::communicator> _ccl_comms;
24
+ static ccl::shared_ptr_class<ccl::kvs> sub_kvs;
25
+ static std::map<std::vector<int>, int> group_to_comm_id;
26
+
27
+ ccl::communicator& _get_comm_from_group() { return _ccl_comms[0]; }
28
+ ccl::communicator& _get_comm_from_group(py::object group) { return _ccl_comms[0]; }
29
+ ccl::communicator& _get_comm_from_group(std::vector<int> ranks)
30
+ {
31
+ if (group_to_comm_id.find(ranks) != group_to_comm_id.end()) {
32
+ auto id = group_to_comm_id.find(ranks);
33
+ return _ccl_comms[id->second];
34
+ }
35
+ return _ccl_comms[0];
36
+ }
37
+
38
+ #define CCLCHECK(cmd) \
39
+ do { \
40
+ cmd; \
41
+ } while (0)
42
+
43
+ #define KVS_CREATE_SUCCESS 0
44
+ #define KVS_CREATE_FAILURE -1
45
+
46
+ static bool is_initialized = 0;
47
+
48
+ static ccl::shared_ptr_class<ccl::kvs> kvs;
49
+
50
+ static bool all_ranks_local_p = false;
51
+
52
+ void initialize(int size, int rank, torch::Tensor& kvs_data)
53
+ {
54
+ if (is_initialized) return;
55
+
56
+ // Check whether all ranks is on the same physical machine.
57
+ // If true, we will use an SHM based low latency allreduce
58
+
59
+ auto ls_string = std::getenv("LOCAL_SIZE");
60
+ int ls = 0;
61
+ if (ls_string != NULL) { ls = std::stoi(std::getenv("LOCAL_SIZE")); }
62
+
63
+ if (size >= 1 && size == ls) { all_ranks_local_p = true; }
64
+
65
+ world_size = size;
66
+ world_rank = rank;
67
+ is_initialized = 1;
68
+
69
+ ccl::kvs::address_type main_addr;
70
+
71
+ if (rank != 0) {
72
+ memcpy(main_addr.data(), kvs_data.data_ptr(), main_addr.size());
73
+ kvs = ccl::create_kvs(main_addr);
74
+ }
75
+
76
+ _ccl_comms.emplace_back(ccl::create_communicator(size, rank, kvs));
77
+
78
+ auto addr_string = std::getenv("MASTER_ADDR");
79
+ if (addr_string == NULL) { addr_string = ""; }
80
+ auto port_string = std::getenv("MASTER_PORT");
81
+ if (port_string == NULL) { port_string = ""; }
82
+
83
+ if (all_ranks_local_p) { shm_initialize(size, rank, addr_string, port_string); }
84
+ }
85
+
86
+ /*
87
+ rank == 0: create main kvs and return its address
88
+ rank == else: return an empty address
89
+ */
90
+ std::vector<uint8_t> get_kvs_addr(int rank)
91
+ {
92
+ if (rank == 0) {
93
+ kvs = ccl::create_main_kvs();
94
+ ccl::kvs::address_type main_addr = kvs->get_address();
95
+ auto ccl_kvs_addr = std::vector<uint8_t>(main_addr.begin(), main_addr.end());
96
+ return ccl_kvs_addr;
97
+ } else {
98
+ ccl::kvs::address_type main_addr;
99
+ auto ccl_kvs_addr = std::vector<uint8_t>(main_addr.begin(), main_addr.end());
100
+ return ccl_kvs_addr;
101
+ }
102
+ }
103
+
104
+ int get_rank(int group = 0) { return world_rank; }
105
+
106
+ int get_world_size(int group = 0) { return world_size; }
107
+
108
+ // Find the next ordered, unique value to a set. E.g. <0,1,2,7> --> 3
109
+ int next_unique_val(std::set<int> s)
110
+ {
111
+ std::set<int>::iterator itr;
112
+ // Base case. Add 0 to start of set.
113
+ if (s.empty() || *s.begin() != 0) {
114
+ return 0;
115
+ // second base case where s = {0} (the case of s = {n != 0} is caught above)
116
+ } else if (s.size() == 1) {
117
+ return 1;
118
+ } else {
119
+ int prev_val = *s.begin();
120
+ for (itr = std::next(s.begin()); itr != s.end(); itr++) {
121
+ if (*itr != prev_val + 1) { return prev_val + 1; }
122
+ prev_val = *itr;
123
+ }
124
+ return *(s.end()) + 1;
125
+ }
126
+ }
127
+
128
+ std::vector<uint8_t> get_sub_kvs_addr(bool first)
129
+ {
130
+ if (first) {
131
+ sub_kvs = ccl::create_main_kvs();
132
+ ccl::kvs::address_type main_addr = sub_kvs->get_address();
133
+ auto ccl_kvs_addr = std::vector<uint8_t>(main_addr.begin(), main_addr.end());
134
+ return ccl_kvs_addr;
135
+ } else {
136
+ ccl::kvs::address_type main_addr;
137
+ auto ccl_kvs_addr = std::vector<uint8_t>(main_addr.begin(), main_addr.end());
138
+ return ccl_kvs_addr;
139
+ }
140
+ }
141
+
142
+ void initialize_sub_comm(int size, int rank, torch::Tensor& kvs_data, std::vector<int> ranks)
143
+ {
144
+ ccl::kvs::address_type main_addr;
145
+ if (rank != 0) {
146
+ memcpy(main_addr.data(), kvs_data.data_ptr(), main_addr.size());
147
+ sub_kvs = ccl::create_kvs(main_addr);
148
+ }
149
+ _ccl_comms.push_back(ccl::create_communicator(size, rank, sub_kvs));
150
+ group_to_comm_id[ranks] = _ccl_comms.size() - 1;
151
+ }
152
+
153
+ ccl::datatype get_ccl_datatype(c10::ScalarType type)
154
+ {
155
+ ccl::datatype ccl_type;
156
+ switch (type) {
157
+ case c10::ScalarType::Int: ccl_type = ccl::datatype::int32; break;
158
+ case c10::ScalarType::Long: ccl_type = ccl::datatype::int64; break;
159
+ case c10::ScalarType::Float: ccl_type = ccl::datatype::float32; break;
160
+ case c10::ScalarType::Double: ccl_type = ccl::datatype::float64; break;
161
+ case c10::ScalarType::BFloat16: ccl_type = ccl::datatype::bfloat16; break;
162
+ case c10::ScalarType::Half: ccl_type = ccl::datatype::float16; break;
163
+ default: ccl_type = ccl::datatype::int8;
164
+ }
165
+ return ccl_type;
166
+ }
167
+
168
+ ccl::reduction get_ccl_reduce_op(py::object op, at::Tensor& input)
169
+ {
170
+ py::object ReduceOp = py::module_::import("deepspeed.comm").attr("ReduceOp");
171
+ if (!py::isinstance(op, ReduceOp)) {
172
+ throw std::runtime_error("Error: Op must be of type ReduceOp");
173
+ }
174
+
175
+ int op_val = py::int_(op.attr("value"));
176
+ ccl::reduction ccl_op;
177
+
178
+ if (input.scalar_type() == at::kBool) {
179
+ if (op_val == (int)py::int_(ReduceOp.attr("SUM").attr("value"))) {
180
+ // For bool tensors, map sum to max, which both represent a bitwise or.
181
+ // This is to prevent overflow issues with sum, since we use uint8 to
182
+ // represent a bool (see cclDataType mapping).
183
+ ccl_op = ccl::reduction::max;
184
+ } else if (op_val == (int)py::int_(ReduceOp.attr("AVG").attr("value"))) {
185
+ throw std::runtime_error("Error: For bool tensors, op must be of type ReduceOp");
186
+ }
187
+ }
188
+
189
+ if (op_val == (int)py::int_(ReduceOp.attr("SUM").attr("value"))) {
190
+ ccl_op = ccl::reduction::sum;
191
+ } else if (op_val == (int)py::int_(ReduceOp.attr("MIN").attr("value"))) {
192
+ ccl_op = ccl::reduction::min;
193
+ } else if (op_val == (int)py::int_(ReduceOp.attr("MAX").attr("value"))) {
194
+ ccl_op = ccl::reduction::max;
195
+ } else if (op_val == (int)py::int_(ReduceOp.attr("PRODUCT").attr("value"))) {
196
+ ccl_op = ccl::reduction::prod;
197
+ } else {
198
+ throw std::runtime_error("Error: Unrecognized ReduceOp type");
199
+ }
200
+ return ccl_op;
201
+ }
202
+
203
+ void broadcast(torch::Tensor& data, int src, std::vector<int> group, bool async_op)
204
+ {
205
+ CCLCHECK(ccl::broadcast(data.data_ptr(),
206
+ data.numel(),
207
+ get_ccl_datatype(data.scalar_type()),
208
+ src,
209
+ _get_comm_from_group(group))
210
+ .wait());
211
+ }
212
+
213
+ // TODO: implement torch's async_op behavior, document it.
214
+ void all_reduce(torch::Tensor& data, py::object op, std::vector<int> group, bool async_op)
215
+ {
216
+ CCLCHECK(ccl::allreduce(data.data_ptr(),
217
+ data.data_ptr(),
218
+ data.numel(),
219
+ get_ccl_datatype(data.scalar_type()),
220
+ get_ccl_reduce_op(op, data),
221
+ _get_comm_from_group(group))
222
+ .wait());
223
+ }
224
+
225
+ void all_reduce_caching(torch::Tensor& data,
226
+ py::object op,
227
+ std::string match_id,
228
+ std::vector<int> group,
229
+ bool async_op)
230
+ {
231
+ ccl::allreduce_attr attr = ccl::default_allreduce_attr;
232
+ auto match_str = ccl::v1::string(match_id);
233
+ attr.template set<ccl::operation_attr_id::to_cache>(true);
234
+ attr.template set<ccl::operation_attr_id::match_id>(match_str);
235
+ // To control this, use operation attribute and set true value for to_cache field and unique
236
+ // string (for example, tensor name) for match_id field. Note that:
237
+ // match_id should be the same for a specific communication operation across all ranks.
238
+ // If the same tensor is a part of different communication operations, match_id should have
239
+ // different values for each of these operations.
240
+ CCLCHECK(ccl::allreduce(data.data_ptr(),
241
+ data.data_ptr(),
242
+ data.numel(),
243
+ get_ccl_datatype(data.scalar_type()),
244
+ get_ccl_reduce_op(op, data),
245
+ _get_comm_from_group(group),
246
+ attr)
247
+ .wait());
248
+ }
249
+
250
+ void inference_all_reduce(torch::Tensor& data, py::object op)
251
+ {
252
+ #ifdef DO_PROFILE
253
+ static double total_time = 0.0;
254
+ static double total_time_sq = 0.0;
255
+ static int count = -16; // warmup
256
+ static double max_time = 0.0;
257
+ static double min_time = DBL_MAX;
258
+ // make sure all rank reach this point before measuring time
259
+ // turn on this if you suspect each rank didn't reach here at the same time (stragger)
260
+ // if (all_ranks_local_p) {
261
+ // barrier_wait(0, world_size);
262
+ //}
263
+ auto start = std::chrono::system_clock::now();
264
+ #endif
265
+
266
+ static py::object ReduceOp = py::module_::import("deepspeed.comm").attr("ReduceOp");
267
+ static auto ReduceOpSum = (int)py::int_(ReduceOp.attr("SUM").attr("value"));
268
+
269
+ assert(py::int_(op.attr("value")) == ReduceOpSum);
270
+
271
+ auto numel = data.numel();
272
+
273
+ int data_size = 0;
274
+ bool data_type_fallback = false;
275
+
276
+ switch (data.scalar_type()) {
277
+ case c10::ScalarType::BFloat16: data_size = numel * 2; break;
278
+ case c10::ScalarType::Float: data_size = numel * 4; break;
279
+ default: data_type_fallback = true;
280
+ }
281
+
282
+ if (data_type_fallback || !all_ranks_local_p) {
283
+ // fallback to oneccl allreduce
284
+ CCLCHECK(ccl::allreduce(data.data_ptr(),
285
+ data.data_ptr(),
286
+ data.numel(),
287
+ get_ccl_datatype(data.scalar_type()),
288
+ get_ccl_reduce_op(op, data),
289
+ _get_comm_from_group())
290
+ .wait());
291
+ } else {
292
+ all_reduce_outer_loop(data, numel, data_size);
293
+ }
294
+
295
+ #ifdef DO_PROFILE
296
+ auto end = std::chrono::system_clock::now();
297
+ count++;
298
+ if (count > 0) {
299
+ double elapsed = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
300
+ if (elapsed > max_time) { max_time = elapsed; }
301
+ if (elapsed < min_time) { min_time = elapsed; }
302
+ total_time += elapsed;
303
+ total_time_sq += elapsed * elapsed;
304
+ if (world_rank == 0 && count == 1000) {
305
+ auto avg = total_time / count;
306
+ auto sd =
307
+ sqrt(total_time_sq / count - total_time * total_time / (count * count)) / avg * 100;
308
+ printf(" C++ kernel\t\t %.2f\t %.2f\t%.2f\t %.2f\n",
309
+ min_time,
310
+ max_time,
311
+ total_time / count,
312
+ sd);
313
+ }
314
+ }
315
+ #endif
316
+ }
317
+
318
+ void barrier(std::vector<int> group, bool async_op)
319
+ {
320
+ CCLCHECK(ccl::barrier(_get_comm_from_group(group)).wait());
321
+ }
322
+
323
+ std::vector<std::string> get_available_coll()
324
+ {
325
+ std::vector<std::string> colls{
326
+ "broadcast", "all_reduce", "inference_all_reduce", "all_reduce_caching", "barrier"};
327
+ return colls;
328
+ }
329
+
330
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
331
+ {
332
+ m.def("get_kvs_addr", &get_kvs_addr, "create and get main kvs addr");
333
+ m.def("initialize", &initialize, "ccl initialize");
334
+ m.def("get_rank", &get_rank, "get rank");
335
+ m.def("get_world_size", &get_world_size, "get world size");
336
+ m.def("broadcast", &broadcast, "ccl broadcast");
337
+ m.def("all_reduce", &all_reduce, "ccl all_reduce");
338
+ m.def("inference_all_reduce", &inference_all_reduce, "low latency all_reduce implementation");
339
+ m.def("all_reduce_caching", &all_reduce_caching, "ccl all_reduce with caching");
340
+ m.def("barrier", &barrier, "barrier");
341
+ m.def("initialize_sub_comm", &initialize_sub_comm, "initialize_sub_comm");
342
+ m.def("get_sub_kvs_addr", &get_sub_kvs_addr, "get_sub_kvs_addr");
343
+ m.def("get_available_coll", &get_available_coll, "get_available_coll");
344
+ }
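The all_reduce_caching comments above describe oneCCL's persistent-collective mechanism (to_cache plus match_id). The following is a minimal C++ sketch of just that pattern, assuming an already-created ccl::communicator; the function name, the float32 datatype, and the sum reduction are illustrative choices, not part of the diff.

#include <oneapi/ccl.hpp>
#include <string>

// Cached in-place sum allreduce; `comm` is assumed to have been created elsewhere
// (e.g. through the kvs/initialize path shown in ccl.cpp) and `match_id` must name
// this collective identically on every rank.
void cached_allreduce_fp32(float* buf, size_t count, ccl::communicator& comm, const std::string& match_id)
{
    ccl::allreduce_attr attr = ccl::default_allreduce_attr;
    attr.set<ccl::operation_attr_id::to_cache>(true);                       // reuse the internal schedule
    attr.set<ccl::operation_attr_id::match_id>(ccl::v1::string(match_id));  // cache lookup key
    ccl::allreduce(buf, buf, count, ccl::datatype::float32, ccl::reduction::sum, comm, attr).wait();
}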
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/shm.cpp ADDED
@@ -0,0 +1,686 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include <torch/extension.h>
7
+
8
+ #include <ATen/ATen.h>
9
+ #include <fcntl.h>
10
+ #include <immintrin.h>
11
+ #include <semaphore.h>
12
+ #include <sys/mman.h>
13
+ #include "shm.h"
14
+
15
+ // #define DO_PROFILE
16
+ #ifdef DO_PROFILE
17
+ #include <cfloat>
18
+ #include <chrono>
19
+ #endif
20
+
21
+ // states for collectives
22
+ enum coll_state {
23
+ coll_begin = 0,
24
+ coll_allreduce_naive__copy_in_done, // this state is for rank != 0
25
+ coll_allreduce_naive__reduce_done, // this state is for rank == 0
26
+ coll_allreduce_naive__copy_out_done, // this state is for rank != 0
27
+ };
28
+
29
+ // SHM building blocks
30
+ struct SharedData {
31
+ const char* name;
32
+ int descriptor;
33
+ void* bytes;
34
+ size_t nbytes;
35
+ };
36
+
37
+ void shared_open(SharedData* data, const char* name, size_t nbytes)
38
+ {
39
+ int d = shm_open(name, O_RDWR, S_IRUSR | S_IWUSR);
40
+ if (d != -1) {
41
+ void* bytes = mmap(NULL, nbytes, PROT_READ | PROT_WRITE, MAP_SHARED, d, 0);
42
+ data->name = name;
43
+ data->descriptor = d;
44
+ data->bytes = bytes;
45
+ data->nbytes = nbytes;
46
+ } else {
47
+ if (errno != ENOENT) {
48
+ // don't print if the shm cannot be found; the caller will keep retrying
49
+ // until the other ranks have created the shm
50
+ printf("shared_open %s failed, errno=%d\n", name, errno);
51
+ }
52
+ data->descriptor = -1;
53
+ }
54
+ }
55
+
56
+ void shared_create(SharedData* data, const char* name, void* bytes, size_t nbytes)
57
+ {
58
+ int d = shm_open(name, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);
59
+ if (d != -1) {
60
+ if (write(d, bytes, nbytes) == (ssize_t)nbytes) { shared_open(data, name, nbytes); }
61
+ } else {
62
+ printf("shared_create %s failed\n", name);
63
+ }
64
+ }
65
+
66
+ void shared_close(SharedData* data)
67
+ {
68
+ if (data->descriptor != -1) {
69
+ munmap(data->bytes, data->nbytes);
70
+ shm_unlink(data->name);
71
+ }
72
+ }
73
+
74
+ // SHM based allreduce helper functions
75
+ // buffer that holds shm name
76
+ #define NAME_BUF_SIZE 1000
77
+ #define MAX_BUF_SIZE 1048576 * 32
78
+ #define NAIVE_ALLREDUCE_THRESHOLD 1048576
79
+ #define SHM_BUFFER_NAME "deepspeed_allreduce_buffer"
80
+ struct allreduce_workspace {
81
+ enum coll_state state;
82
+ sem_t mutex;
83
+ sem_t turnstile1;
84
+ sem_t turnstile2;
85
+ int counter;
86
+ char buffer[MAX_BUF_SIZE];
87
+ };
88
+ struct allreduce_workspace** workspace;
89
+
90
+ void wait_buffer_state_until(int index, enum coll_state state)
91
+ {
92
+ volatile enum coll_state* state_ptr = &(workspace[index]->state);
93
+
94
+ while (*state_ptr != state)
95
+ ;
96
+ }
97
+
98
+ void wait_buffer_state_until_range(int index, enum coll_state start, int size)
99
+ {
100
+ volatile enum coll_state* state_ptr = &(workspace[index]->state);
101
+ enum coll_state end = (enum coll_state)(start + size);
102
+
103
+ while (1) {
104
+ volatile enum coll_state cur_state = *state_ptr;
105
+ if (cur_state >= start and cur_state < end) break;
106
+ }
107
+ }
108
+
109
+ void wait_buffer_state_until_not(int index, enum coll_state state)
110
+ {
111
+ volatile enum coll_state* state_ptr = &(workspace[index]->state);
112
+
113
+ while (*state_ptr == state)
114
+ ;
115
+ }
116
+
117
+ void barrier_wait(int root_idx, int num_ranks)
118
+ {
119
+ // Phase 1: Wait for all threads to enter the barrier
120
+ auto shared = workspace[root_idx];
121
+ sem_wait(&shared->mutex);
122
+ shared->counter++;
123
+ if (shared->counter == num_ranks) {
124
+ for (int i = 0; i < num_ranks; ++i) { sem_post(&shared->turnstile1); }
125
+ }
126
+ sem_post(&shared->mutex);
127
+ sem_wait(&shared->turnstile1);
128
+
129
+ // Phase 2: Wait for all threads to exit the barrier
130
+ sem_wait(&shared->mutex);
131
+ shared->counter--;
132
+ if (shared->counter == 0) {
133
+ for (int i = 0; i < num_ranks; ++i) { sem_post(&shared->turnstile2); }
134
+ }
135
+ sem_post(&shared->mutex);
136
+ sem_wait(&shared->turnstile2);
137
+ }
138
+
139
+ __m512 cvt_bf16_to_fp32(const __m256i src) __attribute__((target("avx512bw")));
140
+ inline __m512 cvt_bf16_to_fp32(const __m256i src)
141
+ {
142
+ auto y = _mm512_cvtepu16_epi32(src);
143
+ return _mm512_castsi512_ps(_mm512_bslli_epi128(y, 2));
144
+ }
145
+
146
+ inline __m256i cvt_fp32_to_bf16(const __m512 src) __attribute__((target("avx512bw")));
147
+ inline __m256i cvt_fp32_to_bf16(const __m512 src)
148
+ {
149
+ __m512i value = _mm512_castps_si512(src);
150
+ __m512i nan = _mm512_set1_epi32(0xffff);
151
+ auto mask_value = _mm512_cmp_ps_mask(src, src, _CMP_ORD_Q);
152
+ __m512i ones = _mm512_set1_epi32(0x1);
153
+ __m512i vec_bias = _mm512_set1_epi32(0x7fff);
154
+ // uint32_t lsb = (input >> 16) & 1;
155
+ auto t_value = _mm512_and_si512(_mm512_srli_epi32(value, 16), ones);
156
+ // uint32_t rounding_bias = 0x7fff + lsb;
157
+ t_value = _mm512_add_epi32(t_value, vec_bias);
158
+ // input += rounding_bias;
159
+ t_value = _mm512_add_epi32(t_value, value);
160
+ // input = input >> 16;
161
+ t_value = _mm512_srli_epi32(t_value, 16);
162
+ // Check NaN before converting back to bf16
163
+ t_value = _mm512_mask_blend_epi32(mask_value, nan, t_value);
164
+ return _mm512_cvtusepi32_epi16(t_value);
165
+ }
166
+
167
+ void reduce_2_bf16_buffers_iio(int num_elements, void* in0, void* in1, void* out)
168
+ __attribute__((target("avx512bw")));
169
+
170
+ void reduce_bf16_buffers(int start_elements,
171
+ int num_elements,
172
+ int num_buffers,
173
+ int to_buffer_idx,
174
+ struct allreduce_workspace** workspace)
175
+ __attribute__((target("avx512bw")));
176
+
177
+ void reduce_2_fp32_buffers_iio(int num_elements, void* in0, void* in1, void* out)
178
+ __attribute__((target("avx512bw")));
179
+
180
+ void reduce_fp32_buffers(int start_elements,
181
+ int num_elements,
182
+ int num_buffers,
183
+ int to_buffer_idx,
184
+ struct allreduce_workspace** workspace)
185
+ __attribute__((target("avx512bw")));
186
+
187
+ // N_REDUCE_LIMIT is the number of buffers that can be reduced together in one shot.
188
+ // Compared with doing N-1 pairwise reduces, which need 2*(N-1) reads and N-1 writes,
189
+ // an N-way reduce only needs N reads and 1 write, saving roughly 2/3 of the memory bandwidth.
190
+ // When increasing N_REDUCE_LIMIT to a bigger number, do the following steps:
191
+ // 1. Extend the REPEAT_<X> macro list below
192
+ // 2. Extend the switch cases which call "REPEAT(X, ...)" below
193
+ #define N_REDUCE_LIMIT 16
194
+
195
+ void reduce_all_buffers(struct allreduce_workspace** workspace,
196
+ int start_elements,
197
+ int num_elements,
198
+ c10::ScalarType scalar_type,
199
+ int num_buffers,
200
+ int to_buffer_idx)
201
+ {
202
+ switch (scalar_type) {
203
+ case c10::ScalarType::BFloat16:
204
+ if (num_buffers > 2 && num_buffers <= N_REDUCE_LIMIT) {
205
+ reduce_bf16_buffers(
206
+ start_elements, num_elements, num_buffers, to_buffer_idx, workspace);
207
+ } else {
208
+ for (int i = 0; i < num_buffers; i++) {
209
+ if (i == to_buffer_idx) continue;
210
+ reduce_2_bf16_buffers_iio(
211
+ num_elements,
212
+ workspace[i]->buffer + start_elements * 2,
213
+ workspace[to_buffer_idx]->buffer + start_elements * 2,
214
+ workspace[to_buffer_idx]->buffer + start_elements * 2);
215
+ }
216
+ }
217
+ break;
218
+ case c10::ScalarType::Float:
219
+ if (num_buffers > 2 && num_buffers <= N_REDUCE_LIMIT) {
220
+ reduce_fp32_buffers(
221
+ start_elements, num_elements, num_buffers, to_buffer_idx, workspace);
222
+ } else {
223
+ for (int i = 0; i < num_buffers; i++) {
224
+ if (i == to_buffer_idx) continue;
225
+ reduce_2_fp32_buffers_iio(
226
+ num_elements,
227
+ workspace[i]->buffer + start_elements * 4,
228
+ workspace[to_buffer_idx]->buffer + start_elements * 4,
229
+ workspace[to_buffer_idx]->buffer + start_elements * 4);
230
+ }
231
+ }
232
+ break;
233
+ default: assert(!"Should not get here");
234
+ }
235
+ }
236
+
237
+ #define REPEAT(N, x) REPEAT_##N(x)
238
+ #define REPEAT_1(x) x(1)
239
+ #define REPEAT_2(x) \
240
+ REPEAT_1(x); \
241
+ x(2)
242
+ #define REPEAT_3(x) \
243
+ REPEAT_2(x); \
244
+ x(3)
245
+ #define REPEAT_4(x) \
246
+ REPEAT_3(x); \
247
+ x(4)
248
+ #define REPEAT_5(x) \
249
+ REPEAT_4(x); \
250
+ x(5)
251
+ #define REPEAT_6(x) \
252
+ REPEAT_5(x); \
253
+ x(6)
254
+ #define REPEAT_7(x) \
255
+ REPEAT_6(x); \
256
+ x(7)
257
+ #define REPEAT_8(x) \
258
+ REPEAT_7(x); \
259
+ x(8)
260
+ #define REPEAT_9(x) \
261
+ REPEAT_8(x); \
262
+ x(9)
263
+ #define REPEAT_10(x) \
264
+ REPEAT_9(x); \
265
+ x(10)
266
+ #define REPEAT_11(x) \
267
+ REPEAT_10(x); \
268
+ x(11)
269
+ #define REPEAT_12(x) \
270
+ REPEAT_11(x); \
271
+ x(12)
272
+ #define REPEAT_13(x) \
273
+ REPEAT_12(x); \
274
+ x(13)
275
+ #define REPEAT_14(x) \
276
+ REPEAT_13(x); \
277
+ x(14)
278
+ #define REPEAT_15(x) \
279
+ REPEAT_14(x); \
280
+ x(15)
281
+
282
+ #define CVT_ADD_BF16(x) \
283
+ do { \
284
+ auto in##x##_val = \
285
+ cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(workspace[x]->buffer + i))); \
286
+ inout_val = _mm512_add_ps(inout_val, in##x##_val); \
287
+ } while (0)
288
+
289
+ // The reduce functions below use a vectorized algorithm; the number of bytes processed per
290
+ // iteration depends on the vector length: a 256-bit vector ==> 32 bytes, a 512-bit vector ==> 64 bytes.
291
+ // If you change the implementation of reduce_2_bf16_buffers_iio or reduce_2_fp32_buffers_iio, check
292
+ // whether this number needs to be changed
293
+ #define VECTOR_LENGTH_IN_BYTES 32
294
+
295
+ void reduce_bf16_buffers(int start_elements,
296
+ int num_elements,
297
+ int num_buffers,
298
+ int to_buffer_idx,
299
+ struct allreduce_workspace** workspace)
300
+ {
301
+ const int element_size = 2;
302
+ const int vector_length = VECTOR_LENGTH_IN_BYTES / element_size;
303
+ int main_elements = num_elements - (num_elements % vector_length);
304
+ int remain_elements = num_elements % vector_length;
305
+
306
+ // process aligned part
307
+ #pragma omp parallel for
308
+ for (int i = start_elements * element_size; i < (start_elements + main_elements) * element_size;
309
+ i += VECTOR_LENGTH_IN_BYTES) {
310
+ auto inout_val = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(workspace[0]->buffer + i)));
311
+ switch (num_buffers) {
312
+ case 16: REPEAT(15, CVT_ADD_BF16); break;
313
+ case 15: REPEAT(14, CVT_ADD_BF16); break;
314
+ case 14: REPEAT(13, CVT_ADD_BF16); break;
315
+ case 13: REPEAT(12, CVT_ADD_BF16); break;
316
+ case 12: REPEAT(11, CVT_ADD_BF16); break;
317
+ case 11: REPEAT(10, CVT_ADD_BF16); break;
318
+ case 10: REPEAT(9, CVT_ADD_BF16); break;
319
+ case 9: REPEAT(8, CVT_ADD_BF16); break;
320
+ case 8: REPEAT(7, CVT_ADD_BF16); break;
321
+ case 7: REPEAT(6, CVT_ADD_BF16); break;
322
+ case 6: REPEAT(5, CVT_ADD_BF16); break;
323
+ case 5: REPEAT(4, CVT_ADD_BF16); break;
324
+ case 4: REPEAT(3, CVT_ADD_BF16); break;
325
+ case 3: REPEAT(2, CVT_ADD_BF16); break;
326
+ default: assert(!"Should not get here.");
327
+ }
328
+ _mm256_storeu_si256((__m256i*)(workspace[to_buffer_idx]->buffer + i),
329
+ cvt_fp32_to_bf16(inout_val));
330
+ }
331
+
332
+ // process remaining part
333
+ int i = (start_elements + main_elements) * element_size;
334
+ while (remain_elements > 0) {
335
+ float val = 0.0f;
336
+ for (int j = 0; j < num_buffers; j++) { val += *(at::BFloat16*)(workspace[j]->buffer + i); }
337
+ *(at::BFloat16*)(workspace[to_buffer_idx]->buffer + i) = val;
338
+ remain_elements--;
339
+ i += element_size;
340
+ }
341
+ }
342
+
343
+ void reduce_2_bf16_buffers_iio(int num_elements, void* in0, void* in1, void* out)
344
+ {
345
+ const int element_size = 2;
346
+ const int vector_length = VECTOR_LENGTH_IN_BYTES / element_size;
347
+ int main_elements = num_elements - (num_elements % vector_length);
348
+ int remain_elements = num_elements % vector_length;
349
+
350
+ // process aligned part
351
+ #pragma omp parallel for
352
+ for (int i = 0; i < main_elements * element_size; i += VECTOR_LENGTH_IN_BYTES) {
353
+ auto in0_val = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)((char*)in0 + i)));
354
+ auto in1_val = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)((char*)in1 + i)));
355
+ auto out_val = _mm512_add_ps(in0_val, in1_val);
356
+ _mm256_storeu_si256((__m256i*)((char*)out + i), cvt_fp32_to_bf16(out_val));
357
+ }
358
+
359
+ // process remaining part
360
+ int i = main_elements * element_size;
361
+ while (remain_elements > 0) {
362
+ float in0_val = *((at::BFloat16*)((char*)in0 + i));
363
+ float in1_val = *((at::BFloat16*)((char*)in1 + i));
364
+ *((at::BFloat16*)((char*)out + i)) = in0_val + in1_val;
365
+ remain_elements--;
366
+ i += element_size;
367
+ }
368
+ }
369
+
370
+ #define CVT_ADD_F32(x) \
371
+ do { \
372
+ auto in##x##_val = _mm256_loadu_ps((float*)(workspace[x]->buffer + i)); \
373
+ inout_val = _mm256_add_ps(inout_val, in##x##_val); \
374
+ } while (0)
375
+
376
+ void reduce_fp32_buffers(int start_elements,
377
+ int num_elements,
378
+ int num_buffers,
379
+ int to_buffer_idx,
380
+ struct allreduce_workspace** workspace)
381
+ {
382
+ const int element_size = 4;
383
+ const int vector_length = VECTOR_LENGTH_IN_BYTES / element_size;
384
+ int main_elements = num_elements - (num_elements % vector_length);
385
+ int remain_elements = num_elements % vector_length;
386
+
387
+ // process aligned part
388
+ #pragma omp parallel for
389
+ for (int i = start_elements * element_size; i < (start_elements + main_elements) * element_size;
390
+ i += VECTOR_LENGTH_IN_BYTES) {
391
+ auto inout_val = _mm256_loadu_ps((float*)(workspace[0]->buffer + i));
392
+ switch (num_buffers) {
393
+ case 16: REPEAT(15, CVT_ADD_F32); break;
394
+ case 15: REPEAT(14, CVT_ADD_F32); break;
395
+ case 14: REPEAT(13, CVT_ADD_F32); break;
396
+ case 13: REPEAT(12, CVT_ADD_F32); break;
397
+ case 12: REPEAT(11, CVT_ADD_F32); break;
398
+ case 11: REPEAT(10, CVT_ADD_F32); break;
399
+ case 10: REPEAT(9, CVT_ADD_F32); break;
400
+ case 9: REPEAT(8, CVT_ADD_F32); break;
401
+ case 8: REPEAT(7, CVT_ADD_F32); break;
402
+ case 7: REPEAT(6, CVT_ADD_F32); break;
403
+ case 6: REPEAT(5, CVT_ADD_F32); break;
404
+ case 5: REPEAT(4, CVT_ADD_F32); break;
405
+ case 4: REPEAT(3, CVT_ADD_F32); break;
406
+ case 3: REPEAT(2, CVT_ADD_F32); break;
407
+ default: assert(!"Should not get here.");
408
+ }
409
+ _mm256_storeu_ps((float*)(workspace[to_buffer_idx]->buffer + i), inout_val);
410
+ }
411
+
412
+ // process remaining part
413
+ int i = (start_elements + main_elements) * element_size;
414
+ while (remain_elements > 0) {
415
+ float val = 0.0f;
416
+ for (int j = 0; j < num_buffers; j++) { val += *(float*)(workspace[j]->buffer + i); }
417
+ *(float*)(workspace[to_buffer_idx]->buffer + i) = val;
418
+ remain_elements--;
419
+ i += element_size;
420
+ }
421
+ }
422
+
423
+ void reduce_2_fp32_buffers_iio(int num_elements, void* in0, void* in1, void* out)
424
+ {
425
+ const int element_size = 4;
426
+ const int vector_length = VECTOR_LENGTH_IN_BYTES / element_size;
427
+ int main_elements = num_elements - (num_elements % vector_length);
428
+ int remain_elements = num_elements % vector_length;
429
+
430
+ // process aligned part
431
+ #pragma omp parallel for
432
+ for (int i = 0; i < main_elements * element_size; i += VECTOR_LENGTH_IN_BYTES) {
433
+ auto in0_val = _mm256_loadu_ps((float*)((char*)in0 + i));
434
+ auto in1_val = _mm256_loadu_ps((float*)((char*)in1 + i));
435
+ auto out_val = _mm256_add_ps(in0_val, in1_val);
436
+ _mm256_storeu_ps((float*)((char*)out + i), out_val);
437
+ }
438
+
439
+ // process remaining part
440
+ int i = main_elements * element_size;
441
+ while (remain_elements > 0) {
442
+ float in0_val = *((float*)((char*)in0 + i));
443
+ float in1_val = *((float*)((char*)in1 + i));
444
+ *((float*)((char*)out + i)) = in0_val + in1_val;
445
+ remain_elements--;
446
+ i += element_size;
447
+ }
448
+ }
449
+
450
+ static bool is_initialized = 0;
451
+ static int world_size;
452
+ static int world_rank;
453
+
454
+ void shm_initialize(int size, int rank, char* addr_string, char* port_string)
455
+ {
456
+ if (is_initialized) return;
457
+ is_initialized = 1;
458
+
459
+ world_size = size;
460
+ world_rank = rank;
461
+
462
+ char shm_name_prefix[NAME_BUF_SIZE];
463
+ char shm_name[NAME_BUF_SIZE];
464
+ snprintf(shm_name_prefix,
465
+ NAME_BUF_SIZE,
466
+ "%s_%d_%s_%s",
467
+ SHM_BUFFER_NAME,
468
+ getuid(),
469
+ addr_string,
470
+ port_string);
471
+ // create shared workspace for SHM based allreduce
472
+ SharedData allreduce_buffer;
473
+ // allocate workspace_buf for current rank
474
+ struct allreduce_workspace* workspace_buf;
475
+ struct allreduce_workspace* workspace_buf_other;
476
+ workspace_buf = (struct allreduce_workspace*)malloc(sizeof(struct allreduce_workspace));
477
+ snprintf(shm_name, NAME_BUF_SIZE, "%s_%d", shm_name_prefix, rank);
478
+ shared_create(&allreduce_buffer, shm_name, workspace_buf, sizeof(struct allreduce_workspace));
479
+ workspace_buf = (struct allreduce_workspace*)allreduce_buffer.bytes;
480
+ workspace_buf->state = coll_begin;
481
+
482
+ // create the workspace pointer list
483
+ workspace = (struct allreduce_workspace**)malloc(size * sizeof(struct allreduce_workspace*));
484
+
485
+ // map shm of all ranks
486
+ for (int i = 0; i < size; i++) {
487
+ if (i != rank) {
488
+ snprintf(shm_name, NAME_BUF_SIZE, "%s_%d", shm_name_prefix, i);
489
+ // printf("open %s, %d\n", shm_name, rank);
490
+ do {
491
+ shared_open(&allreduce_buffer, shm_name, sizeof(struct allreduce_workspace));
492
+ } while (allreduce_buffer.descriptor == -1 && errno == ENOENT);
493
+ workspace_buf_other = (struct allreduce_workspace*)allreduce_buffer.bytes;
494
+ workspace[i] = workspace_buf_other;
495
+ } else {
496
+ workspace[i] = workspace_buf;
497
+ workspace_buf->counter = 0;
498
+ sem_init(&workspace_buf->mutex, 1, 1);
499
+ sem_init(&workspace_buf->turnstile1, 1, 0);
500
+ sem_init(&workspace_buf->turnstile2, 1, 0);
501
+ }
502
+ }
503
+ }
504
+
505
+ static void parallel_memcpy(void* to, void* from, size_t n_bytes)
506
+ __attribute__((target("avx512bw")));
507
+ static void parallel_memcpy(void* to, void* from, size_t n_bytes)
508
+ {
509
+ auto aligned_bytes = n_bytes - (n_bytes % VECTOR_LENGTH_IN_BYTES);
510
+ // process aligned part
511
+ #pragma omp parallel for
512
+ for (int i = 0; i < aligned_bytes; i += VECTOR_LENGTH_IN_BYTES) {
513
+ auto val = _mm256_loadu_si256((__m256i*)((char*)from + i));
514
+ _mm256_storeu_si256((__m256i*)((char*)to + i), val);
515
+ }
516
+
517
+ // process remaining part
518
+ for (int i = aligned_bytes; i < n_bytes; i++) { *((char*)to + i) = *((char*)from + i); }
519
+ }
520
+
521
+ #define positive_mod(num, mod) ((((num) % (mod)) + (mod)) % (mod))
522
+ #define rank_mod(rank) positive_mod(rank, world_size)
523
+ size_t slice_size(size_t chunk_el, int slice_idx)
524
+ {
525
+ size_t slice_size = chunk_el / world_size;
526
+ return slice_idx == world_size - 1 ? slice_size + (chunk_el % world_size) : slice_size;
527
+ }
528
+
529
+ char* slice_data(char* data_ptr, size_t chunk_el, int el_size, int slice_idx)
530
+ {
531
+ size_t slice_size = chunk_el / world_size;
532
+ size_t el_offset = slice_size * slice_idx;
533
+ return data_ptr + el_offset * el_size;
534
+ }
535
+
536
+ size_t slice_el_start(size_t chunk_el, int slice_idx)
537
+ {
538
+ size_t slice_size = chunk_el / world_size;
539
+ return slice_size * slice_idx;
540
+ }
541
+
542
+ void naive_all_reduce(char* data_ptr,
543
+ c10::ScalarType scalar_type,
544
+ size_t chunk_size,
545
+ size_t chunk_el)
546
+ {
547
+ parallel_memcpy(workspace[world_rank]->buffer, data_ptr, chunk_size);
548
+ std::atomic_thread_fence(std::memory_order_release);
549
+ workspace[world_rank]->state = coll_allreduce_naive__copy_in_done;
550
+
551
+ if (world_rank == 0) {
552
+ // compute allreduce result on rank 0
553
+ for (int i = 1; i < world_size; i++) {
554
+ // wait until the other rank has copied its buffer
555
+ wait_buffer_state_until(i, coll_allreduce_naive__copy_in_done);
556
+ }
557
+ reduce_all_buffers(workspace, 0, chunk_el, scalar_type, world_size, 0);
558
+ std::atomic_thread_fence(std::memory_order_release);
559
+ workspace[world_rank]->state = coll_allreduce_naive__reduce_done;
560
+ parallel_memcpy(data_ptr, workspace[0]->buffer, chunk_size);
561
+ }
562
+ if (world_rank != 0) {
563
+ wait_buffer_state_until(0, coll_allreduce_naive__reduce_done);
564
+ parallel_memcpy(data_ptr, workspace[0]->buffer, chunk_size);
565
+ std::atomic_thread_fence(std::memory_order_release);
566
+ workspace[world_rank]->state = coll_allreduce_naive__copy_out_done;
567
+ }
568
+ if (world_rank == 0) {
569
+ for (int i = 1; i < world_size; i++) {
570
+ wait_buffer_state_until(i, coll_allreduce_naive__copy_out_done);
571
+ }
572
+ std::atomic_thread_fence(std::memory_order_release);
573
+ workspace[world_rank]->state = coll_begin;
574
+ }
575
+ if (world_rank != 0) {
576
+ // if rank 0 spins too fast it could already be in state 1 of the next allreduce;
577
+ // in that case wait_buffer_state_until(0, 0) may cause a deadlock.
578
+ // What we are certain of is that when rank 0 finishes, the state won't be 2.
579
+ wait_buffer_state_until_not(0, coll_allreduce_naive__reduce_done);
580
+ workspace[world_rank]->state = coll_begin;
581
+ }
582
+ }
583
+
584
+ // naive allreduce distributed, each rank do naive reduce on its slice
585
+ void distributed_naive_reduce(char* data_ptr,
586
+ c10::ScalarType scalar_type,
587
+ size_t chunk_size,
588
+ size_t chunk_el)
589
+ {
590
+ #ifdef DO_PROFILE
591
+ static double total_t1_t0 = 0.0;
592
+ static double total_t2_t1 = 0.0;
593
+ static double total_t3_t2 = 0.0;
594
+ static double total_t4_t3 = 0.0;
595
+ static double total_t5_t4 = 0.0;
596
+ static int count = -16; // warmup
597
+ auto t0 = std::chrono::system_clock::now();
598
+ #endif
599
+
600
+ int data_size = chunk_size / chunk_el;
601
+ parallel_memcpy(workspace[world_rank]->buffer, data_ptr, chunk_size);
602
+ std::atomic_thread_fence(std::memory_order_release);
603
+ workspace[world_rank]->state = coll_allreduce_naive__copy_in_done;
604
+
605
+ #ifdef DO_PROFILE
606
+ auto t1 = std::chrono::system_clock::now();
607
+ #endif
608
+
609
+ for (int i = 0; i < world_size; i++) {
610
+ // wait until all the other ranks have copied their buffers
611
+ wait_buffer_state_until_range(i, coll_allreduce_naive__copy_in_done, 2);
612
+ }
613
+
614
+ #ifdef DO_PROFILE
615
+ auto t2 = std::chrono::system_clock::now();
616
+ #endif
617
+
618
+ // reduce scatter
619
+ reduce_all_buffers(workspace,
620
+ slice_el_start(chunk_el, world_rank),
621
+ slice_size(chunk_el, world_rank),
622
+ scalar_type,
623
+ world_size,
624
+ world_rank);
625
+ std::atomic_thread_fence(std::memory_order_release);
626
+ workspace[world_rank]->state = coll_allreduce_naive__reduce_done;
627
+
628
+ #ifdef DO_PROFILE
629
+ auto t3 = std::chrono::system_clock::now();
630
+ #endif
631
+
632
+ for (int i = 0; i < world_size; i++) {
633
+ int rank = (i + world_rank) % world_size;
634
+ // wait until the other rank has reduced its buffer
635
+ wait_buffer_state_until_range(rank, coll_allreduce_naive__reduce_done, 2);
636
+ parallel_memcpy(slice_data(data_ptr, chunk_el, data_size, rank),
637
+ slice_data(workspace[rank]->buffer, chunk_el, chunk_size / chunk_el, rank),
638
+ slice_size(chunk_el, rank) * data_size);
639
+ }
640
+ std::atomic_thread_fence(std::memory_order_release);
641
+ workspace[world_rank]->state = coll_allreduce_naive__copy_out_done;
642
+
643
+ #ifdef DO_PROFILE
644
+ auto t4 = std::chrono::system_clock::now();
645
+ #endif
646
+
647
+ for (int i = 0; i < world_size; i++) {
648
+ wait_buffer_state_until_not(i, coll_allreduce_naive__reduce_done);
649
+ }
650
+
651
+ std::atomic_thread_fence(std::memory_order_release);
652
+ workspace[world_rank]->state = coll_begin;
653
+
654
+ #ifdef DO_PROFILE
655
+ auto t5 = std::chrono::system_clock::now();
656
+ count++;
657
+ if (count > 0) {
658
+ total_t1_t0 += std::chrono::duration_cast<std::chrono::microseconds>(t1 - t0).count();
659
+ total_t2_t1 += std::chrono::duration_cast<std::chrono::microseconds>(t2 - t1).count();
660
+ total_t3_t2 += std::chrono::duration_cast<std::chrono::microseconds>(t3 - t2).count();
661
+ total_t4_t3 += std::chrono::duration_cast<std::chrono::microseconds>(t4 - t3).count();
662
+ total_t5_t4 += std::chrono::duration_cast<std::chrono::microseconds>(t5 - t4).count();
663
+ if (world_rank == 0 && count == 1000) {
664
+ printf("distributed_naive_reduce time breakdown:\n");
665
+ printf("\tcopy input buffer: %.2f\n", total_t1_t0 / count);
666
+ printf("\twait for copy: %.2f\n", total_t2_t1 / count);
667
+ printf("\treduce: %.2f\n", total_t3_t2 / count);
668
+ printf("\tcopy buffer to output: %.2f\n", total_t4_t3 / count);
669
+ printf("\twait finish: %.2f\n", total_t5_t4 / count);
670
+ }
671
+ }
672
+ #endif
673
+ }
674
+
675
+ void all_reduce_outer_loop(torch::Tensor& data, size_t numel, int data_size)
676
+ {
677
+ for (int offset = 0; offset < data_size; offset += MAX_BUF_SIZE) {
678
+ auto data_ptr = ((char*)(data.data_ptr()) + offset);
679
+ size_t chunk_size = data_size - offset > MAX_BUF_SIZE ? MAX_BUF_SIZE : data_size - offset;
680
+ size_t chunk_el = chunk_size / (data_size / numel);
681
+ if (chunk_size < NAIVE_ALLREDUCE_THRESHOLD)
682
+ naive_all_reduce(data_ptr, data.scalar_type(), chunk_size, chunk_el);
683
+ else
684
+ distributed_naive_reduce(data_ptr, data.scalar_type(), chunk_size, chunk_el);
685
+ }
686
+ }
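The inline comments in cvt_fp32_to_bf16 above spell out the round-to-nearest-even rule applied to the upper 16 bits of the fp32 bit pattern. Below is a scalar C++ reference for the same rounding, useful when checking the AVX-512 path; the function name and the use of memcpy are mine, not part of the diff.

#include <cstdint>
#include <cstring>

// Scalar equivalent of cvt_fp32_to_bf16: round-to-nearest-even on the top 16 bits,
// with NaN mapped to 0xffff as in the masked blend above.
static uint16_t fp32_to_bf16_scalar(float f)
{
    uint32_t input;
    std::memcpy(&input, &f, sizeof(input));
    if (f != f) return 0xffff;              // NaN check, mirrors the _CMP_ORD_Q mask
    uint32_t lsb = (input >> 16) & 1;       // lowest bit that survives truncation
    uint32_t rounding_bias = 0x7fff + lsb;  // ties round to even
    input += rounding_bias;
    return static_cast<uint16_t>(input >> 16);
}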
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/shm.h ADDED
@@ -0,0 +1,12 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #ifndef __SHM_COLLECTIVES__
7
+ #define __SHM_COLLECTIVES__
8
+ #define VECTOR_LENGTH_IN_BYTES 32
9
+ void shm_initialize(int size, int rank, char* addr_string, char* port_string);
10
+ void all_reduce_outer_loop(torch::Tensor& data, size_t numel, int data_size);
11
+ void barrier_wait(int root_idx, int num_ranks);
12
+ #endif
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/shm_interface.cpp ADDED
@@ -0,0 +1,120 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include <torch/extension.h>
7
+
8
+ #include "shm.h"
9
+
10
+ // #define DO_PROFILE
11
+ #ifdef DO_PROFILE
12
+ #include <cfloat>
13
+ #include <chrono>
14
+ #endif
15
+
16
+ // Communication settings
17
+ static int world_rank = -1;
18
+ static int world_size = -1;
19
+
20
+ static bool is_initialized = 0;
21
+
22
+ static bool all_ranks_local_p = false;
23
+
24
+ void initialize(int size, int rank)
25
+ {
26
+ if (is_initialized) return;
27
+
28
+ // Check whether all ranks are on the same physical machine.
29
+ // If true, we will use an SHM based low latency allreduce
30
+
31
+ auto ls_string = std::getenv("LOCAL_SIZE");
32
+ int ls = 0;
33
+ if (ls_string != NULL) { ls = std::stoi(std::getenv("LOCAL_SIZE")); }
34
+
35
+ if (size >= 1 && size == ls) { all_ranks_local_p = true; }
36
+
37
+ world_size = size;
38
+ world_rank = rank;
39
+ is_initialized = 1;
40
+
41
+ auto addr_string = std::getenv("MASTER_ADDR");
42
+ if (addr_string == NULL) { addr_string = ""; }
43
+ auto port_string = std::getenv("MASTER_PORT");
44
+ if (port_string == NULL) { port_string = ""; }
45
+
46
+ if (all_ranks_local_p) { shm_initialize(size, rank, addr_string, port_string); }
47
+ }
48
+
49
+ int get_rank(int group = 0) { return world_rank; }
50
+
51
+ int get_world_size(int group = 0) { return world_size; }
52
+
53
+ // Success - return 0
54
+ // Fail (cannot honor the request and need to fall back) - return -1
55
+ int inference_all_reduce(torch::Tensor& data, py::object op)
56
+ {
57
+ if (!all_ranks_local_p) return -1;
58
+ #ifdef DO_PROFILE
59
+ static double total_time = 0.0;
60
+ static double total_time_sq = 0.0;
61
+ static int count = -16; // warmup
62
+ static double max_time = 0.0;
63
+ static double min_time = DBL_MAX;
64
+ // make sure all ranks reach this point before measuring time
65
+ // turn this on if you suspect some ranks did not reach here at the same time (stragglers)
66
+ // if (all_ranks_local_p) { barrier_wait(0, world_size); }
67
+ auto start = std::chrono::system_clock::now();
68
+ #endif
69
+
70
+ static py::object ReduceOp = py::module_::import("deepspeed.comm").attr("ReduceOp");
71
+ static auto ReduceOpSum = (int)py::int_(ReduceOp.attr("SUM").attr("value"));
72
+
73
+ assert(py::int_(op.attr("value")) == ReduceOpSum);
74
+
75
+ auto numel = data.numel();
76
+
77
+ int data_size = 0;
78
+ bool data_type_fallback = false;
79
+
80
+ switch (data.scalar_type()) {
81
+ case c10::ScalarType::BFloat16: data_size = numel * 2; break;
82
+ case c10::ScalarType::Float: data_size = numel * 4; break;
83
+ default: data_type_fallback = true;
84
+ }
85
+
86
+ if (data_type_fallback) return -1;
87
+
88
+ all_reduce_outer_loop(data, numel, data_size);
89
+
90
+ #ifdef DO_PROFILE
91
+ auto end = std::chrono::system_clock::now();
92
+ count++;
93
+ if (count > 0) {
94
+ double elapsed = std::chrono::duration_cast<std::chrono::microseconds>(end - start).count();
95
+ if (elapsed > max_time) { max_time = elapsed; }
96
+ if (elapsed < min_time) { min_time = elapsed; }
97
+ total_time += elapsed;
98
+ total_time_sq += elapsed * elapsed;
99
+ if (world_rank == 0 && count == 1000) {
100
+ auto avg = total_time / count;
101
+ auto sd =
102
+ sqrt(total_time_sq / count - total_time * total_time / (count * count)) / avg * 100;
103
+ printf(" C++ kernel\t\t %.2f\t %.2f\t%.2f\t %.2f\n",
104
+ min_time,
105
+ max_time,
106
+ total_time / count,
107
+ sd);
108
+ }
109
+ }
110
+ #endif
111
+ return 0;
112
+ }
113
+
114
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
115
+ {
116
+ m.def("initialize", &initialize, "shm initialize");
117
+ m.def("get_rank", &get_rank, "get rank");
118
+ m.def("get_world_size", &get_world_size, "get world size");
119
+ m.def("inference_all_reduce", &inference_all_reduce, "low latency all_reduce implementation");
120
+ }
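For reference, the "sd" value printed by the DO_PROFILE block above is the relative standard deviation (coefficient of variation) of the per-call latency in percent, which is what the total_time and total_time_sq accumulators compute:

\[ \mathrm{sd} = \frac{\sqrt{\tfrac{1}{n}\sum_i t_i^2 - \left(\tfrac{1}{n}\sum_i t_i\right)^2}}{\tfrac{1}{n}\sum_i t_i} \times 100 \]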
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/lion/fused_lion.cpp ADDED
@@ -0,0 +1,43 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "cpu_lion.h"
7
+
8
+ // C++ interface
9
+
10
+ void multi_tensor_lion(int chunk_size,
11
+ at::Tensor noop_flag,
12
+ std::vector<std::vector<at::Tensor>> tensor_lists, /*gpmv*/
13
+ const float lr,
14
+ const float beta1,
15
+ const float beta2,
16
+ const int step,
17
+ const int mode,
18
+ const float weight_decay)
19
+ {
20
+ static bool initialized = false;
21
+ if (!initialized) {
22
+ create_lion_optimizer(0);
23
+ initialized = true;
24
+ }
25
+ for (int i = 0; i < tensor_lists[0].size(); i++) {
26
+ ds_lion_step(0,
27
+ step,
28
+ lr,
29
+ beta1,
30
+ beta2,
31
+ weight_decay,
32
+ tensor_lists[1][i],
33
+ tensor_lists[0][i],
34
+ tensor_lists[2][i]);
35
+ }
36
+ }
37
+
38
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
39
+ {
40
+ m.def("multi_tensor_lion",
41
+ &multi_tensor_lion,
42
+ "Compute and apply gradient update to parameters for Lion optimizer");
43
+ }
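multi_tensor_lion above forwards tensor_lists[1][i], tensor_lists[0][i], tensor_lists[2][i] to ds_lion_step; assuming the usual multi-tensor-apply layout (grads, params, exp_avg) and that ds_lion_step takes (params, grads, exp_avg) in that order, each call applies a Lion update to one tensor. The scalar sketch below shows that update rule; it is my reading of the standard Lion algorithm, not code copied from cpu_lion.h.

#include <cstddef>

// Per-element Lion step: decoupled weight decay, a sign-of-interpolation update,
// then the momentum (exp_avg) update.
static void lion_step_scalar(float* param, const float* grad, float* exp_avg, size_t n,
                             float lr, float beta1, float beta2, float weight_decay)
{
    for (size_t i = 0; i < n; ++i) {
        float c = beta1 * exp_avg[i] + (1.0f - beta1) * grad[i];            // interpolate momentum and gradient
        float update = (c > 0.0f) ? 1.0f : ((c < 0.0f) ? -1.0f : 0.0f);     // sign(c)
        param[i] = param[i] * (1.0f - lr * weight_decay) - lr * update;
        exp_avg[i] = beta2 * exp_avg[i] + (1.0f - beta2) * grad[i];
    }
}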
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_grad_bias.h ADDED
@@ -0,0 +1,250 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+ #include <cutlass/epilogue/threadblock/default_epilogue_tensor_op.h>
8
+ #include <cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h>
9
+ #include "../iterators/predicated_tile_iterator_atomic.h"
10
+ #include "cutlass/epilogue/threadblock/epilogue.h"
11
+
12
+ namespace cutlass {
13
+ namespace epilogue {
14
+ namespace threadblock {
15
+ template <int Rank,
16
+ typename Shape_,
17
+ typename WarpMmaTensorOp_,
18
+ int PartitionsK,
19
+ typename OutputOp_,
20
+ int ElementsPerAccess>
21
+ struct EpilogueTensorOpAffineRankN : public DefaultEpilogueTensorOpAffineRankN<Rank,
22
+ Shape_,
23
+ WarpMmaTensorOp_,
24
+ PartitionsK,
25
+ OutputOp_,
26
+ ElementsPerAccess> {
27
+ using Base = DefaultEpilogueTensorOpAffineRankN<Rank,
28
+ Shape_,
29
+ WarpMmaTensorOp_,
30
+ PartitionsK,
31
+ OutputOp_,
32
+ ElementsPerAccess>;
33
+ using OutputTileIterator =
34
+ cutlass::epilogue::threadblock::PredicatedTileIteratorAffineRankNAtomic<
35
+ typename Base::OutputTileThreadMap,
36
+ typename Base::ElementOutput,
37
+ Rank>;
38
+
39
+ using Epilogue =
40
+ cutlass::epilogue::threadblock::Epilogue<typename Base::Shape,
41
+ typename Base::WarpMmaTensorOp,
42
+ Base::kPartitionsK,
43
+ OutputTileIterator,
44
+ typename Base::AccumulatorFragmentIterator,
45
+ typename Base::WarpTileIterator,
46
+ typename Base::SharedLoadIterator,
47
+ typename Base::OutputOp,
48
+ typename Base::Padding,
49
+ Base::kFragmentsPerIteration>;
50
+ };
51
+
52
+ template <int Rank,
53
+ typename Shape_,
54
+ typename WarpMmaTensorOp_,
55
+ int PartitionsK,
56
+ typename OutputOp_,
57
+ int ElementsPerAccess>
58
+ struct EpilogueVoltaTensorOpAffineRankN
59
+ : public DefaultEpilogueVoltaTensorOpAffineRankN<Rank,
60
+ Shape_,
61
+ WarpMmaTensorOp_,
62
+ PartitionsK,
63
+ OutputOp_,
64
+ ElementsPerAccess> {
65
+ using Base = DefaultEpilogueVoltaTensorOpAffineRankN<Rank,
66
+ Shape_,
67
+ WarpMmaTensorOp_,
68
+ PartitionsK,
69
+ OutputOp_,
70
+ ElementsPerAccess>;
71
+ using OutputTileIterator =
72
+ cutlass::epilogue::threadblock::PredicatedTileIteratorAffineRankNAtomic<
73
+ typename Base::OutputTileThreadMap,
74
+ typename Base::ElementOutput,
75
+ Rank>;
76
+
77
+ using Epilogue =
78
+ cutlass::epilogue::threadblock::Epilogue<typename Base::Shape,
79
+ typename Base::WarpMmaTensorOp,
80
+ Base::kPartitionsK,
81
+ OutputTileIterator,
82
+ typename Base::AccumulatorFragmentIterator,
83
+ typename Base::WarpTileIterator,
84
+ typename Base::SharedLoadIterator,
85
+ typename Base::OutputOp,
86
+ typename Base::Padding>;
87
+ };
88
+
89
+ template <typename Shape_,
90
+ typename WarpMmaTensorOp_,
91
+ int PartitionsK,
92
+ typename OutputOp_,
93
+ int ElementsPerAccess,
94
+ bool ScatterD = false,
95
+ typename PermuteDLayout = layout::NoPermute>
96
+ struct EpilogueTensorOp : public DefaultEpilogueTensorOp<Shape_,
97
+ WarpMmaTensorOp_,
98
+ PartitionsK,
99
+ OutputOp_,
100
+ ElementsPerAccess,
101
+ ScatterD,
102
+ PermuteDLayout> {
103
+ using Base = DefaultEpilogueTensorOp<Shape_,
104
+ WarpMmaTensorOp_,
105
+ PartitionsK,
106
+ OutputOp_,
107
+ ElementsPerAccess,
108
+ ScatterD,
109
+ PermuteDLayout>;
110
+ using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorAtomic<
111
+ typename Base::OutputTileThreadMap,
112
+ typename Base::ElementOutput,
113
+ ScatterD,
114
+ PermuteDLayout>;
115
+ using Epilogue =
116
+ cutlass::epilogue::threadblock::Epilogue<typename Base::Shape,
117
+ typename Base::WarpMmaTensorOp,
118
+ Base::kPartitionsK,
119
+ OutputTileIterator,
120
+ typename Base::AccumulatorFragmentIterator,
121
+ typename Base::WarpTileIterator,
122
+ typename Base::SharedLoadIterator,
123
+ typename Base::OutputOp,
124
+ typename Base::Padding,
125
+ Base::kFragmentsPerIteration>;
126
+ };
127
+
128
+ template <typename Shape_,
129
+ typename WarpMmaTensorOp_,
130
+ int PartitionsK,
131
+ typename OutputOp_,
132
+ int ElementsPerAccess,
133
+ bool ScatterD = false,
134
+ typename PermuteDLayout = layout::NoPermute>
135
+ struct EpilogueVoltaTensorOp : public DefaultEpilogueVoltaTensorOp<Shape_,
136
+ WarpMmaTensorOp_,
137
+ PartitionsK,
138
+ OutputOp_,
139
+ ElementsPerAccess,
140
+ ScatterD,
141
+ PermuteDLayout> {
142
+ using Base = DefaultEpilogueVoltaTensorOp<Shape_,
143
+ WarpMmaTensorOp_,
144
+ PartitionsK,
145
+ OutputOp_,
146
+ ElementsPerAccess,
147
+ ScatterD,
148
+ PermuteDLayout>;
149
+ using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorAtomic<
150
+ typename Base::OutputTileThreadMap,
151
+ typename Base::ElementOutput,
152
+ ScatterD,
153
+ PermuteDLayout>;
154
+ using Epilogue =
155
+ cutlass::epilogue::threadblock::Epilogue<typename Base::Shape,
156
+ typename Base::WarpMmaTensorOp,
157
+ Base::kPartitionsK,
158
+ OutputTileIterator,
159
+ typename Base::AccumulatorFragmentIterator,
160
+ typename Base::WarpTileIterator,
161
+ typename Base::SharedLoadIterator,
162
+ typename Base::OutputOp,
163
+ typename Base::Padding>;
164
+ };
165
+ } // namespace threadblock
166
+ } // namespace epilogue
167
+ } // namespace cutlass
168
+
169
+ template <typename Arch_,
170
+ typename Shape_,
171
+ typename WarpMmaTensorOp_,
172
+ int PartitionsK,
173
+ typename OutputOp_,
174
+ int ElementsPerAccess,
175
+ bool ScatterD = false,
176
+ typename PermuteDLayout = cutlass::layout::NoPermute>
177
+ struct BiasGradEpilogue {
178
+ using Epilogue =
179
+ typename cutlass::epilogue::threadblock::EpilogueTensorOp<Shape_,
180
+ WarpMmaTensorOp_,
181
+ PartitionsK,
182
+ OutputOp_,
183
+ ElementsPerAccess,
184
+ ScatterD,
185
+ PermuteDLayout>::Epilogue;
186
+ };
187
+
188
+ template <typename Shape_,
189
+ typename WarpMmaTensorOp_,
190
+ int PartitionsK,
191
+ typename OutputOp_,
192
+ int ElementsPerAccess,
193
+ bool ScatterD,
194
+ typename PermuteDLayout>
195
+ struct BiasGradEpilogue<cutlass::arch::Sm70,
196
+ Shape_,
197
+ WarpMmaTensorOp_,
198
+ PartitionsK,
199
+ OutputOp_,
200
+ ElementsPerAccess,
201
+ ScatterD,
202
+ PermuteDLayout> {
203
+ using Epilogue =
204
+ typename cutlass::epilogue::threadblock::EpilogueVoltaTensorOp<Shape_,
205
+ WarpMmaTensorOp_,
206
+ PartitionsK,
207
+ OutputOp_,
208
+ ElementsPerAccess,
209
+ ScatterD,
210
+ PermuteDLayout>::Epilogue;
211
+ };
212
+
213
+ template <typename Arch_,
214
+ int Rank,
215
+ typename Shape_,
216
+ typename WarpMmaTensorOp_,
217
+ int PartitionsK,
218
+ typename OutputOp_,
219
+ int ElementsPerAccess>
220
+ struct BiasGradEpilogueAffineRankN {
221
+ using Epilogue = typename cutlass::epilogue::threadblock::EpilogueTensorOpAffineRankN<
222
+ Rank,
223
+ Shape_,
224
+ WarpMmaTensorOp_,
225
+ PartitionsK,
226
+ OutputOp_,
227
+ ElementsPerAccess>::Epilogue;
228
+ };
229
+
230
+ template <int Rank,
231
+ typename Shape_,
232
+ typename WarpMmaTensorOp_,
233
+ int PartitionsK,
234
+ typename OutputOp_,
235
+ int ElementsPerAccess>
236
+ struct BiasGradEpilogueAffineRankN<cutlass::arch::Sm70,
237
+ Rank,
238
+ Shape_,
239
+ WarpMmaTensorOp_,
240
+ PartitionsK,
241
+ OutputOp_,
242
+ ElementsPerAccess> {
243
+ using Epilogue = typename cutlass::epilogue::threadblock::EpilogueVoltaTensorOpAffineRankN<
244
+ Rank,
245
+ Shape_,
246
+ WarpMmaTensorOp_,
247
+ PartitionsK,
248
+ OutputOp_,
249
+ ElementsPerAccess>::Epilogue;
250
+ };
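BiasGradEpilogue and BiasGradEpilogueAffineRankN above pick between the TensorOp and Volta TensorOp epilogues purely through specialization on the architecture tag. The toy sketch below shows the same dispatch shape with stand-in types (all names here are illustrative, none are CUTLASS types), which can make the selection easier to follow than the full parameter lists.

#include <type_traits>

struct Sm70 {};             // stand-in for cutlass::arch::Sm70
struct Sm80 {};             // stand-in for a newer architecture tag
struct VoltaEpilogue {};    // what the Sm70 specialization would select
struct TensorOpEpilogue {}; // what the generic template would select

// Primary template: default epilogue for any architecture tag.
template <typename Arch>
struct BiasGradEpilogueSelector {
    using Epilogue = TensorOpEpilogue;
};

// Specialization for Sm70 (the real header uses a partial specialization, since the
// other template parameters remain free).
template <>
struct BiasGradEpilogueSelector<Sm70> {
    using Epilogue = VoltaEpilogue;
};

static_assert(std::is_same<BiasGradEpilogueSelector<Sm80>::Epilogue, TensorOpEpilogue>::value, "generic path");
static_assert(std::is_same<BiasGradEpilogueSelector<Sm70>::Epilogue, VoltaEpilogue>::value, "Volta path");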
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_pipelined.h ADDED
@@ -0,0 +1,592 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ // Copyright (c) Microsoft Corporation.
33
+ // SPDX-License-Identifier: Apache-2.0
34
+
35
+ // DeepSpeed Team
36
+
37
+ /*! \file
38
+ \brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
39
+
40
+ File copied from "cutlass/epilogue/threadblock/epilogue.h"
41
+ then modified to:
42
+ (1) load 2 source fragments at the same time (pipelining)
43
+ (2) support reading from a different dtype
44
+ (3) pass the row id to the OutputOp if it takes it
45
+ (see MemoryEfficientAttentionNormalize)
46
+ Note that in general the fragment passed to the OutputOp could
47
+ span multiple rows but it does not happen with the configurations we have
48
+ */
49
+
50
+ #pragma once
51
+
52
+ #if defined(__CUDACC_RTC__)
53
+ #include <cuda/std/cassert>
54
+ #else
55
+ #include <assert.h>
56
+ #endif
57
+
58
+ #include "cutlass/aligned_buffer.h"
59
+ #include "cutlass/array.h"
60
+ #include "cutlass/cutlass.h"
61
+ #include "cutlass/functional.h"
62
+ #include "cutlass/layout/tensor.h"
63
+ #include "cutlass/layout/vector.h"
64
+ #include "cutlass/numeric_types.h"
65
+ #include "cutlass/tensor_coord.h"
66
+
67
+ #include "cutlass/gemm/gemm.h"
68
+
69
+ #include "cutlass/transform/pitch_linear_thread_map.h"
70
+ #include "cutlass/transform/threadblock/regular_tile_iterator.h"
71
+
72
+ #include "cutlass/epilogue/threadblock/epilogue_base.h"
73
+ #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
74
+ #include "cutlass/numeric_types.h"
75
+
76
+ ////////////////////////////////////////////////////////////////////////////////
77
+
78
+ namespace cutlass {
79
+ namespace epilogue {
80
+ namespace threadblock {
81
+
82
+ template <typename Op>
83
+ struct ApplyEpilogueOp {
84
+ static CUTLASS_DEVICE typename Op::FragmentOutput apply(
85
+ Op const& output_op,
86
+ int row_id,
87
+ typename Op::FragmentAccumulator const& accum,
88
+ typename Op::FragmentOutput const& source)
89
+ {
90
+ return output_op(accum, source);
91
+ }
92
+ static CUTLASS_DEVICE typename Op::FragmentOutput
93
+ apply(Op const& output_op, int row_id, typename Op::FragmentAccumulator const& accum)
94
+ {
95
+ return output_op(accum);
96
+ }
97
+ };
98
+
99
+ ////////////////////////////////////////////////////////////////////////////////
100
+
101
+ /// Epilogue operator
102
+ template <typename Shape_, ///< Shape of threadblock tile (concept: GemmShape)
103
+ typename WarpMmaOperator_, ///< Warp-level MMA operator (concept:
104
+ ///< gemm::warp::MmaTensorOp)
105
+ int PartitionsK, ///< Number of partitions of the K dimension
106
+ typename OutputTileIterator_, ///< Tile iterator writing output tensors
107
+ typename AccumulatorFragmentIterator_, ///< Fragment iterator selecting
108
+ ///< accumulators
109
+ typename WarpTileIterator_, ///< Warp-scoped tile iterator writing
110
+ ///< accumulators to SMEM
111
+ typename SharedLoadIterator_, ///< Threadblock-scoped tile iterator loading
112
+ ///< from SMEM
113
+ typename OutputOp_, ///< Output operator
114
+ typename Padding_, ///< Padding added to SMEM allocation to avoid bank
115
+ ///< conflicts (concept: MatrixShape)
116
+ int FragmentsPerPartition = 1, ///< Used to coarsten the epilogue granularity
117
+ int IterationsUnroll = ///< Used to reduce binary size when epilogue op is
118
+ ///< large
119
+ (!IsEpilogueFunctorHeavy<OutputOp_>::value),
120
+ typename OutputTileSourceIterator_ =
121
+ OutputTileIterator_ ///< Tile iterator reading tensors
122
+ >
123
+ class EpiloguePipelined : public EpilogueBase<Shape_,
124
+ typename WarpMmaOperator_::Shape,
125
+ PartitionsK,
126
+ AccumulatorFragmentIterator_,
127
+ WarpTileIterator_,
128
+ Padding_,
129
+ FragmentsPerPartition> {
130
+ public:
131
+ using Base = EpilogueBase<Shape_,
132
+ typename WarpMmaOperator_::Shape,
133
+ PartitionsK,
134
+ AccumulatorFragmentIterator_,
135
+ WarpTileIterator_,
136
+ Padding_,
137
+ FragmentsPerPartition>;
138
+
139
+ using Shape = Shape_;
140
+ using WarpMmaOperator = WarpMmaOperator_;
141
+ static int const kPartitionsK = PartitionsK;
142
+ using OutputTileIterator = OutputTileIterator_;
143
+ using OutputTileSourceIterator = OutputTileSourceIterator_;
144
+ using AccumulatorFragmentIterator = AccumulatorFragmentIterator_;
145
+ using WarpTileIterator = WarpTileIterator_;
146
+ using SharedLoadIterator = SharedLoadIterator_;
147
+ using OutputOp = OutputOp_;
148
+ using Padding = Padding_;
149
+
150
+ using Layout = layout::RowMajor;
151
+ using LongIndex = typename Layout::LongIndex;
152
+
153
+ /// The complete warp-level accumulator tile
154
+ using AccumulatorTile = typename Base::AccumulatorTile;
155
+
156
+ /// Accumulator element
157
+ using ElementAccumulator = typename WarpTileIterator::Element;
158
+
159
+ /// Output element
160
+ using ElementOutput = typename OutputTileIterator::Element;
161
+ using ElementSource = typename OutputTileSourceIterator::Element;
162
+
163
+ /// Output access size
164
+ static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess;
165
+
166
+ /// Tensor reference to destination tensor
167
+ using TensorRef = typename OutputTileIterator::TensorRef;
168
+
169
+ /// Tensor reference to sync tensor
170
+ using SyncTensorRef = typename cutlass::TensorRef<int, cutlass::layout::PackedVectorLayout>;
171
+
172
+ /// Const tensor reference to source tensor
173
+ using ConstTensorRef = typename OutputTileIterator::ConstTensorRef;
174
+
175
+ /// Array type used to output
176
+ using OutputAccessType =
177
+ Array<typename OutputTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
178
+ using SourceAccessType = Array<typename OutputTileSourceIterator::Element,
179
+ OutputTileSourceIterator::kElementsPerAccess>;
180
+
181
+ /// Array type used by output functor
182
+ using AccumulatorAccessType =
183
+ Array<typename WarpTileIterator::Element, OutputTileIterator::kElementsPerAccess>;
184
+
185
+ /// Number of warps
186
+ using WarpCount = typename Base::WarpCount;
187
+
188
+ static int constexpr kSmemTiles =
189
+ Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK;
190
+ static int constexpr kSmemPointerOffset =
191
+ Base::SharedStorage::StorageShape::kCount / kSmemTiles;
192
+
193
+ public:
194
+ static_assert(OutputTileSourceIterator::Fragment::kElements ==
195
+ OutputTileIterator::Fragment::kElements,
196
+ "Mismatch between input tile and output tile iterator (kElements)");
197
+ static_assert(OutputTileSourceIterator::kIterations == OutputTileIterator::kIterations,
198
+ "Mismatch between input tile and output tile iterator (kIterations)");
199
+ static_assert(SharedLoadIterator::Fragment::kElements ==
200
+ OutputTileIterator::Fragment::kElements,
201
+ "Mismatch between shared load iterator and output tile iterator.");
202
+
203
+ static_assert(OutputTileIterator::kElementsPerAccess,
204
+ "OutputTileIterator::kElementsPerAccess must not be zero.");
205
+
206
+ static_assert(!(OutputTileIterator::Fragment::kElements %
207
+ OutputTileIterator::kElementsPerAccess),
208
+ "Divisibility");
209
+
210
+ private:
211
+ /// Loads fragment from shared memory aligned with output tensor
212
+ SharedLoadIterator shared_load_iterator_;
213
+
214
+ public:
215
+ /// Constructor
216
+ CUTLASS_DEVICE
217
+ EpiloguePipelined(typename Base::SharedStorage& shared_storage, ///< Shared storage object
218
+ int thread_idx, ///< ID of a thread within the threadblock
219
+ int warp_idx, ///< ID of warp within threadblock
220
+ int lane_idx ///< Id of thread within warp
221
+ )
222
+ : Base(shared_storage, thread_idx, warp_idx, lane_idx),
223
+ shared_load_iterator_(shared_storage.reference(), thread_idx)
224
+ {
225
+ }
226
+
227
+ /// Streams the result to global memory
228
+ CUTLASS_DEVICE
229
+ void operator()(OutputOp const& output_op, ///< Output operator
230
+ OutputTileIterator destination_iterator, ///< Tile iterator for destination
231
+ AccumulatorTile const& accumulators, ///< Complete warp-level accumulator tile
232
+ OutputTileSourceIterator source_iterator)
233
+ { ///< Threadblock tile coordinate in GEMM (in units
234
+ ///< of threadblock tiles)
235
+
236
+ if (!output_op.is_source_needed()) {
237
+ compute_source_not_needed_(output_op, destination_iterator, accumulators);
238
+ } else {
239
+ compute_source_needed_(output_op, destination_iterator, accumulators, source_iterator);
240
+ }
241
+ }
242
+ CUTLASS_DEVICE
243
+ void operator()(OutputOp const& output_op, ///< Output operator
244
+ OutputTileIterator destination_iterator, ///< Tile iterator for destination
245
+ AccumulatorTile const& accumulators)
246
+ { ///< Complete warp-level accumulator tile
247
+ compute_source_not_needed_(output_op, destination_iterator, accumulators);
248
+ }
249
+
250
+ private:
251
+ template <class Seq>
252
+ struct acc2smem_source_not_needed;
253
+
254
+ template <size_t... Seq>
255
+ struct acc2smem_source_not_needed<cutlass::index_sequence<Seq...>> {
256
+ template <int Advance>
257
+ CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
258
+ WarpTileIterator& warp_tile_iterator)
259
+ {
260
+ CUTLASS_PRAGMA_UNROLL
261
+ for (int i = 0; i < Advance; i++) { ++accum_fragment_iterator; }
262
+
263
+ CUTLASS_PRAGMA_UNROLL
264
+ for (int p = 0; p < Base::kFragmentsPerIteration; ++p) {
265
+ typename AccumulatorFragmentIterator::Fragment accum_fragment;
266
+
267
+ accum_fragment_iterator.load(accum_fragment);
268
+ ++accum_fragment_iterator;
269
+
270
+ warp_tile_iterator.store(accum_fragment);
271
+ if (p < Base::kFragmentsPerIteration - 1) {
272
+ warp_tile_iterator.add_pointer_offset(kSmemPointerOffset);
273
+ }
274
+ }
275
+
276
+ if (Base::kFragmentsPerIteration > 1) {
277
+ warp_tile_iterator.add_pointer_offset(kSmemPointerOffset *
278
+ (1 - Base::kFragmentsPerIteration));
279
+ }
280
+ }
281
+
282
+ CUTLASS_DEVICE
283
+ static void push(size_t pos,
284
+ AccumulatorFragmentIterator const& iterator_begin,
285
+ WarpTileIterator& warp_tile_iterator)
286
+ {
287
+ int dummy[] = {
288
+ (pos == (Seq * Base::kFragmentsPerIteration)) &&
289
+ (helper<Seq * Base::kFragmentsPerIteration>(iterator_begin, warp_tile_iterator),
290
+ 0)...};
291
+
292
+ CUTLASS_UNUSED(dummy[0]);
293
+ }
294
+ };
295
+
296
+ static_assert(kPartitionsK == 1 || Base::kFragmentsPerIteration == 1,
297
+ "One of these must be exactly 1.");
298
+
299
+ /// Streams the result to global memory
300
+ CUTLASS_DEVICE
301
+ void compute_source_not_needed_(
302
+ OutputOp const& output_op, ///< Output operator
303
+ OutputTileIterator destination_iterator, ///< Tile iterator for destination
304
+ AccumulatorTile const& accumulators ///< Complete warp-level accumulator tile
305
+ )
306
+ {
307
+ //
308
+ // Iterator over warp-level accumulator fragment
309
+ //
310
+
311
+ AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
312
+
313
+ //
314
+ // Iterate over accumulator tile
315
+ //
316
+
317
+ #pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations / Base::kFragmentsPerIteration \
318
+ : 1)
319
+ for (int iter = 0; iter < OutputTileIterator::kIterations;
320
+ iter += Base::kFragmentsPerIteration) {
321
+ //
322
+ // Convert and store fragment
323
+ //
324
+
325
+ __syncthreads();
326
+
327
+ acc2smem_source_not_needed<cutlass::make_index_sequence<
328
+ OutputTileIterator::kIterations / Base::kFragmentsPerIteration>>::
329
+ push(iter, accum_fragment_iterator, this->warp_tile_iterator_);
330
+
331
+ __syncthreads();
332
+
333
+ //
334
+ // Load fragments from shared memory
335
+ //
336
+
337
+ CUTLASS_PRAGMA_UNROLL
338
+ for (int p = 0; p < Base::kFragmentsPerIteration; ++p) {
339
+ typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
340
+
341
+ shared_load_iterator_.load(aligned_accum_fragment[0]);
342
+
343
+ if (p < Base::kFragmentsPerIteration - 1) {
344
+ shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
345
+ } else if (kPartitionsK > 1) {
346
+ plus<typename SharedLoadIterator::Fragment> add_fragments;
347
+
348
+ CUTLASS_PRAGMA_UNROLL
349
+ for (int i = 1; i < kPartitionsK; ++i) {
350
+ shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
351
+ shared_load_iterator_.load(aligned_accum_fragment[i]);
352
+ aligned_accum_fragment[0] =
353
+ add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
354
+ }
355
+
356
+ shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) *
357
+ kSmemPointerOffset);
358
+ }
359
+
360
+ //
361
+ // Compute the output result
362
+ //
363
+
364
+ typename OutputTileIterator::Fragment output_fragment;
365
+
366
+ apply_output_operator_source_not_needed_(destination_iterator.thread_start_row(),
367
+ output_fragment,
368
+ output_op,
369
+ aligned_accum_fragment[0]);
370
+
371
+ //
372
+ // Store the final result
373
+ //
374
+
375
+ destination_iterator.store(output_fragment);
376
+ ++destination_iterator;
377
+ }
378
+
379
+ if (Base::kFragmentsPerIteration > 1) {
380
+ shared_load_iterator_.add_pointer_offset(kSmemPointerOffset *
381
+ (1 - Base::kFragmentsPerIteration));
382
+ }
383
+ }
384
+ }
385
+
386
+ template <class Seq>
387
+ struct acc2smem_source_needed;
388
+
389
+ template <size_t... Seq>
390
+ struct acc2smem_source_needed<cutlass::index_sequence<Seq...>> {
391
+ template <int Advance>
392
+ CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator,
393
+ WarpTileIterator& warp_tile_iterator)
394
+ {
395
+ CUTLASS_PRAGMA_UNROLL
396
+ for (int i = 0; i < Advance; i++) { ++accum_fragment_iterator; }
397
+
398
+ typename AccumulatorFragmentIterator::Fragment accum_fragment;
399
+ accum_fragment_iterator.load(accum_fragment);
400
+ warp_tile_iterator.store(accum_fragment);
401
+ }
402
+
403
+ CUTLASS_DEVICE
404
+ static void push(size_t pos,
405
+ AccumulatorFragmentIterator const& iterator_begin,
406
+ WarpTileIterator& warp_tile_iterator)
407
+ {
408
+ int dummy[] = {(pos == Seq) && (helper<Seq>(iterator_begin, warp_tile_iterator), 0)...};
409
+ }
410
+ };
411
+
412
+ /// Streams the result to global memory
413
+ CUTLASS_DEVICE
414
+ void compute_source_needed_(
415
+ OutputOp const& output_op, ///< Output operator
416
+ OutputTileIterator destination_iterator, ///< Tile iterator for destination
417
+ AccumulatorTile const& accumulators, ///< Complete warp-level accumulator tile
418
+ OutputTileSourceIterator source_iterator ///< Threadblock tile coordinate in GEMM (in units
419
+ ///< of threadblock tiles)
420
+ )
421
+ {
422
+ typename OutputTileSourceIterator::Fragment source_fragment[2];
423
+
424
+ source_fragment[0].clear();
425
+ source_iterator.load(source_fragment[0]);
426
+ ++source_iterator;
427
+ source_fragment[1].clear();
428
+
429
+ //
430
+ // Iterator over warp-level accumulator fragment
431
+ //
432
+
433
+ AccumulatorFragmentIterator accum_fragment_iterator(accumulators);
434
+
435
+ //
436
+ // Iterate over accumulator tile
437
+ //
438
+
439
+ #pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1)
440
+ for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) {
441
+ if (iter > 0) { __syncthreads(); }
442
+ //
443
+ // Load the source for next iteration (pipelining)
444
+ //
445
+
446
+ if (iter + 1 < OutputTileIterator::kIterations) {
447
+ source_iterator.load(source_fragment[(iter + 1) % 2]);
448
+ }
449
+ ++source_iterator;
450
+ acc2smem_source_needed<cutlass::make_index_sequence<OutputTileIterator::kIterations>>::
451
+ push(iter, accum_fragment_iterator, this->warp_tile_iterator_);
452
+
453
+ __syncthreads();
454
+
455
+ //
456
+ // Load fragments from shared memory
457
+ //
458
+
459
+ typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK];
460
+
461
+ shared_load_iterator_.load(aligned_accum_fragment[0]);
462
+
463
+ // If the number of k-slices is > 1 - perform a reduction amongst the
464
+ // k-slices
465
+ if (kPartitionsK > 1) {
466
+ plus<typename SharedLoadIterator::Fragment> add_fragments;
467
+
468
+ CUTLASS_PRAGMA_UNROLL
469
+ for (int i = 1; i < kPartitionsK; ++i) {
470
+ shared_load_iterator_.add_pointer_offset(kSmemPointerOffset);
471
+ shared_load_iterator_.load(aligned_accum_fragment[i]);
472
+ aligned_accum_fragment[0] =
473
+ add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]);
474
+ }
475
+
476
+ shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset);
477
+ }
478
+
479
+ //
480
+ // Compute the output result
481
+ //
482
+
483
+ typename OutputTileIterator::Fragment output_fragment;
484
+
485
+ apply_output_operator_(destination_iterator.thread_start_row(),
486
+ output_fragment,
487
+ output_op,
488
+ aligned_accum_fragment[0],
489
+ source_fragment[iter % 2]);
490
+
491
+ //
492
+ // Store the final result
493
+ //
494
+
495
+ destination_iterator.store(output_fragment);
496
+ ++destination_iterator;
497
+ }
498
+ }
499
+
500
+ /// Helper to invoke the output functor over each vector of output
501
+ CUTLASS_DEVICE
502
+ void apply_output_operator_(int begin_row,
503
+ typename OutputTileIterator::Fragment& output_fragment,
504
+ OutputOp const& output_op, ///< Output operator
505
+ typename SharedLoadIterator::Fragment const& aligned_accum_fragment,
506
+ typename OutputTileSourceIterator::Fragment const& source_fragment)
507
+ {
508
+ OutputAccessType* output_frag_ptr = reinterpret_cast<OutputAccessType*>(&output_fragment);
509
+
510
+ AccumulatorAccessType const* compute_frag_ptr =
511
+ reinterpret_cast<AccumulatorAccessType const*>(&aligned_accum_fragment);
512
+
513
+ SourceAccessType const* source_frag_ptr =
514
+ reinterpret_cast<SourceAccessType const*>(&source_fragment);
515
+
516
+ int const kOutputOpIterations =
517
+ OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
518
+
519
+ CUTLASS_PRAGMA_UNROLL
520
+ for (int i = 0; i < kOutputOpIterations; ++i) {
521
+ // Call the output operator
522
+ output_frag_ptr[i] = ApplyEpilogueOp<OutputOp>::apply(
523
+ output_op,
524
+ begin_row + getRowOffset(i * OutputTileIterator::kElementsPerAccess),
525
+ compute_frag_ptr[i],
526
+ source_frag_ptr[i]);
527
+ }
528
+ }
529
+
530
+ /// Helper to invoke the output functor over each vector of output
531
+ CUTLASS_DEVICE
532
+ void apply_output_operator_source_not_needed_(
533
+ int begin_row,
534
+ typename OutputTileIterator::Fragment& output_fragment,
535
+ OutputOp const& output_op, ///< Output operator
536
+ typename SharedLoadIterator::Fragment const& aligned_accum_fragment)
537
+ {
538
+ OutputAccessType* output_frag_ptr = reinterpret_cast<OutputAccessType*>(&output_fragment);
539
+
540
+ AccumulatorAccessType const* compute_frag_ptr =
541
+ reinterpret_cast<AccumulatorAccessType const*>(&aligned_accum_fragment);
542
+
543
+ int const kOutputOpIterations =
544
+ OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess;
545
+
546
+ CUTLASS_PRAGMA_UNROLL
547
+ for (int i = 0; i < kOutputOpIterations; ++i) {
548
+ // Call the output operator
549
+ output_frag_ptr[i] = ApplyEpilogueOp<OutputOp>::apply(
550
+ output_op,
551
+ begin_row + getRowOffset(i * OutputTileIterator::kElementsPerAccess),
552
+ compute_frag_ptr[i]);
553
+ }
554
+ }
555
+
556
+ // This should be constexpr, but constexpr functions with loops require C++14
557
+ static int CUTLASS_HOST_DEVICE getRowOffset(int i)
558
+ {
559
+ using ThreadMap = typename OutputTileIterator::ThreadMap;
560
+
561
+ CUTLASS_PRAGMA_UNROLL
562
+ for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
563
+ CUTLASS_PRAGMA_UNROLL
564
+ for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
565
+ CUTLASS_PRAGMA_UNROLL
566
+ for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
567
+ int row_offset = row * ThreadMap::Delta::kRow +
568
+ group * ThreadMap::Delta::kGroup +
569
+ cluster * ThreadMap::Delta::kCluster;
570
+ int frag_row_idx =
571
+ (row + ThreadMap::Iterations::kRow *
572
+ (group + ThreadMap::Iterations::kGroup * cluster));
573
+ CUTLASS_PRAGMA_UNROLL
574
+ for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
575
+ int frag_idx = ThreadMap::kElementsPerAccess *
576
+ (frag_row_idx * ThreadMap::Iterations::kColumn + column);
577
+ if (i < frag_idx + ThreadMap::kElementsPerAccess) { return row_offset; }
578
+ }
579
+ }
580
+ }
581
+ }
582
+ return -1;
583
+ }
584
+ };
585
+
586
+ ////////////////////////////////////////////////////////////////////////////////
587
+
588
+ } // namespace threadblock
589
+ } // namespace epilogue
590
+ } // namespace cutlass
591
+
592
+ ////////////////////////////////////////////////////////////////////////////////
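The acc2smem_* helpers above convert the runtime iteration index `pos` into a call to a helper templated on a compile-time advance value by expanding an index_sequence into a dummy array of short-circuited expressions; only the entry whose Seq matches pos evaluates its right-hand side. A minimal standalone sketch of that dispatch trick, using hypothetical names (handle, dispatch) that are not part of the kernel:

#include <cstddef>
#include <cstdio>
#include <utility>

// Stand-in for acc2smem_*::helper: work that needs a compile-time index.
template <std::size_t Advance>
void handle()
{
    std::printf("advanced by %zu\n", Advance);
}

// Expands (pos == Seq) && (handle<Seq>(), 0) for every Seq in the pack;
// the && short-circuit means exactly one instantiation runs per call.
template <std::size_t... Seq>
void dispatch(std::size_t pos, std::index_sequence<Seq...>)
{
    int dummy[] = {((pos == Seq) && (handle<Seq>(), 0))...};
    (void)dummy;  // silence unused-variable warnings, like CUTLASS_UNUSED
}

int main()
{
    for (std::size_t i = 0; i < 4; ++i) { dispatch(i, std::make_index_sequence<4>{}); }
    return 0;
}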
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_rescale_output.h ADDED
@@ -0,0 +1,251 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ // Copyright (c) Microsoft Corporation.
33
+ // SPDX-License-Identifier: Apache-2.0
34
+
35
+ // DeepSpeed Team
36
+
37
+ /*! \file
38
+ \brief Epilogue for threadblock scoped GEMMs using Tensor Ops.
39
+
40
+ The epilogue rearranges the result of a matrix product through shared memory
41
+ to match canonical tensor layouts in global memory. Epilogues support
42
+ conversion and reduction operations.
43
+
44
+ This is a copy of cutlass/epilogue/threadblock/epilogue.h that can
45
+ handle "row_id" as a first argument, and uses it to get the corresponding
46
+ `m_prime` / `s_prime` to rescale the output.
47
+ */
48
+
49
+ #pragma once
50
+
51
+ #if defined(__CUDACC_RTC__)
52
+ #include <cuda/std/cassert>
53
+ #else
54
+ #include <assert.h>
55
+ #endif
56
+
57
+ #include "cutlass/aligned_buffer.h"
58
+ #include "cutlass/array.h"
59
+ #include "cutlass/cutlass.h"
60
+ #include "cutlass/functional.h"
61
+ #include "cutlass/layout/tensor.h"
62
+ #include "cutlass/layout/vector.h"
63
+ #include "cutlass/numeric_types.h"
64
+ #include "cutlass/tensor_coord.h"
65
+
66
+ #include "cutlass/gemm/gemm.h"
67
+
68
+ #include "cutlass/transform/pitch_linear_thread_map.h"
69
+ #include "cutlass/transform/threadblock/regular_tile_iterator.h"
70
+
71
+ #include "cutlass/epilogue/threadblock/epilogue_base.h"
72
+ #include "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
73
+ #include "cutlass/numeric_types.h"
74
+
75
+ #include "cutlass/array.h"
76
+ #include "cutlass/cutlass.h"
77
+ #include "cutlass/epilogue/thread/scale_type.h"
78
+ #include "cutlass/functional.h"
79
+ #include "cutlass/numeric_conversion.h"
80
+ #include "cutlass/numeric_types.h"
81
+ #include "epilogue_pipelined.h"
82
+
83
+ /////////////////////////////////////////////////////////////////////////////////////////////////
84
+
85
+ namespace cutlass {
86
+ namespace epilogue {
87
+ namespace thread {
88
+
89
+ /////////////////////////////////////////////////////////////////////////////////////////////////
90
+
91
+ /// Applies a linear combination operator to an array of elements.
92
+ // output <- alpha * accumulator + beta * source
93
+ // with:
94
+ // alpha = 1 / s_prime (to normalize when isLast=True, 1 otherwise)
95
+ // beta = alpha / m_prime (renormalize the output when the max changes)
96
+ // source is the current output
97
+ template <typename ElementOutput_, ///< Data type used to store tensors
98
+ typename ElementSource_, //< Data type for source (usually matches
99
+ //`ElementOutput`)
100
+ int Count, ///< Number of elements computed per operation.
101
+ ///< Usually it is 128/sizeof_bits<ElementOutput_>,
102
+ ///< but we use 64 or 32 sometimes when there are not enough data
103
+ ///< to store
104
+ typename ElementAccumulator_, ///< Accumulator data type
105
+ typename ElementCompute_, ///< Data type used to compute linear combination
106
+ bool isFirst,
107
+ bool isLast,
108
+ typename FragmentAlphaBeta_,
109
+ FloatRoundStyle Round = FloatRoundStyle::round_to_nearest>
110
+ class MemoryEfficientAttentionNormalize {
111
+ public:
112
+ using ElementOutput = ElementOutput_;
113
+ using ElementSource = ElementSource_;
114
+ using ElementAccumulator = ElementAccumulator_;
115
+ using ElementCompute = ElementCompute_;
116
+
117
+ static int const kCount = Count;
118
+
119
+ using FragmentOutput = Array<ElementOutput, kCount>;
120
+ using FragmentSource = Array<ElementSource, kCount>;
121
+ using FragmentAccumulator = Array<ElementAccumulator, kCount>;
122
+ using ComputeFragment = Array<ElementCompute, kCount>;
123
+ using FragmentAlphaBeta = FragmentAlphaBeta_;
124
+
125
+ static FloatRoundStyle const kRound = Round;
126
+
127
+ private:
128
+ //
129
+ // Data members
130
+ //
131
+
132
+ FragmentAlphaBeta const& s_prime_;
133
+ FragmentAlphaBeta const& m_prime_;
134
+
135
+ public:
136
+ /// Constructs the function object, possibly loading from pointers in host
137
+ /// memory
138
+ CUTLASS_HOST_DEVICE
139
+ MemoryEfficientAttentionNormalize(FragmentAlphaBeta const& s_prime,
140
+ FragmentAlphaBeta const& m_prime)
141
+ : s_prime_(s_prime), m_prime_(m_prime)
142
+ {
143
+ }
144
+
145
+ /// Returns true if source is needed
146
+ CUTLASS_HOST_DEVICE
147
+ bool is_source_needed() const { return !isFirst; }
148
+
149
+ /// Functionally required for serial reduction in the epilogue
150
+ CUTLASS_HOST_DEVICE
151
+ void set_k_partition(int k_partition, int k_partition_count) {}
152
+
153
+ /// Computes linear scaling: D = alpha * accumulator + beta * source
154
+ CUTLASS_HOST_DEVICE
155
+ FragmentOutput operator()(int row,
156
+ FragmentAccumulator const& accumulator,
157
+ FragmentSource const& source) const
158
+ {
159
+ assert(!isFirst);
160
+
161
+ // Convert source to internal compute numeric type
162
+ NumericArrayConverter<ElementCompute, ElementSource, kCount, Round> source_converter;
163
+ NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round>
164
+ accumulator_converter;
165
+
166
+ // Convert to destination numeric type
167
+ NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
168
+
169
+ ComputeFragment converted_source = source_converter(source);
170
+ ComputeFragment converted_accumulator = accumulator_converter(accumulator);
171
+
172
+ // Perform binary operations
173
+ ComputeFragment intermediate;
174
+
175
+ multiplies<ComputeFragment> mul_add_source;
176
+ multiply_add<ComputeFragment> mul_add_accumulator;
177
+
178
+ ElementCompute alpha = isLast ? (1 / s_prime_[row]) : 1;
179
+ ElementCompute beta = alpha * m_prime_[row];
180
+
181
+ intermediate = mul_add_source(beta, converted_source); // X = beta * C
182
+
183
+ intermediate = mul_add_accumulator(
184
+ alpha, converted_accumulator, intermediate); // D = alpha * Accum + X
185
+
186
+ return destination_converter(intermediate);
187
+ }
188
+
189
+ /// Computes linear scaling: D = alpha * accumulator
190
+ CUTLASS_HOST_DEVICE
191
+ FragmentOutput operator()(int row, FragmentAccumulator const& accumulator) const
192
+ {
193
+ assert(isFirst);
194
+
195
+ // Convert source to internal compute numeric type
196
+ NumericArrayConverter<ElementCompute, ElementAccumulator, kCount, Round>
197
+ accumulator_converter;
198
+
199
+ // Convert to destination numeric type
200
+ NumericArrayConverter<ElementOutput, ElementCompute, kCount, Round> destination_converter;
201
+
202
+ ComputeFragment converted_accumulator = accumulator_converter(accumulator);
203
+
204
+ ComputeFragment intermediate;
205
+ multiplies<ComputeFragment> mul_accumulator;
206
+
207
+ ElementCompute alpha = isLast ? (1 / s_prime_[row]) : 1;
208
+
209
+ intermediate = mul_accumulator(alpha, converted_accumulator); // X = alpha * Accum
210
+
211
+ return destination_converter(intermediate);
212
+ }
213
+ };
214
+
215
+ } // namespace thread
216
+
217
+ namespace threadblock {
218
+ template <typename EO,
219
+ typename ES,
220
+ int Count,
221
+ typename EA,
222
+ typename EC,
223
+ bool F,
224
+ bool L,
225
+ typename FAB,
226
+ FloatRoundStyle R>
227
+ struct ApplyEpilogueOp<
228
+ thread::MemoryEfficientAttentionNormalize<EO, ES, Count, EA, EC, F, L, FAB, R>> {
229
+ using Op = thread::MemoryEfficientAttentionNormalize<EO, ES, Count, EA, EC, F, L, FAB, R>;
230
+ static CUTLASS_DEVICE typename Op::FragmentOutput apply(
231
+ Op const& output_op,
232
+ int row_id,
233
+ typename Op::FragmentAccumulator const& accum,
234
+ typename Op::FragmentSource const& source)
235
+ {
236
+ return output_op(row_id, accum, source);
237
+ }
238
+ static CUTLASS_DEVICE typename Op::FragmentOutput
239
+ apply(Op const& output_op, int row_id, typename Op::FragmentAccumulator const& accum)
240
+ {
241
+ return output_op(row_id, accum);
242
+ }
243
+ };
244
+
245
+ /////////////////////////////////////////////////////////////////////////////////////////////////
246
+
247
+ } // namespace threadblock
248
+ } // namespace epilogue
249
+ } // namespace cutlass
250
+
251
+ /////////////////////////////////////////////////////////////////////////////////////////////////
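For reference, the per-row arithmetic performed by MemoryEfficientAttentionNormalize when a source is needed (isFirst == false) is D = alpha * Accum + beta * C, with alpha = 1 / s_prime[row] on the last partial (1 otherwise) and beta = alpha * m_prime[row]. The scalar sketch below collapses each row to a single value for brevity; the function name and signature are illustrative, not the kernel's API.

#include <cassert>
#include <cstddef>
#include <vector>

// Scalar sketch of the rescaling math (one value per row for brevity).
std::vector<float> normalize_rows(std::vector<float> const& accum,   // fresh accumulator chunk
                                  std::vector<float> const& source,  // previous partial output
                                  std::vector<float> const& s_prime,
                                  std::vector<float> const& m_prime,
                                  bool is_last)
{
    assert(accum.size() == source.size());
    std::vector<float> out(accum.size());
    for (std::size_t row = 0; row < accum.size(); ++row) {
        float alpha = is_last ? 1.0f / s_prime[row] : 1.0f;  // normalize only on the last chunk
        float beta = alpha * m_prime[row];                   // renormalize the running output
        out[row] = alpha * accum[row] + beta * source[row];  // D = alpha * Accum + beta * C
    }
    return out;
}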
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_thread_apply_logsumexp.h ADDED
@@ -0,0 +1,168 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
3
+ *reserved. SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice,
9
+ *this list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
+ *ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23
+ *LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24
+ *CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25
+ *SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26
+ *INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27
+ *CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28
+ *ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29
+ *POSSIBILITY OF SUCH DAMAGE.
30
+ *
31
+ **************************************************************************************************/
32
+
33
+ // Copyright (c) Microsoft Corporation.
34
+ // SPDX-License-Identifier: Apache-2.0
35
+
36
+ // DeepSpeed Team
37
+
38
+ /*! \file
39
+ \brief Functor performing linear combination operations used by epilogues.
40
+ */
41
+
42
+ #pragma once
43
+
44
+ #include <cuda_fp16.h>
45
+
46
+ #include "cutlass/array.h"
47
+ #include "cutlass/cutlass.h"
48
+ #include "cutlass/epilogue/thread/activation.h"
49
+ #include "cutlass/functional.h"
50
+ #include "cutlass/numeric_conversion.h"
51
+ #include "cutlass/numeric_types.h"
52
+
53
+ /////////////////////////////////////////////////////////////////////////////////////////////////
54
+
55
+ namespace cutlass {
56
+ namespace epilogue {
57
+ namespace thread {
58
+
59
+ /////////////////////////////////////////////////////////////////////////////////////////////////
60
+
61
+ namespace detail {
62
+
63
+ template <typename Element, int ElementsPerAccess>
64
+ struct ArrayExponential {
65
+ CUTLASS_HOST_DEVICE
66
+ Array<Element, ElementsPerAccess> operator()(
67
+ Array<Element, ElementsPerAccess> const& input) const
68
+ {
69
+ Array<Element, ElementsPerAccess> result;
70
+
71
+ CUTLASS_PRAGMA_UNROLL
72
+ for (int i = 0; i < ElementsPerAccess; ++i) { result[i] = expf(input[i]); }
73
+
74
+ return result;
75
+ }
76
+ };
77
+
78
+ template <int ElementsPerAccess>
79
+ struct ArrayExponential<half_t, ElementsPerAccess> {
80
+ CUTLASS_DEVICE
81
+ Array<half_t, ElementsPerAccess> operator()(Array<half_t, ElementsPerAccess> const& input) const
82
+ {
83
+ Array<half_t, ElementsPerAccess> result;
84
+
85
+ int const kVectorCount = ElementsPerAccess / 2;
86
+
87
+ __half2 const* input_ptr = reinterpret_cast<__half2 const*>(input.raw_data());
88
+ __half2* res_ptr = reinterpret_cast<__half2*>(result.raw_data());
89
+
90
+ CUTLASS_PRAGMA_UNROLL
91
+ for (int i = 0; i < kVectorCount; ++i) { res_ptr[i] = h2exp(input_ptr[i]); }
92
+
93
+ return result;
94
+ }
95
+ };
96
+ } // namespace detail
97
+
98
+ /////////////////////////////////////////////////////////////////////////////////////////////////
99
+
100
+ /// Applies:
101
+ /// output <- (input - lse).exp()
102
+ template <typename ElementOutput_, // output
103
+ typename ElementLSE_, // accumulator from LSE
104
+ typename ElementAccumulator_, // accumulator from matmul
105
+ typename ElementCompute_, // intermediate compute (and exp calculation)
106
+ int ElementsPerAccess>
107
+ class ApplyLogSumExp {
108
+ public:
109
+ using ElementOutput = ElementOutput_;
110
+ using ElementAccumulator = ElementAccumulator_;
111
+ using ElementCompute = ElementCompute_;
112
+ using ElementLSE = ElementLSE_;
113
+
114
+ static int const kElementsPerAccess = ElementsPerAccess;
115
+ static int const kCount = kElementsPerAccess;
116
+ static const ScaleType::Kind kScale = cutlass::epilogue::thread::ScaleType::NoBetaScaling;
117
+
118
+ using FragmentOutput = Array<ElementOutput, kCount>;
119
+ using FragmentAccumulator = Array<ElementAccumulator, kElementsPerAccess>;
120
+ using FragmentCompute = Array<ElementCompute, kElementsPerAccess>;
121
+ using FragmentLSE = Array<ElementLSE, kElementsPerAccess>;
122
+ using FragmentScaleBias = FragmentLSE; // Used by epilogue_smem_accumulator.h
123
+
124
+ public:
125
+ //
126
+ // Methods
127
+ //
128
+
129
+ CUTLASS_HOST_DEVICE
130
+ ApplyLogSumExp() {}
131
+
132
+ /// Returns true if source is needed
133
+ CUTLASS_HOST_DEVICE
134
+ bool is_source_needed() const { return true; }
135
+
136
+ /// Functionally required for serial reduction in the epilogue
137
+ CUTLASS_HOST_DEVICE
138
+ void set_k_partition(int k_partition, int k_partition_count) {}
139
+
140
+ CUTLASS_HOST_DEVICE
141
+ FragmentOutput operator()(FragmentAccumulator const& AB,
142
+ FragmentLSE const& scale_unused,
143
+ // bias used as LSE
144
+ FragmentLSE const& bias) const
145
+ {
146
+ FragmentCompute frag_AB =
147
+ NumericArrayConverter<ElementCompute, ElementAccumulator, kElementsPerAccess>()(AB);
148
+ FragmentCompute frag_lse_compute =
149
+ NumericArrayConverter<ElementCompute, ElementLSE, kElementsPerAccess>()(bias);
150
+ FragmentCompute frag_compute;
151
+
152
+ minus<FragmentCompute> minus_lse;
153
+ detail::ArrayExponential<ElementCompute, kElementsPerAccess> apply_exp;
154
+ frag_compute = minus_lse(frag_AB, frag_lse_compute);
155
+ frag_compute = apply_exp(frag_compute);
156
+
157
+ return NumericArrayConverter<ElementOutput, ElementCompute, kElementsPerAccess>()(
158
+ frag_compute);
159
+ }
160
+ };
161
+
162
+ /////////////////////////////////////////////////////////////////////////////////////////////////
163
+
164
+ } // namespace thread
165
+ } // namespace epilogue
166
+ } // namespace cutlass
167
+
168
+ /////////////////////////////////////////////////////////////////////////////////////////////////
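Element-wise, ApplyLogSumExp reduces to out[i] = exp(AB[i] - lse[i]); the ElementCompute template parameter only widens the intermediate arithmetic. A scalar sketch under that reading, with illustrative names:

#include <cmath>
#include <cstddef>
#include <vector>

// out[i] = exp(ab[i] - lse[i]), computed in float regardless of storage type.
std::vector<float> apply_logsumexp(std::vector<float> const& ab, std::vector<float> const& lse)
{
    std::vector<float> out(ab.size());
    for (std::size_t i = 0; i < ab.size(); ++i) { out[i] = std::exp(ab[i] - lse[i]); }
    return out;
}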
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma.h ADDED
@@ -0,0 +1,119 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holdvr nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ // Copyright (c) Microsoft Corporation.
33
+ // SPDX-License-Identifier: Apache-2.0
34
+
35
+ // DeepSpeed Team
36
+
37
+ #pragma once
38
+
39
+ #include "custom_mma_multistage.h"
40
+ #include "custom_mma_pipelined.h"
41
+ #include "cutlass/gemm/threadblock/mma_multistage.h"
42
+ #include "cutlass/gemm/threadblock/mma_pipelined.h"
43
+
44
+ template <typename Mma, int kMaxK>
45
+ struct MakeCustomMma;
46
+
47
+ template <typename Shape,
48
+ typename IteratorA,
49
+ typename SmemIteratorA,
50
+ cutlass::arch::CacheOperation::Kind CacheOpA,
51
+ typename IteratorB,
52
+ typename SmemIteratorB,
53
+ cutlass::arch::CacheOperation::Kind CacheOpB,
54
+ typename ElementC,
55
+ typename LayoutC,
56
+ typename Policy,
57
+ int Stages,
58
+ cutlass::gemm::SharedMemoryClearOption SharedMemoryClear,
59
+ int kMaxK>
60
+ struct MakeCustomMma<cutlass::gemm::threadblock::MmaMultistage<Shape,
61
+ IteratorA,
62
+ SmemIteratorA,
63
+ CacheOpA,
64
+ IteratorB,
65
+ SmemIteratorB,
66
+ CacheOpB,
67
+ ElementC,
68
+ LayoutC,
69
+ Policy,
70
+ Stages,
71
+ SharedMemoryClear>,
72
+ kMaxK> {
73
+ // Reduce the number of stages if we don't need that many
74
+ static int constexpr kStages =
75
+ kMaxK == cutlass::platform::numeric_limits<int>::max()
76
+ ? Stages
77
+ : cutlass::const_min(Stages, (kMaxK + int(Shape::kK) - 1) / int(Shape::kK));
78
+ using Mma = cutlass::gemm::threadblock::CustomMmaMultistage<Shape,
79
+ IteratorA,
80
+ SmemIteratorA,
81
+ CacheOpA,
82
+ IteratorB,
83
+ SmemIteratorB,
84
+ CacheOpB,
85
+ ElementC,
86
+ LayoutC,
87
+ Policy,
88
+ kStages,
89
+ SharedMemoryClear,
90
+ kMaxK>;
91
+ };
92
+
93
+ template <typename Shape,
94
+ typename IteratorA,
95
+ typename SmemIteratorA,
96
+ typename IteratorB,
97
+ typename SmemIteratorB,
98
+ typename ElementC,
99
+ typename LayoutC,
100
+ typename Policy,
101
+ int kMaxK>
102
+ struct MakeCustomMma<cutlass::gemm::threadblock::MmaPipelined<Shape,
103
+ IteratorA,
104
+ SmemIteratorA,
105
+ IteratorB,
106
+ SmemIteratorB,
107
+ ElementC,
108
+ LayoutC,
109
+ Policy>,
110
+ kMaxK> {
111
+ using Mma = cutlass::gemm::threadblock::CustomMmaPipelined<Shape,
112
+ IteratorA,
113
+ SmemIteratorA,
114
+ IteratorB,
115
+ SmemIteratorB,
116
+ ElementC,
117
+ LayoutC,
118
+ Policy>;
119
+ };
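The kStages expression in MakeCustomMma clamps the pipeline depth to the number of K tiles that can actually be consumed, i.e. min(Stages, ceil(kMaxK / Shape::kK)). A plain-integer sketch of that formula (the tile sizes in the asserts are examples only):

// kStages = min(stages, ceil(max_k / tile_k)); never increases the stage count.
constexpr int reduced_stages(int stages, int max_k, int tile_k)
{
    return stages < (max_k + tile_k - 1) / tile_k ? stages : (max_k + tile_k - 1) / tile_k;
}

static_assert(reduced_stages(5, 128, 32) == 4, "ceil(128/32) = 4 < 5 requested stages");
static_assert(reduced_stages(3, 256, 32) == 3, "enough K tiles, keep all 3 stages");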
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma_base.h ADDED
@@ -0,0 +1,181 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
3
+ *reserved. SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice,
9
+ *this list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
+ *ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23
+ *LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24
+ *CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25
+ *SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26
+ *INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27
+ *CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28
+ *ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29
+ *POSSIBILITY OF SUCH DAMAGE.
30
+ *
31
+ **************************************************************************************************/
32
+
33
+ // Copyright (c) Microsoft Corporation.
34
+ // SPDX-License-Identifier: Apache-2.0
35
+
36
+ // DeepSpeed Team
37
+
38
+ /*! \file
39
+ \brief Template for a double-buffered threadblock-scoped GEMM kernel.
40
+ */
41
+
42
+ #pragma once
43
+
44
+ #include "cutlass/aligned_buffer.h"
45
+ #include "cutlass/arch/memory.h"
46
+ #include "cutlass/array.h"
47
+ #include "cutlass/cutlass.h"
48
+ #include "cutlass/gemm/gemm.h"
49
+ #include "cutlass/gemm/threadblock/mma_base.h"
50
+ #include "cutlass/matrix_shape.h"
51
+ #include "cutlass/numeric_types.h"
52
+
53
+ ////////////////////////////////////////////////////////////////////////////////
54
+
55
+ namespace cutlass {
56
+ namespace gemm {
57
+ namespace threadblock {
58
+
59
+ ////////////////////////////////////////////////////////////////////////////////
60
+
61
+ /// Structure to compute the matrix product targeting CUDA cores and SIMT math
62
+ /// instructions.
63
+ template <
64
+ /// Size of the Gemm problem - concept: gemm::GemmShape<>
65
+ typename Shape_,
66
+ /// Policy describing tuning details (concept: MmaPolicy)
67
+ typename Policy_,
68
+ /// Number of stages,
69
+ int Stages,
70
+ /// Used for partial specialization
71
+ typename Enable = bool>
72
+ class CustomMmaBase {
73
+ public:
74
+ ///< Size of the Gemm problem - concept: gemm::GemmShape<>
75
+ using Shape = Shape_;
76
+
77
+ ///< Policy describing tuning details
78
+ using Policy = Policy_;
79
+
80
+ //
81
+ // Dependent types
82
+ //
83
+
84
+ /// Warp-level Mma
85
+ using Operator = typename Policy::Operator;
86
+
87
+ /// Shape describing the overall GEMM computed from shared memory
88
+ /// by each warp.
89
+ using WarpGemm = typename Policy::Operator::Shape;
90
+
91
+ /// Shape describing the number of warps filling the CTA
92
+ using WarpCount =
93
+ GemmShape<Shape::kM / WarpGemm::kM, Shape::kN / WarpGemm::kN, Shape::kK / WarpGemm::kK>;
94
+
95
+ /// Number of warp-level GEMM oeprations
96
+ static int const kWarpGemmIterations = (WarpGemm::kK / Operator::Policy::MmaShape::kK);
97
+
98
+ /// Number of stages
99
+ static int const kStages = Stages;
100
+
101
+ //
102
+ // Nested structs
103
+ //
104
+
105
+ /// Shared storage object needed by threadblock-scoped GEMM
106
+ template <typename Element, typename OperandShape, typename OperandLayout>
107
+ struct OperandSharedStorage {
108
+ AlignedBuffer<Element, OperandShape::kCount> buffer;
109
+ using TensorRef = TensorRef<Element, OperandLayout>;
110
+
111
+ CUTLASS_DEVICE
112
+ static OperandLayout Layout()
113
+ {
114
+ return OperandLayout::packed({OperandShape::kRow, OperandShape::kColumn});
115
+ }
116
+
117
+ /// Returns a TensorRef to the operand
118
+ CUTLASS_HOST_DEVICE
119
+ TensorRef ref() { return TensorRef{buffer.data(), Layout()}; }
120
+ };
121
+
122
+ /// Shape of the A matrix operand in shared memory
123
+ using ShapeA = MatrixShape<Shape::kM + Policy::SmemPaddingA::kRow,
124
+ Shape::kK * kStages + Policy::SmemPaddingA::kColumn>;
125
+
126
+ /// Shape of the B matrix operand in shared memory
127
+ using ShapeB = MatrixShape<Shape::kK * kStages + Policy::SmemPaddingB::kRow,
128
+ Shape::kN + Policy::SmemPaddingB::kColumn>;
129
+
130
+ using SharedStorageA =
131
+ OperandSharedStorage<typename Operator::ElementA, ShapeA, typename Operator::LayoutA>;
132
+ using SharedStorageB =
133
+ OperandSharedStorage<typename Operator::ElementB, ShapeB, typename Operator::LayoutB>;
134
+ using TensorRefA = typename SharedStorageA::TensorRef;
135
+ using TensorRefB = typename SharedStorageB::TensorRef;
136
+
137
+ struct SharedStorage {
138
+ /// Buffer for A operand
139
+ SharedStorageA operand_A;
140
+
141
+ /// Buffer for B operand
142
+ SharedStorageB operand_B;
143
+ };
144
+
145
+ protected:
146
+ //
147
+ // Data members
148
+ //
149
+
150
+ /// Iterator to load a warp-scoped tile of A operand from shared memory
151
+ typename Operator::IteratorA warp_tile_iterator_A_;
152
+
153
+ /// Iterator to load a warp-scoped tile of B operand from shared memory
154
+ typename Operator::IteratorB warp_tile_iterator_B_;
155
+
156
+ public:
157
+ /// Construct from tensor references
158
+ CUTLASS_DEVICE
159
+ CustomMmaBase(
160
+ ///< Shared storage needed for internal use by threadblock-scoped GEMM
161
+ SharedStorageA& shared_storageA,
162
+ SharedStorageB& shared_storageB,
163
+ ///< ID within the threadblock
164
+ int thread_idx,
165
+ ///< ID of warp
166
+ int warp_idx,
167
+ ///< ID of each thread within a warp
168
+ int lane_idx)
169
+ : warp_tile_iterator_A_(shared_storageA.ref(), lane_idx),
170
+ warp_tile_iterator_B_(shared_storageB.ref(), lane_idx)
171
+ {
172
+ }
173
+ };
174
+
175
+ /////////////////////////////////////////////////////////////////////////////////////////////////
176
+
177
+ } // namespace threadblock
178
+ } // namespace gemm
179
+ } // namespace cutlass
180
+
181
+ /////////////////////////////////////////////////////////////////////////////////////////////////
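CustomMmaBase stages operand A in shared memory as an (M x K*Stages) tile and operand B as a (K*Stages x N) tile (ShapeA/ShapeB above, plus padding). The sketch below estimates that footprint for example tile sizes, ignoring the SmemPadding terms; the numbers are illustrative and not tied to a particular kernel configuration.

#include <cstdio>

int main()
{
    const int kM = 128, kN = 128, kK = 32;  // example threadblock tile
    const int kStages = 3;                  // example pipeline depth
    const int kBytesPerElement = 2;         // e.g. half precision

    // ShapeA ~ (kM x kK*kStages), ShapeB ~ (kK*kStages x kN), padding ignored.
    const int smem_a = kM * kK * kStages * kBytesPerElement;
    const int smem_b = kK * kStages * kN * kBytesPerElement;
    std::printf("A: %d B, B: %d B, total: %d B\n", smem_a, smem_b, smem_a + smem_b);
    return 0;
}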
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma_multistage.h ADDED
@@ -0,0 +1,714 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
3
+ *reserved. SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice,
9
+ *this list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
+ *ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23
+ *LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24
+ *CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25
+ *SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26
+ *INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27
+ *CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28
+ *ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29
+ *POSSIBILITY OF SUCH DAMAGE.
30
+ *
31
+ **************************************************************************************************/
32
+
33
+ // Copyright (c) Microsoft Corporation.
34
+ // SPDX-License-Identifier: Apache-2.0
35
+
36
+ // DeepSpeed Team
37
+
38
+ /*! \file
39
+ \brief Template for a double-buffered threadblock-scoped GEMM kernel.
40
+ */
41
+
42
+ #pragma once
43
+
44
+ #include "cutlass/aligned_buffer.h"
45
+ #include "cutlass/arch/cache_operation.h"
46
+ #include "cutlass/arch/memory.h"
47
+ #include "cutlass/array.h"
48
+ #include "cutlass/cutlass.h"
49
+ #include "cutlass/gemm/gemm.h"
50
+ #include "cutlass/matrix_shape.h"
51
+ #include "cutlass/numeric_types.h"
52
+
53
+ #include "custom_mma_base.h"
54
+
55
+ /////////////////////////////////////////////////////////////////////////////////////////////////
56
+
57
+ namespace cutlass {
58
+ namespace gemm {
59
+ namespace threadblock {
60
+
61
+ /////////////////////////////////////////////////////////////////////////////////////////////////
62
+
63
+ /// Structure to compute the matrix product targeting CUDA cores and SIMT math
64
+ /// instructions.
65
+ template <
66
+ /// Size of the Gemm problem - concept: gemm::GemmShape<>
67
+ typename Shape_,
68
+ /// Iterates over tiles of A operand in global memory
69
+ // (concept: ReadableTileIterator | ForwardTileIterator |
70
+ // MaskedTileIterator)
71
+ typename IteratorA_,
72
+ /// Iterates over tiles of A operand in shared memory
73
+ /// (concept: WriteableTileIterator | RandomAccessTileIterator)
74
+ typename SmemIteratorA_,
75
+ /// Cache operation for operand A
76
+ cutlass::arch::CacheOperation::Kind CacheOpA,
77
+ /// Iterates over tiles of B operand in global memory
78
+ // (concept: ReadableTileIterator | ForwardTileIterator |
79
+ // MaskedTileIterator)
80
+ typename IteratorB_,
81
+ /// Iterates over tiles of B operand in shared memory
82
+ /// (concept: WriteableTileIterator | RandomAccessTileIterator)
83
+ typename SmemIteratorB_,
84
+ /// Cache operation for operand B
85
+ cutlass::arch::CacheOperation::Kind CacheOpB,
86
+ /// Data type of accumulator matrix
87
+ typename ElementC_,
88
+ /// Data type of accumulator matrix
89
+ typename LayoutC_,
90
+ /// Policy describing tuning details (concept: MmaPolicy)
91
+ typename Policy_,
92
+ /// Number of stages,
93
+ int Stages,
94
+ /// Use zfill or predicate for out-of-bound cp.async
95
+ SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone,
96
+ /// Upper bound on the K dimension
97
+ int kMaxK = cutlass::platform::numeric_limits<int>::max(),
98
+ /// Used for partial specialization
99
+ typename Enable = bool>
100
+ class CustomMmaMultistage : public CustomMmaBase<Shape_, Policy_, Stages> {
101
+ public:
102
+ ///< Base class
103
+ using Base = CustomMmaBase<Shape_, Policy_, Stages>;
104
+ ///< Size of the Gemm problem - concept: gemm::GemmShape<>
105
+ using Shape = Shape_;
106
+ ///< Iterates over tiles of A operand in global memory
107
+ using IteratorA = IteratorA_;
108
+ ///< Iterates over tiles of B operand in global memory
109
+ using IteratorB = IteratorB_;
110
+ ///< Data type of accumulator matrix
111
+ using ElementC = ElementC_;
112
+ ///< Layout of accumulator matrix
113
+ using LayoutC = LayoutC_;
114
+ ///< Policy describing tuning details
115
+ using Policy = Policy_;
116
+
117
+ using SmemIteratorA = SmemIteratorA_;
118
+ using SmemIteratorB = SmemIteratorB_;
119
+
120
+ static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA;
121
+ static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB;
122
+
123
+ //
124
+ // Dependent types
125
+ //
126
+
127
+ /// Fragment of accumulator tile
128
+ using FragmentC = typename Policy::Operator::FragmentC;
129
+
130
+ /// Warp-level Mma
131
+ using Operator = typename Policy::Operator;
132
+
133
+ /// Minimum architecture is Sm80 to support cp.async
134
+ using ArchTag = arch::Sm80;
135
+
136
+ /// Complex transform on A operand
137
+ static ComplexTransform const kTransformA = Operator::kTransformA;
138
+
139
+ /// Complex transform on B operand
140
+ static ComplexTransform const kTransformB = Operator::kTransformB;
141
+
142
+ /// Internal structure exposed for introspection.
143
+ struct Detail {
144
+ static_assert(Base::kWarpGemmIterations > 1,
145
+ "The pipelined structure requires at least two warp-level "
146
+ "GEMM operations.");
147
+
148
+ /// Number of cp.async instructions to load one stage of operand A
149
+ static int const AsyncCopyIterationsPerStageA = IteratorA::ThreadMap::Iterations::kCount;
150
+
151
+ /// Number of cp.async instructions to load one stage of operand B
152
+ static int const AsyncCopyIterationsPerStageB = IteratorB::ThreadMap::Iterations::kCount;
153
+
154
+ /// Number of stages
155
+ static int const kStages = Stages;
156
+
157
+ /// Number of cp.async instructions to load one group of operand A
158
+ static int const kAccessesPerGroupA =
159
+ (AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) /
160
+ Base::kWarpGemmIterations;
161
+
162
+ /// Number of cp.async instructions to load one group of operand B
163
+ static int const kAccessesPerGroupB =
164
+ (AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) /
165
+ Base::kWarpGemmIterations;
166
+ };
167
+
168
+ static bool const kSmemContainsEntireMat = kMaxK <= Shape::kK * Stages;
169
+ static constexpr int kNumStagesConcurrentLoad = kSmemContainsEntireMat ? Stages : Stages - 1;
170
+
171
+ private:
172
+ using WarpLoadedFragmentA = typename Operator::FragmentA;
173
+ using WarpLoadedFragmentB = typename Operator::FragmentB;
174
+ using WarpTransformedFragmentA = typename Operator::TransformedFragmentA;
175
+ using WarpTransformedFragmentB = typename Operator::TransformedFragmentB;
176
+
177
+ private:
178
+ //
179
+ // Data members
180
+ //
181
+
182
+ /// Iterator to write threadblock-scoped tile of A operand to shared memory
183
+ SmemIteratorA smem_iterator_A_;
184
+
185
+ /// Iterator to write threadblock-scoped tile of B operand to shared memory
186
+ SmemIteratorB smem_iterator_B_;
187
+
188
+ bool prologue_done_;
189
+
190
+ // Set to `True` to ensure the accumulator will be zero outside the GEMM
191
+ // footprint
192
+ bool zero_outside_bounds_;
193
+
194
+ public:
195
+ /// Construct from tensor references
196
+ CUTLASS_DEVICE
197
+ CustomMmaMultistage(
198
+ ///< Shared storage needed for internal use by threadblock-scoped GEMM
199
+ typename Base::SharedStorageA& shared_storageA,
200
+ typename Base::SharedStorageB& shared_storageB,
201
+ ///< ID within the threadblock
202
+ int thread_idx,
203
+ ///< ID of warp
204
+ int warp_idx,
205
+ ///< ID of each thread within a warp
206
+ int lane_idx)
207
+ : Base(shared_storageA, shared_storageB, thread_idx, warp_idx, lane_idx),
208
+ smem_iterator_A_(shared_storageA.ref(), thread_idx),
209
+ smem_iterator_B_(shared_storageB.ref(), thread_idx),
210
+ prologue_done_(false),
211
+ zero_outside_bounds_(false)
212
+ {
213
+ // Compute warp location within threadblock tile by mapping the warp_id to
214
+ // three coordinates:
215
+ // _m: the warp's position within the threadblock along the M dimension
216
+ // _n: the warp's position within the threadblock along the N dimension
217
+ // _k: the warp's position within the threadblock along the K dimension
218
+
219
+ int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
220
+ int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
221
+
222
+ int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
223
+ int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
224
+
225
+ // Add per-warp offsets in units of warp-level tiles
226
+ this->warp_tile_iterator_A_.add_tile_offset(
227
+ {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
228
+ this->warp_tile_iterator_B_.add_tile_offset(
229
+ {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
230
+ }
231
+ CUTLASS_DEVICE
232
+ CustomMmaMultistage(
233
+ ///< Shared storage needed for internal use by threadblock-scoped GEMM
234
+ typename Base::SharedStorage& st,
235
+ ///< ID within the threadblock
236
+ int thread_idx,
237
+ ///< ID of warp
238
+ int warp_idx,
239
+ ///< ID of each thread within a warp
240
+ int lane_idx)
241
+ : CustomMmaMultistage(st.operand_A, st.operand_B, thread_idx, warp_idx, lane_idx)
242
+ {
243
+ }
244
+
245
+ CUTLASS_DEVICE
246
+ bool set_prologue_done(bool value)
247
+ {
248
+ prologue_done_ = value;
249
+ return true;
250
+ }
251
+
252
+ CUTLASS_DEVICE
253
+ bool set_zero_outside_bounds(bool value)
254
+ {
255
+ zero_outside_bounds_ = value;
256
+ return true;
257
+ }
258
+
259
+ template <bool kLoadA = true, bool kLoadB = true>
260
+ CUTLASS_DEVICE static void prologue(typename Base::SharedStorage& shared_storage,
261
+ ///< iterator over A operand in global memory
262
+ IteratorA iterator_A,
263
+ ///< iterator over B operand in global memory
264
+ IteratorB iterator_B,
265
+ int thread_idx,
266
+ int problem_size_k)
267
+ {
268
+ prologue<kLoadA, kLoadB>(shared_storage.operand_A,
269
+ shared_storage.operand_B,
270
+ iterator_A,
271
+ iterator_B,
272
+ thread_idx,
273
+ problem_size_k);
274
+ }
275
+
276
+ template <bool kLoadA = true, bool kLoadB = true>
277
+ CUTLASS_DEVICE static void prologue(typename Base::SharedStorageA& shared_storageA,
278
+ typename Base::SharedStorageB& shared_storageB,
279
+ ///< iterator over A operand in global memory
280
+ IteratorA iterator_A,
281
+ ///< iterator over B operand in global memory
282
+ IteratorB iterator_B,
283
+ int thread_idx,
284
+ int problem_size_k)
285
+ {
286
+ SmemIteratorA smem_iterator_A(shared_storageA.ref(), thread_idx);
287
+ SmemIteratorB smem_iterator_B(shared_storageB.ref(), thread_idx);
288
+ int32_t iter = (problem_size_k + Base::Shape::kK - 1) / Base::Shape::kK;
289
+ _prologue<kLoadA, kLoadB>(iterator_A, iterator_B, iter, smem_iterator_A, smem_iterator_B);
290
+ }
291
+
292
+ CUTLASS_DEVICE
293
+ void copy_tiles_and_advance(IteratorA& iterator_A,
294
+ IteratorB& iterator_B,
295
+ int group_start_A = 0,
296
+ int group_start_B = 0)
297
+ {
298
+ iterator_A.set_iteration_index(group_start_A * IteratorA::kAccessesPerVector);
299
+ this->smem_iterator_A_.set_iteration_index(group_start_A);
300
+
301
+ // Async Copy for operand A
302
+ CUTLASS_PRAGMA_UNROLL
303
+ for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) {
304
+ if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) {
305
+ typename IteratorA::AccessType* dst_ptr =
306
+ reinterpret_cast<typename IteratorA::AccessType*>(this->smem_iterator_A_.get());
307
+
308
+ int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
309
+ IteratorA::ThreadMap::kElementsPerAccess /
310
+ IteratorA::kAccessesPerVector / 8;
311
+
312
+ CUTLASS_PRAGMA_UNROLL
313
+ for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
314
+ auto gmem_ptr = iterator_A.get();
315
+
316
+ if (zero_outside_bounds_ ||
317
+ SharedMemoryClear == SharedMemoryClearOption::kZfill) {
318
+ cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
319
+ dst_ptr + v, gmem_ptr, iterator_A.valid());
320
+ } else {
321
+ cutlass::arch::cp_async<kSrcBytes, kCacheOpA>(
322
+ dst_ptr + v, gmem_ptr, iterator_A.valid());
323
+ }
324
+
325
+ ++iterator_A;
326
+ }
327
+
328
+ ++this->smem_iterator_A_;
329
+ }
330
+ }
331
+
332
+ iterator_B.set_iteration_index(group_start_B * IteratorB::kAccessesPerVector);
333
+ this->smem_iterator_B_.set_iteration_index(group_start_B);
334
+
335
+ // Async Copy for operand B
336
+ CUTLASS_PRAGMA_UNROLL
337
+ for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) {
338
+ if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) {
339
+ typename IteratorB::AccessType* dst_ptr =
340
+ reinterpret_cast<typename IteratorB::AccessType*>(this->smem_iterator_B_.get());
341
+
342
+ int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value *
343
+ IteratorB::ThreadMap::kElementsPerAccess /
344
+ IteratorB::kAccessesPerVector / 8;
345
+
346
+ CUTLASS_PRAGMA_UNROLL
347
+ for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
348
+ auto gmem_ptr = iterator_B.get();
349
+
350
+ if (zero_outside_bounds_ ||
351
+ SharedMemoryClear == SharedMemoryClearOption::kZfill) {
352
+ cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
353
+ dst_ptr + v, gmem_ptr, iterator_B.valid());
354
+ } else {
355
+ cutlass::arch::cp_async<kSrcBytes, kCacheOpB>(
356
+ dst_ptr + v, gmem_ptr, iterator_B.valid());
357
+ }
358
+
359
+ ++iterator_B;
360
+ }
361
+ ++this->smem_iterator_B_;
362
+ }
363
+ }
364
+ }
365
+
366
+ template <bool kLoadA = true, bool kLoadB = true>
367
+ CUTLASS_DEVICE static void _prologue(IteratorA& iterator_A,
368
+ IteratorB& iterator_B,
369
+ int32_t& gemm_k_iterations,
370
+ SmemIteratorA& smem_iterator_A_,
371
+ SmemIteratorB& smem_iterator_B_)
372
+ {
373
+ // Issue several complete stages
374
+ CUTLASS_PRAGMA_UNROLL
375
+ for (int stage = 0; stage < kNumStagesConcurrentLoad; ++stage, --gemm_k_iterations) {
376
+ iterator_A.clear_mask(gemm_k_iterations == 0);
377
+ iterator_B.clear_mask(gemm_k_iterations == 0);
378
+
379
+ iterator_A.set_iteration_index(0);
380
+ smem_iterator_A_.set_iteration_index(0);
381
+
382
+ // Async Copy for operand A
383
+ CUTLASS_PRAGMA_UNROLL
384
+ for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
385
+ typename IteratorA::AccessType* dst_ptr =
386
+ reinterpret_cast<typename IteratorA::AccessType*>(smem_iterator_A_.get());
387
+
388
+ CUTLASS_PRAGMA_UNROLL
389
+ for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) {
390
+ int const kSrcBytes = sizeof_bits<typename IteratorA::Element>::value *
391
+ IteratorA::ThreadMap::kElementsPerAccess /
392
+ IteratorA::kAccessesPerVector / 8;
393
+
394
+ int src_bytes = (iterator_A.valid() ? kSrcBytes : 0);
395
+
396
+ if (kLoadA) {
397
+ cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpA>(
398
+ dst_ptr + v, iterator_A.get(), iterator_A.valid());
399
+ }
400
+
401
+ ++iterator_A;
402
+ }
403
+
404
+ ++smem_iterator_A_;
405
+ }
406
+
407
+ iterator_B.set_iteration_index(0);
408
+ smem_iterator_B_.set_iteration_index(0);
409
+
410
+ // Async Copy for operand B
411
+ CUTLASS_PRAGMA_UNROLL
412
+ for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
413
+ typename IteratorB::AccessType* dst_ptr =
414
+ reinterpret_cast<typename IteratorB::AccessType*>(smem_iterator_B_.get());
415
+
416
+ CUTLASS_PRAGMA_UNROLL
417
+ for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) {
418
+ int const kSrcBytes = sizeof_bits<typename IteratorB::Element>::value *
419
+ IteratorB::ThreadMap::kElementsPerAccess /
420
+ IteratorB::kAccessesPerVector / 8;
421
+
422
+ if (kLoadB) {
423
+ cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB>(
424
+ dst_ptr + v, iterator_B.get(), iterator_B.valid());
425
+ }
426
+
427
+ ++iterator_B;
428
+ }
429
+
430
+ ++smem_iterator_B_;
431
+ }
432
+
433
+ // Move to the next stage
434
+ iterator_A.add_tile_offset({0, 1});
435
+ iterator_B.add_tile_offset({1, 0});
436
+
437
+ smem_iterator_A_.add_tile_offset({0, 1});
438
+ smem_iterator_B_.add_tile_offset({1, 0});
439
+
440
+ // Defines the boundary of a stage of cp.async.
441
+ cutlass::arch::cp_async_fence();
442
+ }
443
+ }
444
+
445
+ /// Perform a threadblock-scoped matrix multiply-accumulate
446
+ CUTLASS_DEVICE
447
+ void operator()(
448
+ ///< problem size of GEMM
449
+ int gemm_k_iterations,
450
+ ///< destination accumulator tile
451
+ FragmentC& accum,
452
+ ///< iterator over A operand in global memory
453
+ IteratorA iterator_A,
454
+ ///< iterator over B operand in global memory
455
+ IteratorB iterator_B,
456
+ ///< initial value of accumulator
457
+ FragmentC const& src_accum)
458
+ {
459
+ //
460
+ // Prologue
461
+ //
462
+
463
+ if (!prologue_done_) {
464
+ _prologue<true, true>(
465
+ iterator_A, iterator_B, gemm_k_iterations, smem_iterator_A_, smem_iterator_B_);
466
+ } else if (!kSmemContainsEntireMat) {
467
+ _prologue<false, false>(
468
+ iterator_A, iterator_B, gemm_k_iterations, smem_iterator_A_, smem_iterator_B_);
469
+ } else {
470
+ gemm_k_iterations -= kNumStagesConcurrentLoad;
471
+ }
472
+
473
+ // Perform accumulation in the 'd' output operand
474
+ accum = src_accum;
475
+
476
+ //
477
+ // Clear the remaining tiles of SMEM. This is a functional requirement for
478
+ // some kernels so that all accumulator elements outside the GEMM footprint
479
+ // are zero.
480
+ //
481
+
482
+ if (SharedMemoryClear == SharedMemoryClearOption::kClearLastStage) {
483
+ /// Iterator to write threadblock-scoped tile of A operand to shared
484
+ /// memory
485
+ SmemIteratorA last_smem_iterator_A(this->smem_iterator_A_);
486
+
487
+ typename IteratorA::AccessType zero_A;
488
+ zero_A.clear();
489
+
490
+ last_smem_iterator_A.set_iteration_index(0);
491
+
492
+ // Async Copy for operand A
493
+ CUTLASS_PRAGMA_UNROLL
494
+ for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) {
495
+ typename IteratorA::AccessType* dst_ptr =
496
+ reinterpret_cast<typename IteratorA::AccessType*>(last_smem_iterator_A.get());
497
+
498
+ *dst_ptr = zero_A;
499
+
500
+ ++last_smem_iterator_A;
501
+ }
502
+
503
+ /// Iterator to write threadblock-scoped tile of B operand to shared
504
+ /// memory
505
+ SmemIteratorB last_smem_iterator_B(this->smem_iterator_B_);
506
+ typename IteratorB::AccessType zero_B;
507
+
508
+ zero_B.clear();
509
+ last_smem_iterator_B.set_iteration_index(0);
510
+
511
+ // Async Copy for operand B
512
+ CUTLASS_PRAGMA_UNROLL
513
+ for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) {
514
+ typename IteratorB::AccessType* dst_ptr =
515
+ reinterpret_cast<typename IteratorB::AccessType*>(last_smem_iterator_B.get());
516
+
517
+ *dst_ptr = zero_B;
518
+
519
+ ++last_smem_iterator_B;
520
+ }
521
+ }
522
+
523
+ // Waits until kStages-2 stages have committed.
524
+ cutlass::arch::cp_async_wait<kNumStagesConcurrentLoad - 1>();
525
+ __syncthreads();
526
+
527
+ // Pair of fragments used to overlap shared memory loads and math
528
+ // instructions
529
+ WarpLoadedFragmentA warp_loaded_frag_A[2];
530
+ WarpLoadedFragmentB warp_loaded_frag_B[2];
531
+ WarpTransformedFragmentA warp_transformed_frag_A[2];
532
+ WarpTransformedFragmentB warp_transformed_frag_B[2];
533
+
534
+ Operator warp_mma;
535
+
536
+ this->warp_tile_iterator_A_.set_kgroup_index(0);
537
+ this->warp_tile_iterator_B_.set_kgroup_index(0);
538
+
539
+ this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]);
540
+ this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]);
541
+
542
+ ++this->warp_tile_iterator_A_;
543
+ ++this->warp_tile_iterator_B_;
544
+
545
+ iterator_A.clear_mask(gemm_k_iterations == 0);
546
+ iterator_B.clear_mask(gemm_k_iterations == 0);
547
+
548
+ int smem_write_stage_idx = Base::kStages - 1;
549
+ int smem_read_stage_idx = 0;
550
+
551
+ warp_mma.transform(warp_transformed_frag_A[0],
552
+ warp_transformed_frag_B[0],
553
+ warp_loaded_frag_A[0],
554
+ warp_loaded_frag_B[0]);
555
+
556
+ // tf32x3 kernels use staging accumulation. warp_mma uses a temporary
557
+ // accumulator and this temporary accumulator is added to the final
558
+ // accumulator once in every mainloop iteration.
559
+ plus<FragmentC> plus_accum;
560
+
561
+ FragmentC tmp_accum;
562
+
563
+ if (platform::is_same<typename Operator::MathOperator, arch::OpMultiplyAddFastF32>::value ||
564
+ platform::is_same<typename Operator::MathOperator,
565
+ arch::OpMultiplyAddComplexFastF32>::value) {
566
+ tmp_accum.clear();
567
+ }
568
+
569
+ //
570
+ // Mainloop
571
+ //
572
+
573
+ CUTLASS_GEMM_LOOP
574
+ for (; gemm_k_iterations > (-kNumStagesConcurrentLoad);) {
575
+ //
576
+ // Loop over GEMM K dimension
577
+ //
578
+
579
+ // Computes a warp-level GEMM on data held in shared memory
580
+ // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
581
+ CUTLASS_PRAGMA_UNROLL
582
+ for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) {
583
+ // Load warp-level tiles from shared memory, wrapping to k offset if
584
+ // this is the last group as the case may be.
585
+
586
+ this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) %
587
+ Base::kWarpGemmIterations);
588
+ this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) %
589
+ Base::kWarpGemmIterations);
590
+
591
+ // In case of a non-circular buffer ("kSmemContainsEntireMat")
592
+ // make sure we don't load out of bounds data.
593
+ if (!kSmemContainsEntireMat || gemm_k_iterations > (-kNumStagesConcurrentLoad) ||
594
+ warp_mma_k < Base::kWarpGemmIterations - 1) {
595
+ this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]);
596
+ this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
597
+ }
598
+
599
+ ++this->warp_tile_iterator_A_;
600
+ ++this->warp_tile_iterator_B_;
601
+
602
+ if (warp_mma_k > 0)
603
+ warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2],
604
+ warp_transformed_frag_B[warp_mma_k % 2],
605
+ warp_loaded_frag_A[warp_mma_k % 2],
606
+ warp_loaded_frag_B[warp_mma_k % 2]);
607
+
608
+ if (platform::is_same<typename Operator::MathOperator,
609
+ arch::OpMultiplyAddFastF32>::value ||
610
+ platform::is_same<typename Operator::MathOperator,
611
+ arch::OpMultiplyAddComplexFastF32>::value) {
612
+ warp_mma(tmp_accum,
613
+ warp_transformed_frag_A[warp_mma_k % 2],
614
+ warp_transformed_frag_B[warp_mma_k % 2],
615
+ tmp_accum);
616
+
617
+ if (warp_mma_k == 0) {
618
+ accum = plus_accum(accum, tmp_accum);
619
+ tmp_accum.clear();
620
+ }
621
+ } else {
622
+ warp_mma(accum,
623
+ warp_transformed_frag_A[warp_mma_k % 2],
624
+ warp_transformed_frag_B[warp_mma_k % 2],
625
+ accum);
626
+ }
627
+
628
+ // Issue global->shared copies for this stage
629
+ if (!kSmemContainsEntireMat && warp_mma_k < Base::kWarpGemmIterations - 1) {
630
+ int group_start_iteration_A, group_start_iteration_B;
631
+
632
+ group_start_iteration_A = warp_mma_k * Detail::kAccessesPerGroupA;
633
+ group_start_iteration_B = warp_mma_k * Detail::kAccessesPerGroupB;
634
+
635
+ copy_tiles_and_advance(
636
+ iterator_A, iterator_B, group_start_iteration_A, group_start_iteration_B);
637
+ }
638
+
639
+ if (warp_mma_k + 2 == Base::kWarpGemmIterations) {
640
+ if (!kSmemContainsEntireMat) {
641
+ int group_start_iteration_A, group_start_iteration_B;
642
+ group_start_iteration_A = (warp_mma_k + 1) * Detail::kAccessesPerGroupA;
643
+ group_start_iteration_B = (warp_mma_k + 1) * Detail::kAccessesPerGroupB;
644
+
645
+ copy_tiles_and_advance(iterator_A,
646
+ iterator_B,
647
+ group_start_iteration_A,
648
+ group_start_iteration_B);
649
+ }
650
+
651
+ // Inserts a memory fence between stages of cp.async instructions.
652
+ cutlass::arch::cp_async_fence();
653
+
654
+ // Waits until kStages-2 stages have committed.
655
+ cutlass::arch::cp_async_wait<kNumStagesConcurrentLoad - 1>();
656
+ __syncthreads();
657
+
658
+ // Move to the next stage
659
+ iterator_A.add_tile_offset({0, 1});
660
+ iterator_B.add_tile_offset({1, 0});
661
+
662
+ this->smem_iterator_A_.add_tile_offset({0, 1});
663
+ this->smem_iterator_B_.add_tile_offset({1, 0});
664
+
665
+ // Add negative offsets to return iterators to the 'start' of the
666
+ // circular buffer in shared memory
667
+ if (smem_write_stage_idx == (Base::kStages - 1)) {
668
+ this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
669
+ this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
670
+ smem_write_stage_idx = 0;
671
+ } else {
672
+ ++smem_write_stage_idx;
673
+ }
674
+
675
+ if (!kSmemContainsEntireMat && smem_read_stage_idx == (Base::kStages - 1)) {
676
+ this->warp_tile_iterator_A_.add_tile_offset(
677
+ {0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations});
678
+ this->warp_tile_iterator_B_.add_tile_offset(
679
+ {-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0});
680
+ smem_read_stage_idx = 0;
681
+ } else {
682
+ ++smem_read_stage_idx;
683
+ }
684
+
685
+ --gemm_k_iterations;
686
+ iterator_A.clear_mask(gemm_k_iterations == 0);
687
+ iterator_B.clear_mask(gemm_k_iterations == 0);
688
+ }
689
+
690
+ // Do any conversions feeding the first stage at the end of the loop so
691
+ // we can start right away on mma instructions
692
+ if (warp_mma_k + 1 == Base::kWarpGemmIterations)
693
+ warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2],
694
+ warp_transformed_frag_B[(warp_mma_k + 1) % 2],
695
+ warp_loaded_frag_A[(warp_mma_k + 1) % 2],
696
+ warp_loaded_frag_B[(warp_mma_k + 1) % 2]);
697
+ }
698
+ }
699
+
700
+ if (platform::is_same<typename Operator::MathOperator, arch::OpMultiplyAddFastF32>::value ||
701
+ platform::is_same<typename Operator::MathOperator,
702
+ arch::OpMultiplyAddComplexFastF32>::value) {
703
+ accum = plus_accum(accum, tmp_accum);
704
+ }
705
+ }
706
+ };
707
+
708
+ /////////////////////////////////////////////////////////////////////////////////////////////////
709
+
710
+ } // namespace threadblock
711
+ } // namespace gemm
712
+ } // namespace cutlass
713
+
714
+ /////////////////////////////////////////////////////////////////////////////////////////////////
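The multistage mainloop above overlaps cp.async copies with tensor-core math by cycling through a small circular buffer of shared-memory stages: the prologue prefetches kNumStagesConcurrentLoad stages, and every mainloop iteration issues one new stage, fences it, and waits until at most kStages-2 stages remain in flight before consuming the oldest one. The fragment below is a hypothetical, simplified sketch of that bookkeeping (not code from this commit); issue_cp_async_for_stage and compute_on_stage stand in for the iterator-driven copies and warp-level MMAs in the real class.

// Simplified stage accounting; the helper functions are assumed placeholders.
constexpr int kStages = 3;                        // illustrative stage count
int write_stage = kStages - 1;                    // the prologue already filled kStages - 1 stages
int read_stage = 0;

for (int k = 0; k < gemm_k_iterations; ++k) {
    issue_cp_async_for_stage(write_stage);        // global -> shared copies for the next K tile
    cutlass::arch::cp_async_fence();              // close this stage's group of cp.async ops
    cutlass::arch::cp_async_wait<kStages - 2>();  // keep at most kStages - 2 stages in flight
    __syncthreads();                              // the oldest stage is now visible in shared memory
    compute_on_stage(read_stage);                 // warp-level MMAs on that stage
    write_stage = (write_stage + 1) % kStages;    // advance the circular shared-memory buffer
    read_stage = (read_stage + 1) % kStages;
}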
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma_pipelined.h ADDED
@@ -0,0 +1,388 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
3
+ *reserved. SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice,
9
+ *this list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
+ *ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23
+ *LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24
+ *CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25
+ *SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26
+ *INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27
+ *CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28
+ *ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29
+ *POSSIBILITY OF SUCH DAMAGE.
30
+ *
31
+ **************************************************************************************************/
32
+
33
+ // Copyright (c) Microsoft Corporation.
34
+ // SPDX-License-Identifier: Apache-2.0
35
+
36
+ // DeepSpeed Team
37
+
38
+ /*! \file
39
+ \brief Template for a double-buffered threadblock-scoped GEMM kernel.
40
+ */
41
+
42
+ #pragma once
43
+
44
+ #include "cutlass/aligned_buffer.h"
45
+ #include "cutlass/array.h"
46
+ #include "cutlass/cutlass.h"
47
+ #include "cutlass/numeric_conversion.h"
48
+
49
+ #include "cutlass/matrix_shape.h"
50
+ #include "cutlass/numeric_types.h"
51
+
52
+ #include "custom_mma_base.h"
53
+ #include "cutlass/gemm/gemm.h"
54
+
55
+ /////////////////////////////////////////////////////////////////////////////////////////////////
56
+
57
+ namespace cutlass {
58
+ namespace gemm {
59
+ namespace threadblock {
60
+
61
+ /////////////////////////////////////////////////////////////////////////////////////////////////
62
+
63
+ /// Structure to compute the matrix product targeting CUDA cores and SIMT math
64
+ /// instructions.
65
+ template <
66
+ /// Size of the Gemm problem - concept: gemm::GemmShape<>
67
+ typename Shape_,
68
+ /// Iterates over tiles of A operand in global memory
69
+ // (concept: ReadableTileIterator | ForwardTileIterator |
70
+ // MaskedTileIterator)
71
+ typename IteratorA_,
72
+ /// Iterates over tiles of A operand in shared memory
73
+ /// (concept: WriteableTileIterator | RandomAccessTileIterator)
74
+ typename SmemIteratorA_,
75
+ /// Iterates over tiles of B operand in global memory
76
+ // (concept: ReadableTileIterator | ForwardTileIterator |
77
+ // MaskedTileIterator)
78
+ typename IteratorB_,
79
+ /// Iterates over tiles of B operand in shared memory
80
+ /// (concept: WriteableTileIterator | RandomAccessTileIterator)
81
+ typename SmemIteratorB_,
82
+ /// Data type of accumulator matrix
83
+ typename ElementC_,
84
+ /// Layout of accumulator matrix
85
+ typename LayoutC_,
86
+ /// Policy describing tuning details (concept: MmaPolicy)
87
+ typename Policy_,
88
+ /// Transformation applied to A operand
89
+ typename TransformA_ = NumericArrayConverter<typename SmemIteratorA_::Element,
90
+ typename IteratorA_::Element,
91
+ IteratorA_::Fragment::kElements>,
92
+ ///
93
+ /// Transformation applied to B operand
94
+ typename TransformB_ = NumericArrayConverter<typename SmemIteratorB_::Element,
95
+ typename IteratorB_::Element,
96
+ IteratorB_::Fragment::kElements>,
97
+ /// Used for partial specialization
98
+ typename Enable = bool>
99
+ class CustomMmaPipelined : public CustomMmaBase<Shape_, Policy_, 2> {
100
+ public:
101
+ ///< Base class
102
+ using Base = CustomMmaBase<Shape_, Policy_, 2>;
103
+
104
+ using Shape = Shape_; ///< Size of the Gemm problem - concept: gemm::GemmShape<>
105
+ using IteratorA = IteratorA_; ///< Iterates over tiles of A operand in global memory
106
+ using IteratorB = IteratorB_; ///< Iterates over tiles of B operand in global memory
107
+ using ElementC = ElementC_; ///< Data type of accumulator matrix
108
+ using LayoutC = LayoutC_; ///< Layout of accumulator matrix
109
+ using Policy = Policy_; ///< Policy describing tuning details
110
+
111
+ using SmemIteratorA = SmemIteratorA_;
112
+ using SmemIteratorB = SmemIteratorB_;
113
+
114
+ using TransformA = TransformA_;
115
+ using TransformB = TransformB_;
116
+
117
+ //
118
+ // Dependent types
119
+ //
120
+
121
+ /// Fragment of operand A loaded from global memory
122
+ using FragmentA = typename IteratorA::Fragment;
123
+
124
+ /// Fragment of operand B loaded from global memory
125
+ using FragmentB = typename IteratorB::Fragment;
126
+
127
+ /// Fragment of accumulator tile
128
+ using FragmentC = typename Policy::Operator::FragmentC;
129
+
130
+ /// Warp-level Mma
131
+ using Operator = typename Policy::Operator;
132
+
133
+ /// Obtain the arch tag from the warp-level operator
134
+ using ArchTag = typename Policy::Operator::ArchTag;
135
+
136
+ /// Complex transform on A operand
137
+ static ComplexTransform const kTransformA = Operator::kTransformA;
138
+
139
+ /// Complex transform on B operand
140
+ static ComplexTransform const kTransformB = Operator::kTransformB;
141
+
142
+ // statically assert that kStages for MmaPipelined is two (double-buffered pipeline)
143
+ static_assert((Base::kStages == 2), "MmaPipelined requires kStages set to value 2");
144
+
145
+ static bool const kSmemContainsEntireMat = false;
146
+
147
+ private:
148
+ using WarpFragmentA = typename Operator::FragmentA;
149
+ using WarpFragmentB = typename Operator::FragmentB;
150
+
151
+ protected:
152
+ /// Iterator to write threadblock-scoped tile of A operand to shared memory
153
+ SmemIteratorA smem_iterator_A_;
154
+
155
+ /// Iterator to write threadblock-scoped tile of B operand to shared memory
156
+ SmemIteratorB smem_iterator_B_;
157
+
158
+ public:
159
+ /// Construct from tensor references
160
+ CUTLASS_DEVICE
161
+ CustomMmaPipelined(typename Base::SharedStorageA& shared_storageA,
162
+ typename Base::SharedStorageB& shared_storageB,
163
+ int thread_idx, ///< ID within the threadblock
164
+ int warp_idx, ///< ID of warp
165
+ int lane_idx ///< ID of each thread within a warp
166
+ )
167
+ : Base(shared_storageA, shared_storageB, thread_idx, warp_idx, lane_idx),
168
+ smem_iterator_A_(shared_storageA.ref(), thread_idx),
169
+ smem_iterator_B_(shared_storageB.ref(), thread_idx)
170
+ {
171
+ // Compute warp location within threadblock tile by mapping the warp_id to
172
+ // three coordinates:
173
+ // _m: the warp's position within the threadblock along the M dimension
174
+ // _n: the warp's position within the threadblock along the N dimension
175
+ // _k: the warp's position within the threadblock along the K dimension
176
+
177
+ int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
178
+ int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
179
+
180
+ int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
181
+ int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
182
+
183
+ // Add per-warp offsets in units of warp-level tiles
184
+ this->warp_tile_iterator_A_.add_tile_offset(
185
+ {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
186
+ this->warp_tile_iterator_B_.add_tile_offset(
187
+ {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
188
+ }
189
+ CUTLASS_DEVICE
190
+ CustomMmaPipelined(
191
+ ///< Shared storage needed for internal use by threadblock-scoped GEMM
192
+ typename Base::SharedStorage& st,
193
+ ///< ID within the threadblock
194
+ int thread_idx,
195
+ ///< ID of warp
196
+ int warp_idx,
197
+ ///< ID of each thread within a warp
198
+ int lane_idx)
199
+ : CustomMmaPipelined(st.operand_A, st.operand_B, thread_idx, warp_idx, lane_idx)
200
+ {
201
+ }
202
+
203
+ CUTLASS_DEVICE
204
+ bool set_prologue_done(bool value)
205
+ {
206
+ // NOT IMPLEMENTED FOR PIPELINED
+ return false; // value assumed unused by callers; returning avoids falling off a value-returning function
207
+ }
208
+
209
+ CUTLASS_DEVICE
210
+ bool set_zero_outside_bounds(bool value)
211
+ {
212
+ // NOT NEEDED FOR PIPELINED
213
+ // shared memory will always be zero-filled
+ return true; // value assumed unused by callers; returning avoids falling off a value-returning function
214
+ }
215
+
216
+ template <bool kLoadA = true, bool kLoadB = true>
217
+ CUTLASS_DEVICE static void prologue(typename Base::SharedStorage& shared_storage,
218
+ ///< iterator over A operand in global memory
219
+ IteratorA iterator_A,
220
+ ///< iterator over B operand in global memory
221
+ IteratorB iterator_B,
222
+ int thread_idx,
223
+ int problem_size_k)
224
+ {
225
+ prologue<kLoadA, kLoadB>(shared_storage.operand_A,
226
+ shared_storage.operand_B,
227
+ iterator_A,
228
+ iterator_B,
229
+ thread_idx,
230
+ problem_size_k);
231
+ }
232
+
233
+ template <bool kLoadA = true, bool kLoadB = true>
234
+ CUTLASS_DEVICE static void prologue(typename Base::SharedStorageA& shared_storageA,
235
+ typename Base::SharedStorageB& shared_storageB,
236
+ ///< iterator over A operand in global memory
237
+ IteratorA iterator_A,
238
+ ///< iterator over B operand in global memory
239
+ IteratorB iterator_B,
240
+ int thread_idx,
241
+ int problem_size_k)
242
+ {
243
+ // NOT IMPLEMENTED FOR PIPELINED
244
+ }
245
+
246
+ /// Perform a threadblock-scoped matrix multiply-accumulate
247
+ CUTLASS_DEVICE
248
+ void operator()(
249
+ int gemm_k_iterations, ///< number of iterations of the mainloop
250
+ FragmentC& accum, ///< destination accumulator tile
251
+ IteratorA iterator_A, ///< iterator over A operand in global memory
252
+ IteratorB iterator_B, ///< iterator over B operand in global memory
253
+ FragmentC const& src_accum, ///< source accumulator tile
254
+ TransformA transform_A = TransformA(), ///< transformation applied to A fragment
255
+ TransformB transform_B = TransformB()) ///< transformation applied to B fragment
256
+ {
257
+
258
+ //
259
+ // Prologue
260
+ //
261
+
262
+ // Perform accumulation in the 'd' output operand
263
+ accum = src_accum;
264
+
265
+ FragmentA tb_frag_A;
266
+ FragmentB tb_frag_B;
267
+
268
+ tb_frag_A.clear();
269
+ tb_frag_B.clear();
270
+
271
+ // The last kblock is loaded in the prolog
272
+ iterator_A.load(tb_frag_A);
273
+ iterator_B.load(tb_frag_B);
274
+
275
+ ++iterator_A;
276
+ ++iterator_B;
277
+
278
+ this->smem_iterator_A_.store(transform_A(tb_frag_A));
279
+ this->smem_iterator_B_.store(transform_B(tb_frag_B));
280
+
281
+ ++this->smem_iterator_A_;
282
+ ++this->smem_iterator_B_;
283
+
284
+ __syncthreads();
285
+
286
+ // Pair of fragments used to overlap shared memory loads and math
287
+ // instructions
288
+ WarpFragmentA warp_frag_A[2];
289
+ WarpFragmentB warp_frag_B[2];
290
+
291
+ this->warp_tile_iterator_A_.set_kgroup_index(0);
292
+ this->warp_tile_iterator_B_.set_kgroup_index(0);
293
+
294
+ this->warp_tile_iterator_A_.load(warp_frag_A[0]);
295
+ this->warp_tile_iterator_B_.load(warp_frag_B[0]);
296
+
297
+ ++this->warp_tile_iterator_A_;
298
+ ++this->warp_tile_iterator_B_;
299
+
300
+ Operator warp_mma;
301
+
302
+ int smem_write_stage_idx = 1;
303
+
304
+ // Avoid reading out of bounds
305
+ iterator_A.clear_mask(gemm_k_iterations <= 1);
306
+ iterator_B.clear_mask(gemm_k_iterations <= 1);
307
+
308
+ // Issue loads during the first warp-level matrix multiply-add *AFTER*
309
+ // issuing shared memory loads (which have the tightest latency requirement).
310
+
311
+ //
312
+ // Mainloop
313
+ //
314
+
315
+ // Note: The main loop does not support Base::kWarpGemmIterations == 2.
316
+ CUTLASS_GEMM_LOOP
317
+ for (; gemm_k_iterations > 0; --gemm_k_iterations) {
318
+ //
319
+ // Loop over GEMM K dimension
320
+ //
321
+
322
+ CUTLASS_PRAGMA_UNROLL
323
+ for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) {
324
+ // Load warp-level tiles from shared memory, wrapping to k offset if
325
+ // this is the last group as the case may be.
326
+
327
+ if (warp_mma_k == Base::kWarpGemmIterations - 1) {
328
+ // Write fragments to shared memory
329
+ this->smem_iterator_A_.store(transform_A(tb_frag_A));
330
+
331
+ this->smem_iterator_B_.store(transform_B(tb_frag_B));
332
+
333
+ __syncthreads();
334
+
335
+ ++this->smem_iterator_A_;
336
+ ++this->smem_iterator_B_;
337
+
338
+ // Add negative offsets to return iterators to the 'start' of the
339
+ // circular buffer in shared memory
340
+ if (smem_write_stage_idx == 1) {
341
+ this->smem_iterator_A_.add_tile_offset({0, -Base::kStages});
342
+ this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
343
+ } else {
344
+ this->warp_tile_iterator_A_.add_tile_offset(
345
+ {0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations});
346
+ this->warp_tile_iterator_B_.add_tile_offset(
347
+ {-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0});
348
+ }
349
+
350
+ smem_write_stage_idx ^= 1;
351
+ }
352
+
353
+ this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) %
354
+ Base::kWarpGemmIterations);
355
+ this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) %
356
+ Base::kWarpGemmIterations);
357
+
358
+ this->warp_tile_iterator_A_.load(warp_frag_A[(warp_mma_k + 1) % 2]);
359
+ this->warp_tile_iterator_B_.load(warp_frag_B[(warp_mma_k + 1) % 2]);
360
+
361
+ ++this->warp_tile_iterator_A_;
362
+ ++this->warp_tile_iterator_B_;
363
+
364
+ if (warp_mma_k == 0) {
365
+ iterator_A.load(tb_frag_A);
366
+ iterator_B.load(tb_frag_B);
367
+
368
+ ++iterator_A;
369
+ ++iterator_B;
370
+
371
+ // Avoid reading out of bounds if this was the last loop iteration
372
+ iterator_A.clear_mask(gemm_k_iterations <= 2);
373
+ iterator_B.clear_mask(gemm_k_iterations <= 2);
374
+ }
375
+
376
+ warp_mma(accum, warp_frag_A[warp_mma_k % 2], warp_frag_B[warp_mma_k % 2], accum);
377
+ }
378
+ }
379
+ }
380
+ };
381
+
382
+ /////////////////////////////////////////////////////////////////////////////////////////////////
383
+
384
+ } // namespace threadblock
385
+ } // namespace gemm
386
+ } // namespace cutlass
387
+
388
+ /////////////////////////////////////////////////////////////////////////////////////////////////
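CustomMmaPipelined is driven from a kernel in the usual CUTLASS threadblock pattern: construct it over the shared-storage struct, clear an accumulator fragment, and invoke operator() with the global-memory iterators and the K iteration count. The snippet below is a hedged sketch of that pattern, not code from this commit; Mma, iterator_A, iterator_B, and problem_size_k are assumed to be defined by the surrounding kernel.

// Hypothetical kernel fragment; Mma is assumed to alias a fully specialized
// CustomMmaPipelined and the global-memory iterators are assumed to be built elsewhere.
__shared__ typename Mma::Base::SharedStorage shared_storage;

int warp_id = threadIdx.x / 32;
int lane_id = threadIdx.x % 32;

Mma mma(shared_storage, threadIdx.x, warp_id, lane_id);

typename Mma::FragmentC accum;
accum.clear();

// Each mainloop iteration consumes Mma::Shape::kK elements along K.
int gemm_k_iterations = (problem_size_k + Mma::Shape::kK - 1) / Mma::Shape::kK;

mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum);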
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/find_default_mma.h ADDED
@@ -0,0 +1,191 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ // Copyright (c) Microsoft Corporation.
33
+ // SPDX-License-Identifier: Apache-2.0
34
+
35
+ // DeepSpeed Team
36
+
37
+ /*! \file
38
+ \brief Cutlass provides helper template functions to figure out the right
39
+ datastructures to instantiate to run a GEMM with various parameters (see
40
+ `cutlass/gemm/threadblock/default_mma.h`). However, due to template
41
+ instantiation priority rules, it will only create an MmaMultistage with
43
+ kStages=3 (otherwise it creates an MmaPipelined, which is not compatible with
43
+ FastF32). kStages=3 uses too much shared memory and we want to use kStages=2,
44
+ so we just copy-pasted some code from `default_mma.h` and
45
+ `default_mma_core.h` files and wrapped this template to support our use case.
46
+
47
+ This is really only for the FastF32 case - aka using TensorCores with fp32.
48
+ */
49
+
50
+ #pragma once
51
+
52
+ #include "cutlass/gemm/threadblock/default_mma.h"
53
+ #include "cutlass/gemm/threadblock/default_mma_core_simt.h"
54
+ #include "cutlass/gemm/threadblock/default_mma_core_sm70.h"
55
+ #include "cutlass/gemm/threadblock/default_mma_core_sm75.h"
56
+ #include "cutlass/gemm/threadblock/default_mma_core_sm80.h"
57
+
58
+ namespace cutlass {
59
+ namespace gemm {
60
+ namespace threadblock {
61
+
62
+ template <
63
+ /// Element type for A matrix operand
64
+ typename ElementA,
65
+ /// Layout type for A matrix operand
66
+ typename LayoutA,
67
+ /// Access granularity of A matrix in units of elements
68
+ int kAlignmentA,
69
+ /// Element type for B matrix operand
70
+ typename ElementB,
71
+ /// Layout type for B matrix operand
72
+ typename LayoutB,
73
+ /// Access granularity of B matrix in units of elements
74
+ int kAlignmentB,
75
+ /// Element type for internal accumulation
76
+ typename ElementAccumulator,
77
+ /// Layout type for C and D matrix operand
78
+ typename LayoutC,
79
+ /// Operator class tag
80
+ typename OperatorClass,
81
+ /// Tag indicating architecture to tune for
82
+ typename ArchTag,
83
+ /// Threadblock-level tile size (concept: GemmShape)
84
+ typename ThreadblockShape,
85
+ /// Warp-level tile size (concept: GemmShape)
86
+ typename WarpShape,
87
+ /// Instruction-level tile size (concept: GemmShape)
88
+ typename InstructionShape,
89
+ /// Number of stages used in the pipelined mainloop
90
+ int Stages,
91
+ /// Operation performed by GEMM
92
+ typename Operator,
93
+ typename Enable_ = void>
94
+ struct FindDefaultMma {
95
+ static constexpr bool AccumulatorsInRowMajor = false;
96
+ static constexpr SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone;
97
+ using DefaultMma = cutlass::gemm::threadblock::DefaultMma<ElementA,
98
+ LayoutA,
99
+ kAlignmentA,
100
+ ElementB,
101
+ LayoutB,
102
+ kAlignmentB,
103
+ ElementAccumulator,
104
+ LayoutC,
105
+ OperatorClass,
106
+ ArchTag,
107
+ ThreadblockShape,
108
+ WarpShape,
109
+ InstructionShape,
110
+ Stages,
111
+ Operator,
112
+ AccumulatorsInRowMajor,
113
+ SharedMemoryClear>;
114
+ };
115
+
116
+ /// Specialization for sm80 / FastF32 / multistage with kStages=2
117
+ template <typename ElementA_,
118
+ /// Layout type for A matrix operand
119
+ typename LayoutA_,
120
+ /// Access granularity of A matrix in units of elements
121
+ int kAlignmentA,
122
+ typename ElementB_,
123
+ /// Layout type for B matrix operand
124
+ typename LayoutB_,
125
+ /// Access granularity of B matrix in units of elements
126
+ int kAlignmentB,
127
+ typename ElementAccumulator,
128
+ /// Threadblock-level tile size (concept: GemmShape)
129
+ typename ThreadblockShape,
130
+ /// Warp-level tile size (concept: GemmShape)
131
+ typename WarpShape,
132
+ /// Instruction-level tile size (concept: GemmShape)
133
+ typename InstructionShape,
134
+ int kStages,
135
+ typename Operator>
136
+ struct FindDefaultMma<ElementA_,
137
+ LayoutA_,
138
+ kAlignmentA,
139
+ ElementB_,
140
+ LayoutB_,
141
+ kAlignmentB,
142
+ ElementAccumulator,
143
+ layout::RowMajor,
144
+ arch::OpClassTensorOp,
145
+ arch::Sm80,
146
+ ThreadblockShape,
147
+ WarpShape,
148
+ InstructionShape,
149
+ kStages,
150
+ Operator,
151
+ typename cutlass::platform::enable_if<(kAlignmentA > 1)>::type> {
152
+ using LayoutC = layout::RowMajor;
153
+ using OperatorClass = arch::OpClassTensorOp;
154
+ using ArchTag = arch::Sm80;
155
+
156
+ using DefaultMma_ = cutlass::gemm::threadblock::DefaultMma<ElementA_,
157
+ LayoutA_,
158
+ kAlignmentA,
159
+ ElementB_,
160
+ LayoutB_,
161
+ kAlignmentB,
162
+ ElementAccumulator,
163
+ LayoutC,
164
+ OperatorClass,
165
+ ArchTag,
166
+ ThreadblockShape,
167
+ WarpShape,
168
+ InstructionShape,
169
+ 3,
170
+ Operator>;
171
+ struct DefaultMma : DefaultMma_ {
172
+ using MmaCore_ = typename DefaultMma_::MmaCore;
173
+ // Define the threadblock-scoped multistage matrix multiply
174
+ using ThreadblockMma =
175
+ cutlass::gemm::threadblock::MmaMultistage<typename MmaCore_::Shape,
176
+ typename DefaultMma_::IteratorA,
177
+ typename MmaCore_::SmemIteratorA,
178
+ MmaCore_::kCacheOpA,
179
+ typename DefaultMma_::IteratorB,
180
+ typename MmaCore_::SmemIteratorB,
181
+ MmaCore_::kCacheOpB,
182
+ ElementAccumulator,
183
+ LayoutC,
184
+ typename MmaCore_::MmaPolicy,
185
+ kStages>;
186
+ };
187
+ };
188
+
189
+ } // namespace threadblock
190
+ } // namespace gemm
191
+ } // namespace cutlass
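As an orientation for readers, the following is a hypothetical instantiation sketch (not part of this commit) showing how FindDefaultMma might be used to resolve a two-stage threadblock mainloop; the element types, alignments, and tile shapes are illustrative assumptions rather than the values used by the DeepSpeed kernels.

// Illustrative only: shapes and element types are assumptions.
using FoundMma = cutlass::gemm::threadblock::FindDefaultMma<
    cutlass::half_t, cutlass::layout::RowMajor, 8,   // A: element, layout, alignment
    cutlass::half_t, cutlass::layout::RowMajor, 8,   // B: element, layout, alignment
    float,                                           // accumulator element
    cutlass::layout::RowMajor,                       // C/D layout
    cutlass::arch::OpClassTensorOp,
    cutlass::arch::Sm80,
    cutlass::gemm::GemmShape<64, 64, 32>,            // threadblock tile
    cutlass::gemm::GemmShape<32, 32, 32>,            // warp tile
    cutlass::gemm::GemmShape<16, 8, 16>,             // tensor-core instruction shape
    2,                                               // kStages = 2, the point of the wrapper
    cutlass::arch::OpMultiplyAdd>::DefaultMma;

// Threadblock-scoped mainloop type resolved by the helper.
using ThreadblockMma = typename FoundMma::ThreadblockMma;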
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/mma_accum_lambda_iterator.h ADDED
@@ -0,0 +1,347 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ // Copyright (c) Microsoft Corporation.
33
+ // SPDX-License-Identifier: Apache-2.0
34
+
35
+ // DeepSpeed Team
36
+
37
+ #pragma once
38
+
39
+ #include "cutlass/functional.h"
40
+ #include "cutlass/gemm/warp/mma_simt_tile_iterator.h"
41
+ #include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm70.h"
42
+ #include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h"
43
+ #include "cutlass/matrix_shape.h"
44
+
45
+ /*
46
+ TensorCores have different accumulator layouts.
47
+ This file provides a class to easily map the accumulator
48
+ i-th element with the corresponding matrix row/col.
49
+ */
50
+
51
+ template <typename T, typename accum_t, int kWarpSize>
52
+ struct AccumLambdaIteratorSm80 {
53
+ static_assert(cutlass::platform::is_same<typename T::Layout, cutlass::layout::RowMajor>::value,
54
+ "only RowMajor is supported");
55
+
56
+ using Policy = typename T::Policy;
57
+ using InstructionShape = typename T::InstructionShape;
58
+ using OpDelta = typename T::OpDelta;
59
+ using Shape = typename T::Shape;
60
+ static int const kElementsPerAccess = InstructionShape::kN / 4;
61
+ static int const kRowsPerTile = 8;
62
+ static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile;
63
+
64
+ static cutlass::MatrixCoord CUTLASS_DEVICE
65
+ get_lane_offset(int8_t lane_id, int8_t warp_id, typename T::TensorCoord const& tile_offset)
66
+ {
67
+ int quad = (lane_id >> 2);
68
+ int lane_in_quad = (lane_id & 3);
69
+ return cutlass::MatrixCoord(
70
+ quad + tile_offset.row() * Shape::kRow,
71
+ lane_in_quad * kElementsPerAccess + tile_offset.column() * Shape::kColumn);
72
+ }
73
+
74
+ template <typename FA, typename FB, typename FC>
75
+ CUTLASS_DEVICE static void iterateRows(cutlass::MatrixCoord& lane_offset,
76
+ FA beginRow,
77
+ FB op,
78
+ FC endRow)
79
+ {
80
+ // See cutlass/gemm/warp/mma_tensor_op_tile_iterator.h
81
+ CUTLASS_PRAGMA_UNROLL
82
+ for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
83
+ CUTLASS_PRAGMA_UNROLL
84
+ for (int row = 0; row < kAccumulatorRows; ++row) {
85
+ int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow + row * kRowsPerTile +
86
+ lane_offset.row();
87
+ beginRow(accum_m);
88
+
89
+ CUTLASS_PRAGMA_UNROLL
90
+ for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
91
+ int mma_accum_start = kAccumulatorRows * kElementsPerAccess *
92
+ (mma_n * Policy::MmaIterations::kRow + mma_m);
93
+ CUTLASS_PRAGMA_UNROLL
94
+ for (int col = 0; col < kElementsPerAccess; ++col) {
95
+ int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col +
96
+ lane_offset.column();
97
+ int idx = mma_accum_start + row * kElementsPerAccess + col;
98
+ op(accum_m, accum_n, idx);
99
+ }
100
+ }
101
+
102
+ endRow(accum_m);
103
+ }
104
+ }
105
+ }
106
+
107
+ template <typename DT, typename F>
108
+ CUTLASS_DEVICE static bool reduceSameRow(int lane_id, DT& myValue, F fn)
109
+ {
110
+ // In each warp, 4 threads will work on the same row
111
+ // - the ones with the same `quad`
112
+ auto otherV = __shfl_xor_sync(0xffffffff, myValue, 1);
113
+ myValue = fn(myValue, otherV);
114
+ otherV = __shfl_xor_sync(0xffffffff, myValue, 2);
115
+ myValue = fn(myValue, otherV);
116
+ int lane_in_quad = (lane_id & 3);
117
+ return lane_in_quad == 0;
118
+ }
119
+ };
120
+
121
+ template <typename T, typename accum_t, int kWarpSize>
122
+ struct AccumLambdaIteratorSm70 {
123
+ static_assert(cutlass::platform::is_same<typename T::Layout, cutlass::layout::RowMajor>::value,
124
+ "only RowMajor is supported");
125
+
126
+ using Policy = typename T::Policy;
127
+ using InstructionShape = typename T::InstructionShape;
128
+ using OpDelta = typename T::OpDelta;
129
+ using Shape = typename T::Shape;
130
+ using Element = accum_t;
131
+
132
+ static int const kElementsPerPartial = 4;
133
+ using EleShapePerPatial =
134
+ typename cutlass::platform::conditional<cutlass::platform::is_same<Element, float>::value,
135
+ cutlass::MatrixShape<2, 2>,
136
+ cutlass::MatrixShape<1, 4>>::type;
137
+ static int const kElementsPerMma = 8;
138
+ static int const kAccumulatorPatials = 2;
139
+ using QuadShapePerPatialMma = cutlass::MatrixShape<4, 4>;
140
+
141
+ static cutlass::MatrixCoord CUTLASS_DEVICE
142
+ get_lane_offset(int8_t lane_id, int8_t warp_id, typename T::TensorCoord const& tile_offset)
143
+ {
144
+ int quad = (lane_id >> 2);
145
+ int lane_in_quad = (lane_id & 3);
146
+ int accum_m, accum_n;
147
+
148
+ if (cutlass::platform::is_same<Element, float>::value) {
149
+ // (quad[2],quad[0])+lane_in_quad[0]
150
+ accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + (lane_in_quad & 1);
151
+ // (quad[1])+lane_in_quad[1]
152
+ accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials +
153
+ (lane_in_quad & 2);
154
+ } else {
155
+ accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + lane_in_quad; // (quad[2],quad[0])
156
+ accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials;
157
+ }
158
+ return cutlass::MatrixCoord(accum_m + tile_offset.row() * Shape::kRow,
159
+ accum_n + tile_offset.column() * Shape::kColumn);
160
+ }
161
+
162
+ template <typename DT, typename F>
163
+ CUTLASS_DEVICE static bool reduceSameRow(int lane_id, DT& myValue, F fn)
164
+ {
165
+ static_assert(cutlass::platform::is_same<Element, float>::value,
166
+ "update to support non-float accum");
167
+ // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-fragment-mma-884-f16
168
+ // T0 & T2 share same line within a quad
169
+ auto otherV = __shfl_xor_sync(0xffffffff, myValue, 1 << 1);
170
+ myValue = fn(myValue, otherV);
171
+ // quad 0 and quad 2 are on the same lines
172
+ otherV = __shfl_xor_sync(0xffffffff, myValue, 1 << 3);
173
+ myValue = fn(myValue, otherV);
174
+ return (lane_id & ((1 << 1) | (1 << 3))) == 0;
175
+ }
176
+
177
+ template <typename FA, typename FB, typename FC>
178
+ CUTLASS_DEVICE static void iterateRows(cutlass::MatrixCoord& lane_offset,
179
+ FA beginRow,
180
+ FB op,
181
+ FC endRow)
182
+ {
183
+ CUTLASS_PRAGMA_UNROLL
184
+ for (int tile_m = 0; tile_m < Policy::TileIterations::kRow; ++tile_m) {
185
+ CUTLASS_PRAGMA_UNROLL
186
+ for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
187
+ CUTLASS_PRAGMA_UNROLL
188
+ for (int m = 0; m < EleShapePerPatial::kRow; ++m) {
189
+ int accum_m = tile_m * Policy::InterleavedTile::kRow +
190
+ mma_m * QuadShapePerPatialMma::kRow + m * 2 + lane_offset.row();
191
+ beginRow(accum_m);
192
+
193
+ CUTLASS_PRAGMA_UNROLL
194
+ for (int tile_n = 0; tile_n < Policy::TileIterations::kColumn; ++tile_n) {
195
+ CUTLASS_PRAGMA_UNROLL
196
+ for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
197
+ CUTLASS_PRAGMA_UNROLL
198
+ for (int p = 0; p < kAccumulatorPatials; ++p) {
199
+ CUTLASS_PRAGMA_UNROLL
200
+ for (int n = 0; n < EleShapePerPatial::kColumn; ++n) {
201
+ int mma_accum_start =
202
+ (((tile_n * Policy::TileIterations::kRow + tile_m) *
203
+ Policy::MmaIterations::kColumn +
204
+ mma_n) *
205
+ Policy::MmaIterations::kRow +
206
+ mma_m) *
207
+ kElementsPerMma;
208
+ int accum_n = tile_n * Policy::InterleavedTile::kColumn +
209
+ mma_n * QuadShapePerPatialMma::kColumn +
210
+ p * Policy::InterleavedTile::kColumn / 2 + n +
211
+ lane_offset.column();
212
+ int idx = mma_accum_start + p * kElementsPerPartial +
213
+ m * EleShapePerPatial::kColumn + n;
214
+ op(accum_m, accum_n, idx);
215
+ }
216
+ }
217
+ }
218
+ }
219
+ endRow(accum_m);
220
+ }
221
+ }
222
+ }
223
+ }
224
+ };
225
+
226
+ template <typename T, typename accum_t, int kWarpSize>
227
+ struct AccumLambdaIteratorSimt {
228
+ using Policy = typename T::Policy;
229
+ using Iterations = typename T::Iterations;
230
+ using Element = typename T::Element;
231
+ using Delta = typename T::Delta;
232
+ using Shape = typename T::Shape;
233
+ static_assert(cutlass::platform::is_same<typename T::Layout, cutlass::layout::RowMajor>::value,
234
+ "only RowMajor is supported");
235
+
236
+ template <typename DT, typename F>
237
+ CUTLASS_DEVICE static bool reduceSameRow(int lane_id, DT& myValue, F fn)
238
+ {
239
+ CUTLASS_PRAGMA_UNROLL
240
+ for (int bit = 1; bit < Policy::WarpShape::kColumn; bit *= 2) {
241
+ auto otherV = __shfl_xor_sync(0xffffffff, myValue, bit);
242
+ myValue = fn(myValue, otherV);
243
+ }
244
+ return (lane_id & (Policy::WarpShape::kColumn - 1)) == 0;
245
+ }
246
+
247
+ template <typename FA, typename FB, typename FC>
248
+ CUTLASS_DEVICE static void iterateRows(cutlass::MatrixCoord& lane_offset,
249
+ FA beginRow,
250
+ FB op,
251
+ FC endRow)
252
+ {
253
+ CUTLASS_PRAGMA_UNROLL
254
+ for (int mma_m = 0; mma_m < Iterations::kRow; ++mma_m) {
255
+ CUTLASS_PRAGMA_UNROLL
256
+ for (int m = 0; m < Policy::LaneMmaShape::kM; ++m) {
257
+ int accum_m = mma_m * Delta::kRow + m + lane_offset.row();
258
+ beginRow(accum_m);
259
+
260
+ CUTLASS_PRAGMA_UNROLL
261
+ for (int mma_n = 0; mma_n < Iterations::kColumn; ++mma_n) {
262
+ int accum_n = mma_n * Policy::WarpShape::kColumn * Policy::LaneMmaShape::kN +
263
+ lane_offset.column();
264
+ CUTLASS_PRAGMA_UNROLL
265
+ for (int n = 0; n < Policy::LaneMmaShape::kN; ++n) {
266
+ int idx = n + Policy::LaneMmaShape::kN *
267
+ (mma_n + Iterations::kColumn *
268
+ (m + mma_m * Policy::LaneMmaShape::kM));
269
+ op(accum_m, accum_n + n, idx);
270
+ }
271
+ }
272
+ endRow(accum_m);
273
+ }
274
+ }
275
+ }
276
+
277
+ static cutlass::MatrixCoord CUTLASS_DEVICE
278
+ get_lane_offset(int8_t lane_id, int8_t warp_id, typename T::TensorCoord const& tile_offset)
279
+ {
280
+ static_assert(cutlass::platform::is_same<typename Policy::LaneLayout,
281
+ cutlass::layout::RowMajorInterleaved<1>>::value,
282
+ "");
283
+ typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
284
+
285
+ cutlass::MatrixCoord lane_offset =
286
+ lane_layout.inverse(lane_id) *
287
+ cutlass::MatrixCoord(Policy::LaneMmaShape::kM, Policy::LaneMmaShape::kN);
288
+ return lane_offset + tile_offset * cutlass::MatrixCoord(Shape::kRow, Shape::kColumn);
289
+ }
290
+ };
291
+
292
+ template <typename T, typename accum_t, int kWarpSize>
293
+ struct DefaultMmaAccumLambdaIterator;
294
+
295
+ // Simt
296
+ template <typename S, typename P, typename accum_t, int kWarpSize>
297
+ struct DefaultMmaAccumLambdaIterator<
298
+ cutlass::gemm::warp::MmaSimtTileIterator<S,
299
+ cutlass::gemm::Operand::kC,
300
+ accum_t,
301
+ cutlass::layout::RowMajor,
302
+ P,
303
+ 1,
304
+ 1>,
305
+ accum_t,
306
+ kWarpSize> {
307
+ using WarpIterator =
308
+ typename cutlass::gemm::warp::MmaSimtTileIterator<S,
309
+ cutlass::gemm::Operand::kC,
310
+ accum_t,
311
+ cutlass::layout::RowMajor,
312
+ P,
313
+ 1,
314
+ 1>;
315
+ using Iterator = AccumLambdaIteratorSimt<WarpIterator, accum_t, kWarpSize>;
316
+ };
317
+
318
+ // TensorOp - Volta
319
+ template <typename S1, typename S2, typename accum_t, int kWarpSize>
320
+ struct DefaultMmaAccumLambdaIterator<
321
+ cutlass::gemm::warp::MmaVoltaTensorOpAccumulatorTileIterator<S1,
322
+ accum_t,
323
+ cutlass::layout::RowMajor,
324
+ S2,
325
+ cutlass::MatrixShape<1, 1>>,
326
+ accum_t,
327
+ kWarpSize> {
328
+ using WarpIterator = typename cutlass::gemm::warp::MmaVoltaTensorOpAccumulatorTileIterator<
329
+ S1,
330
+ accum_t,
331
+ cutlass::layout::RowMajor,
332
+ S2,
333
+ cutlass::MatrixShape<1, 1>>;
334
+ using Iterator = AccumLambdaIteratorSm70<WarpIterator, accum_t, kWarpSize>;
335
+ };
336
+
337
+ // TensorOp - Sm75+
338
+ template <typename S1, typename S2, typename S3, typename accum_t, int kWarpSize>
339
+ struct DefaultMmaAccumLambdaIterator<
340
+ cutlass::gemm::warp::
341
+ MmaTensorOpAccumulatorTileIterator<S1, accum_t, cutlass::layout::RowMajor, S2, S3>,
342
+ accum_t,
343
+ kWarpSize> {
344
+ using WarpIterator = typename cutlass::gemm::warp::
345
+ MmaTensorOpAccumulatorTileIterator<S1, accum_t, cutlass::layout::RowMajor, S2, S3>;
346
+ using Iterator = AccumLambdaIteratorSm80<WarpIterator, accum_t, kWarpSize>;
347
+ };
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/mma_from_smem.h ADDED
@@ -0,0 +1,1939 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
3
+ *reserved. SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice,
9
+ *this list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
+ *ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23
+ *LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24
+ *CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25
+ *SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26
+ *INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27
+ *CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28
+ *ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29
+ *POSSIBILITY OF SUCH DAMAGE.
30
+ *
31
+ **************************************************************************************************/
32
+
33
+ // Copyright (c) Microsoft Corporation.
34
+ // SPDX-License-Identifier: Apache-2.0
35
+
36
+ // DeepSpeed Team
37
+
38
+ /*! \file
39
+ \brief Template for a double-buffered threadblock-scoped GEMM kernel.
40
+ */
41
+
42
+ #pragma once
43
+
44
+ #include "cutlass/aligned_buffer.h"
45
+ #include "cutlass/arch/memory.h"
46
+ #include "cutlass/array.h"
47
+ #include "cutlass/cutlass.h"
48
+ #include "cutlass/epilogue/thread/linear_combination.h"
49
+ #include "cutlass/epilogue/threadblock/default_epilogue_simt.h"
50
+ #include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h"
51
+ #include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h"
52
+ #include "cutlass/functional.h"
53
+ #include "cutlass/gemm/gemm.h"
54
+ #include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h"
55
+ #include "cutlass/matrix_shape.h"
56
+ #include "cutlass/numeric_conversion.h"
57
+ #include "cutlass/numeric_types.h"
58
+ #include "cutlass/platform/platform.h"
59
+ #include "cutlass/transform/threadblock/vector_iterator.h"
60
+
61
+ #include "../epilogue/epilogue_thread_apply_logsumexp.h"
62
+ #include "../gemm/mma_accum_lambda_iterator.h"
63
+ #include "../gemm_kernel_utils.h"
64
+ #include "../iterators/make_residual_last.h"
65
+ #include "../iterators/transpose_warp_iterator.h"
66
+ #include "../iterators/warp_iterator_from_smem.h"
67
+ #include "cutlass/epilogue/threadblock/epilogue_smem_accumulator.h"
68
+ #include "cutlass/gemm/threadblock/mma_base.h"
69
+ #include "cutlass/gemm/threadblock/mma_multistage.h"
70
+ #include "cutlass/gemm/threadblock/mma_pipelined.h"
71
+ #include "cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h"
72
+
73
+ namespace cutlass {
74
+ namespace gemm {
75
+ namespace threadblock {
76
+
77
+ /// Shared storage object needed by accumulator
78
+ /// From 13_two_tensor_op_fusion/threadblock/b2b_mma_base_smem_accumulator.h
79
+ template <typename Shape_, typename Element_, typename Layout_, typename Padding_>
80
+ class AccumulatorSharedStorage {
81
+ public:
82
+ //
83
+ // Type definitions
84
+ //
85
+ using Shape = Shape_;
86
+ using Element = Element_;
87
+ using Layout = Layout_;
88
+ using Padding = Padding_;
89
+
90
+ /// Tensor reference to the accumulator
91
+ using TensorRefAccum = cutlass::TensorRef<Element, Layout>;
92
+
93
+ /// Shape of the accumulator matrix in shared memory
94
+ using ShapeAccum =
95
+ cutlass::MatrixShape<Shape::kM + Padding::kRow, Shape::kN + Padding::kColumn>;
96
+
97
+ public:
98
+ //
99
+ // Data members
100
+ //
101
+
102
+ /// Buffer for accumulator
103
+ cutlass::AlignedBuffer<Element, ShapeAccum::kCount> accum;
104
+
105
+ public:
106
+ //
107
+ // Methods
108
+ //
109
+
110
+ /// Returns a layout object for the Accum matrix
111
+ CUTLASS_DEVICE
112
+ static Layout LayoutAccum() { return Layout::packed({ShapeAccum::kRow, ShapeAccum::kColumn}); }
113
+
114
+ /// Returns a TensorRef to the Accumulator
115
+ CUTLASS_HOST_DEVICE
116
+ TensorRefAccum accum_ref() { return TensorRefAccum{accum.data(), LayoutAccum()}; }
117
+ };
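As a rough standalone illustration of the sizing above (not part of the committed header; the tile and padding numbers below are hypothetical), the shared-memory footprint is simply the padded accumulator extent:

    // Sketch only: mirrors ShapeAccum = MatrixShape<Shape::kM + Padding::kRow,
    //                                               Shape::kN + Padding::kColumn>.
    constexpr int kM = 64, kN = 64;          // accumulator tile (assumed)
    constexpr int kPadRow = 0, kPadCol = 8;  // shared-memory padding (assumed)
    constexpr int kShapeAccumRow = kM + kPadRow;
    constexpr int kShapeAccumCol = kN + kPadCol;
    constexpr int kCount = kShapeAccumRow * kShapeAccumCol;  // elements in the AlignedBuffer
    static_assert(kCount == 64 * 72, "4608 elements, i.e. 9 KiB at half precision");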
+
119
+ ////////////////////////////////////////////////////////////////////////////////
120
+ // Taken from
121
+ // https://github.com/NVIDIA/cutlass/blob/master/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_base_smem_accumulator.h
122
+ ////////////////////////////////////////////////////////////////////////////////
123
+
124
+ /// Structure to compute the matrix product targeting CUDA cores and SIMT math
125
+ /// instructions.
126
+ template <
127
+ /// Size of the Gemm problem - concept: gemm::GemmShape<>
128
+ typename Shape_,
129
+ // Maximum value for K
130
+ int kMaxK,
131
+ /// Policy describing tuning details (concept: MmaPolicy)
132
+ typename Policy_,
133
+ /// Number of stages,
134
+ int Stages,
135
+ /// Used for partial specialization
136
+ typename Enable = bool>
137
+ class MmaBaseFromSharedMemory {
138
+ public:
139
+ ///< Size of the Gemm problem - concept: gemm::GemmShape<>
140
+ using Shape = Shape_;
141
+
142
+ ///< Policy describing tuning details
143
+ using Policy = Policy_;
144
+
145
+ //
146
+ // Dependent types
147
+ //
148
+
149
+ /// Warp-level Mma
150
+ using Operator = typename Policy::Operator;
151
+
152
+ /// Shape describing the overall GEMM computed from shared memory
153
+ /// by each warp.
154
+ using WarpGemm = typename Policy::Operator::Shape;
155
+
156
+ /// Shape describing the number of warps filling the CTA
157
+ using WarpCount =
158
+ GemmShape<Shape::kM / WarpGemm::kM, Shape::kN / WarpGemm::kN, Shape::kK / WarpGemm::kK>;
159
+ using WarpCount1 = WarpCount;
160
+
161
+ /// Number of warp-level GEMM operations
162
+ static int const kWarpGemmIterations = (WarpGemm::kK / Operator::Policy::MmaShape::kK);
163
+ static int const kWarpGemmIterations1 = kWarpGemmIterations;
164
+
165
+ /// Number of stages
166
+ static int const kStages = Stages;
167
+
168
+ /// If this is true, we fill the entire shmem buffer at start
169
+ /// and don't need to iterate through it in a circular fashion
170
+ static bool const kSmemContainsEntireB = kMaxK <= Shape::kK * kStages;
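To make the condition above concrete, here is a sketch with assumed numbers (not a real instantiation): a threadblock K tile of 32 with 3 stages covers up to 96 columns of K, so a kMaxK of 64 fits entirely in shared memory and the mainloop never has to wrap the B buffer:

    constexpr int kShapeK = 32;  // Shape::kK, threadblock K tile (assumed)
    constexpr int kStages = 3;   // pipeline stages (assumed)
    constexpr int kMaxK = 64;    // maximum K of this GEMM (assumed)
    constexpr bool kSmemContainsEntireB = (kMaxK <= kShapeK * kStages);
    static_assert(kSmemContainsEntireB, "64 <= 32 * 3: all of B is resident at once");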
171
+
172
+ /// Tensor reference to the A operand
173
+ using TensorRefA = TensorRef<typename Operator::ElementA, typename Operator::LayoutA>;
174
+
175
+ /// Tensor reference to the B operand
176
+ using TensorRefB = TensorRef<typename Operator::ElementB, typename Operator::LayoutB>;
177
+
178
+ //
179
+ // Nested structs
180
+ //
181
+
182
+ /// Shared storage object needed by threadblock-scoped GEMM
183
+ class SharedStorage {
184
+ public:
185
+ //
186
+ // Type definitions
187
+ //
188
+
189
+ /// Shape of the B matrix operand in shared memory
190
+ using ShapeB = MatrixShape<Shape::kK * kStages + Policy::SmemPaddingB::kRow,
191
+ Shape::kN + Policy::SmemPaddingB::kColumn>;
192
+
193
+ public:
194
+ //
195
+ // Data members
196
+ //
197
+
198
+ /// Buffer for B operand
199
+ AlignedBuffer<typename Operator::ElementB, ShapeB::kCount> operand_B;
200
+
201
+ public:
202
+ //
203
+ // Methods
204
+ //
205
+
206
+ /// Returns a layout object for the B matrix
207
+ CUTLASS_HOST_DEVICE
208
+ static typename Operator::LayoutB LayoutB()
209
+ {
210
+ return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn});
211
+ }
212
+
213
+ /// Returns a TensorRef to the B operand
214
+ CUTLASS_HOST_DEVICE
215
+ TensorRefB operand_B_ref() { return TensorRefB{operand_B.data(), LayoutB()}; }
216
+ };
217
+
218
+ protected:
219
+ //
220
+ // Data members
221
+ //
222
+
223
+ // /// Iterator to load a warp-scoped tile of A operand from shared memory
224
+ // typename Operator::IteratorA warp_tile_iterator_A_;
225
+
226
+ /// Iterator to load a warp-scoped tile of B operand from shared memory
227
+ typename Operator::IteratorB warp_tile_iterator_B_;
228
+
229
+ public:
230
+ /// Construct from tensor references
231
+ CUTLASS_DEVICE
232
+ MmaBaseFromSharedMemory(
233
+ ///< Shared storage needed for internal use by threadblock-scoped GEMM
234
+ SharedStorage& shared_storage,
235
+ ///< ID within the threadblock
236
+ int thread_idx,
237
+ ///< ID of warp
238
+ int warp_idx,
239
+ ///< ID of each thread within a warp
240
+ int lane_idx)
241
+ : warp_tile_iterator_B_(shared_storage.operand_B_ref(), lane_idx)
242
+ {
243
+ }
244
+ };
245
+
246
+ namespace {
247
+
248
+ // Has the necessary trait compliance with WarpIteratorFromSmem but doesn't do
+ // anything, can be default initialized, and uses a fragment that takes up
+ // (almost) no space. This warp iterator is selected at compile time when
+ // elementwise on-the-fly scaling for operand A is disabled, in which case
+ // operations related to loading scale factors for operand A get wiped out by
+ // the compiler.
254
+ template <typename TensorRef>
255
+ class NoOpWarpIteratorScale {
256
+ public:
257
+ // in pipelined+multistage MMA implementations we keep an array of fragments.
258
+ // if we aren't using scaling we don't want to waste registers on fragments
259
+ // of scale elements, so ideally this would be sized 0.
260
+ // Since arrays of zero-sized objects are not allowed, using size as 1.
261
+ // The compiler will most likely wipe it out anyways.
262
+ using Fragment = cutlass::Array<char, 1>;
263
+
264
+ CUTLASS_HOST_DEVICE
265
+ NoOpWarpIteratorScale() {}
266
+
267
+ CUTLASS_HOST_DEVICE
268
+ NoOpWarpIteratorScale(TensorRef const&, int) {}
269
+
270
+ CUTLASS_HOST_DEVICE
271
+ NoOpWarpIteratorScale& add_tile_offset(typename TensorRef::TensorCoord const&) { return *this; }
272
+
273
+ CUTLASS_HOST_DEVICE
274
+ NoOpWarpIteratorScale& operator++() { return *this; }
275
+
276
+ CUTLASS_DEVICE
277
+ void load(Fragment&) const {}
278
+ };
279
+
280
+ // if scaling is enabled, performs fragment elementwise multiplication between
281
+ // fragment and its scaling factor.
282
+ template <typename Fragment, typename FragmentScale, bool ScalingEnabled>
283
+ class FragmentElementwiseScaler;
284
+
285
+ // specialization for scaling being enabled.
286
+ template <typename Fragment, typename FragmentScale>
287
+ class FragmentElementwiseScaler<Fragment, FragmentScale, true> {
288
+ public:
289
+ // cast scale_frag to correct type then apply elementwise to fragment
290
+ CUTLASS_DEVICE
291
+ static Fragment apply(Fragment frag, FragmentScale const& scale_frag)
292
+ {
293
+ Fragment converted_scale_frag =
294
+ cutlass::NumericArrayConverter<typename Fragment::Element,
295
+ typename FragmentScale::Element,
296
+ FragmentScale::kElements>()(scale_frag);
297
+ return cutlass::multiplies<Fragment>()(frag, converted_scale_frag);
298
+ }
299
+ };
300
+
301
+ // specialization for scaling being disabled. doesn't do anything and should
302
+ // just get wiped out by the compiler.
303
+ template <typename Fragment, typename FragmentScale>
304
+ class FragmentElementwiseScaler<Fragment, FragmentScale, false> {
305
+ public:
306
+ CUTLASS_DEVICE
307
+ static Fragment apply(Fragment frag, FragmentScale const&) { return frag; }
308
+ };
309
+ } // namespace
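The two specializations above either multiply the fragment elementwise by the converted scale fragment or return it untouched. A minimal standalone analogue, using plain std::array in place of cutlass::Array and purely for illustration:

    #include <array>
    #include <cassert>

    // Illustrative stand-in for FragmentElementwiseScaler: scale a fragment of
    // floats by a scale fragment, or pass it through unchanged.
    template <bool ScalingEnabled>
    std::array<float, 4> apply_scale(std::array<float, 4> frag, std::array<float, 4> const& scale)
    {
        if (ScalingEnabled) {
            for (int i = 0; i < 4; ++i) { frag[i] *= scale[i]; }
        }
        return frag;  // with scaling disabled this is the identity, like the no-op specialization
    }

    int main()
    {
        std::array<float, 4> frag{1.f, 2.f, 3.f, 4.f}, scale{0.5f, 0.5f, 2.f, 2.f};
        assert((apply_scale<true>(frag, scale) == std::array<float, 4>{0.5f, 1.f, 6.f, 8.f}));
        assert((apply_scale<false>(frag, scale) == frag));
        return 0;
    }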
+
311
+ ////////////////////////////////////////////////////////////////////////////////
312
+ // Taken from
313
+ // https://github.com/NVIDIA/cutlass/blob/master/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_pipelined_smem_accumulator.h
314
+ ////////////////////////////////////////////////////////////////////////////////
315
+
316
+ /// Structure to compute the matrix product targeting CUDA cores and SIMT math
317
+ /// instructions.
318
+ template <
319
+ /// Size of the Gemm problem - concept: gemm::GemmShape<>
320
+ typename Shape_,
321
+ // BEGIN smem
322
+ /// Iterates over the intermediate accumulator tile in shared memory
323
+ typename WarpIteratorA,
324
+ /// whether or not to perform elementwise multiplication of A
325
+ // by another matrix (A_scale) that is also kept in shared memory prior
326
+ // to matmul A @ B
327
+ bool ScaleOperandA_,
328
+ // Accumulator type
329
+ typename AccumulatorSharedStorage,
330
+ // END smem
331
+ /// Iterates over tiles of B operand in global memory
332
+ // (concept: ReadableTileIterator | ForwardTileIterator |
333
+ // MaskedTileIterator)
334
+ typename IteratorB_,
335
+ /// Iterates over tiles of B operand in shared memory
336
+ /// (concept: WriteableTileIterator | RandomAccessTileIterator)
337
+ typename SmemIteratorB_,
338
+ /// Data type of accumulator matrix
339
+ typename ElementC_,
340
+ /// Data type of accumulator matrix
341
+ typename LayoutC_,
342
+ /// Policy describing tuning details (concept: MmaPolicy)
343
+ typename Policy_,
344
+ /// Transformation applied to B operand
345
+ typename TransformB_ = NumericArrayConverter<typename SmemIteratorB_::Element,
346
+ typename IteratorB_::Element,
347
+ IteratorB_::Fragment::kElements>,
348
+ /// Used for partial specialization
349
+ typename Enable = bool>
350
+ class MmaPipelinedFromSharedMemory
351
+ : public MmaBaseFromSharedMemory<Shape_, AccumulatorSharedStorage::Shape::kN, Policy_, 2> {
352
+ public:
353
+ ///< Base class
354
+ using Base = MmaBaseFromSharedMemory<Shape_, AccumulatorSharedStorage::Shape::kN, Policy_, 2>;
355
+
356
+ using Shape = Shape_; ///< Size of the Gemm problem - concept: gemm::GemmShape<>
357
+ static constexpr bool ScaleOperandA = ScaleOperandA_;
358
+
359
+ ///< loads fragments of A_scale from shared memory if operand A scaling is
360
+ ///< enabled. otherwise no-op.
361
+ using WarpIteratorAScale = typename cutlass::platform::conditional<
362
+ ScaleOperandA,
363
+ WarpIteratorA,
364
+ NoOpWarpIteratorScale<typename WarpIteratorA::TensorRef>>::type;
365
+
366
+ using IteratorB = IteratorB_; ///< Iterates over tiles of B operand in global memory
367
+ using ElementC = ElementC_; ///< Data type of accumulator matrix
368
+ using LayoutC = LayoutC_; ///< Layout of accumulator matrix
369
+ using Policy = Policy_; ///< Policy describing tuning details
370
+
371
+ using SmemIteratorB = SmemIteratorB_;
372
+
373
+ using TransformB = TransformB_;
374
+
375
+ //
376
+ // Dependent types
377
+ //
378
+
379
+ /// Fragment of operand B loaded from global memory
380
+ using FragmentB = typename IteratorB::Fragment;
381
+
382
+ /// Fragment of accumulator tile
383
+ using FragmentC = typename Policy::Operator::FragmentC;
384
+
385
+ /// Warp-level Mma
386
+ using Operator = typename Policy::Operator;
387
+
388
+ /// Obtain the arch tag from the warp-level operator
389
+ using ArchTag = typename Policy::Operator::ArchTag;
390
+
391
+ /// Complex transform on B operand
392
+ static ComplexTransform const kTransformB = Operator::kTransformB;
393
+
394
+ // statically assert kStages for MmaPipelined is two (double-buffered pipeline)
395
+ static_assert((Base::kStages == 2), "MmaPipelined requires kStages set to value 2");
396
+
397
+ private:
398
+ using WarpFragmentA = typename Operator::FragmentA;
399
+
400
+ /// fragment type of OperandA elementwise scaling matrix. (almost) empty
401
+ /// if operand A scaling is disabled.
402
+ using WarpFragmentAScale = typename WarpIteratorAScale::Fragment;
403
+
404
+ using WarpFragmentB = typename Operator::FragmentB;
405
+
406
+ /// applies scaling factor to operand A fragment if operand A scaling is
407
+ /// enabled. otherwise no-op.
408
+ using FragmentAScaler =
409
+ FragmentElementwiseScaler<WarpFragmentA, WarpFragmentAScale, ScaleOperandA>;
410
+
411
+ protected:
412
+ // /// Iterator to write threadblock-scoped tile of A operand to shared memory
413
+ // SmemIteratorA smem_iterator_A_;
414
+
415
+ /// Iterator to write threadblock-scoped tile of B operand to shared memory
416
+ SmemIteratorB smem_iterator_B_;
417
+
418
+ /// Iterator to load a warp-scoped tile of A operand from intermediate
419
+ /// accumulator tile
420
+ WarpIteratorA warp_tile_iterator_A_;
421
+
422
+ /// Iterator to load a warp-scoped tile of A_scale from intermediate
423
+ /// accumulator tile (only used if ScaleOperandA_ is true)
424
+ WarpIteratorAScale warp_tile_iterator_A_scale_;
425
+
426
+ public:
427
+ /// constructor for MMA with operand A scaling enabled.
428
+ CUTLASS_DEVICE
429
+ MmaPipelinedFromSharedMemory(
430
+ // shared storage needed for internal use by threadblock-scoped GEMM
431
+ typename Base::SharedStorage& shared_storage,
432
+ // warp iterator over A tile held in shared memory
433
+ WarpIteratorA warp_iter_a,
434
+ // warp iterator over A_scale tile held in shared memory
435
+ WarpIteratorAScale warp_iter_a_scale,
436
+ int thread_idx,
437
+ int warp_idx,
438
+ int lane_idx)
439
+ : Base(shared_storage, thread_idx, warp_idx, lane_idx),
440
+ warp_tile_iterator_A_(warp_iter_a),
441
+ warp_tile_iterator_A_scale_(warp_iter_a_scale),
442
+ smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx)
443
+ {
444
+ // Compute warp location within threadblock tile by mapping the warp_id to
445
+ // three coordinates:
446
+ // _m: the warp's position within the threadblock along the M dimension
447
+ // _n: the warp's position within the threadblock along the N dimension
448
+ // _k: the warp's position within the threadblock along the K dimension
449
+ int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
450
+ int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
451
+ int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
452
+ int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
453
+
454
+ // Add per-warp offsets in units of warp-level tiles
455
+ this->warp_tile_iterator_A_.add_tile_offset(
456
+ {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
457
+ this->warp_tile_iterator_A_scale_.add_tile_offset(
458
+ {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
459
+ this->warp_tile_iterator_B_.add_tile_offset(
460
+ {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
461
+ }
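The warp-coordinate mapping used in both constructors is plain integer arithmetic. A standalone walkthrough with a hypothetical 2x2x2 warp arrangement shows how warp_idx splits into (m, n, k):

    #include <cstdio>

    int main()
    {
        // Assumed warp grid: WarpCount::kM = 2, kN = 2, kK = 2 (eight warps per threadblock).
        constexpr int kWarpCountM = 2, kWarpCountN = 2, kWarpCountK = 2;
        for (int warp_idx = 0; warp_idx < kWarpCountM * kWarpCountN * kWarpCountK; ++warp_idx) {
            int warp_idx_mn = warp_idx % (kWarpCountM * kWarpCountN);
            int warp_idx_k = warp_idx / (kWarpCountM * kWarpCountN);
            int warp_idx_m = warp_idx_mn % kWarpCountM;
            int warp_idx_n = warp_idx_mn / kWarpCountM;
            // e.g. warp_idx 5 -> (m, n, k) = (1, 0, 1)
            std::printf("warp %d -> m=%d n=%d k=%d\n", warp_idx, warp_idx_m, warp_idx_n, warp_idx_k);
        }
        return 0;
    }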
+
463
+ /// Construct from tensor references
464
+ CUTLASS_DEVICE
465
+ MmaPipelinedFromSharedMemory(
466
+ typename Base::SharedStorage& shared_storage, ///< Shared storage needed for internal use
467
+ ///< by threadblock-scoped GEMM
468
+ AccumulatorSharedStorage& accumulator_shared_storage,
469
+ int thread_idx, ///< ID within the threadblock
470
+ int warp_idx, ///< ID of warp
471
+ int lane_idx, ///< ID of each thread within a warp
472
+ int problem_size_0_n)
473
+ : Base(shared_storage, thread_idx, warp_idx, lane_idx),
474
+ warp_tile_iterator_A_(accumulator_shared_storage.accum_ref(), lane_idx),
475
+ smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx)
476
+ {
477
+ // Compute warp location within threadblock tile by mapping the warp_id to
478
+ // three coordinates:
479
+ // _m: the warp's position within the threadblock along the M dimension
480
+ // _n: the warp's position within the threadblock along the N dimension
481
+ // _k: the warp's position within the threadblock along the K dimension
482
+
483
+ int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN);
484
+ int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN);
485
+
486
+ int warp_idx_m = warp_idx_mn % Base::WarpCount::kM;
487
+ int warp_idx_n = warp_idx_mn / Base::WarpCount::kM;
488
+
489
+ // Add per-warp offsets in units of warp-level tiles
490
+ this->warp_tile_iterator_A_.add_tile_offset(
491
+ {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k});
492
+ this->warp_tile_iterator_B_.add_tile_offset(
493
+ {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n});
494
+ }
495
+
496
+ // For API compatibility with MmaMultistageFromSharedMemory
497
+ // but not supported as it worsens perf: older gpus < sm80 don't
498
+ // support async transfers and have to waste registers
499
+ CUTLASS_DEVICE
500
+ void set_prologue_done(bool value) {}
501
+ CUTLASS_DEVICE
502
+ static void prologue(typename Base::SharedStorage& shared_storage,
503
+ IteratorB iterator_B1,
504
+ int thread_idx,
505
+ int problem_size_0_n)
506
+ {
507
+ }
508
+
509
+ CUTLASS_DEVICE
510
+ static void drain_cp_asyncs() {}
511
+
512
+ /// Perform a threadblock-scoped matrix multiply-accumulate
513
+ CUTLASS_DEVICE
514
+ void operator()(int gemm_k_iterations, ///< number of iterations of the mainloop
515
+ FragmentC& accum, ///< destination accumulator tile
516
+ // IteratorA iterator_A, ///< iterator over A
517
+ // operand in global memory
518
+ IteratorB iterator_B, ///< iterator over B operand in global memory
519
+ FragmentC const& src_accum, ///< source accumulator tile
520
+ // TransformA transform_A = TransformA(), ///< transformation
521
+ // applied to A fragment
522
+ TransformB transform_B = TransformB())
523
+ { ///< transformation applied to B fragment
524
+
525
+ //
526
+ // Prologue
527
+ //
528
+
529
+ // Perform accumulation in the 'd' output operand
530
+ accum = src_accum;
531
+
532
+ FragmentB tb_frag_B;
533
+
534
+ tb_frag_B.clear();
535
+
536
+ // The last kblock is loaded in the prologue
537
+ iterator_B.set_residual_tile(gemm_k_iterations == 1);
538
+ iterator_B.load(tb_frag_B);
539
+
540
+ ++iterator_B;
541
+
542
+ this->smem_iterator_B_.store(transform_B(tb_frag_B));
543
+
544
+ ++this->smem_iterator_B_;
545
+
546
+ __syncthreads();
547
+
548
+ // remember that WarpFragmentAScale and WarpIteratorAScale are empty/no-op
549
+ // if scaling is disabled.
550
+
551
+ // Pair of fragments used to overlap shared memory loads and math
552
+ // instructions
553
+ WarpFragmentA warp_frag_A[2];
554
+ WarpFragmentAScale warp_frag_A_scale[2];
555
+ WarpFragmentB warp_frag_B[2];
556
+ warp_frag_A[0].clear();
557
+ warp_frag_A_scale[0].clear();
558
+ warp_frag_B[0].clear();
559
+
560
+ this->warp_tile_iterator_B_.set_kgroup_index(0);
561
+
562
+ this->warp_tile_iterator_A_.load(warp_frag_A[0]);
563
+ this->warp_tile_iterator_A_scale_.load(warp_frag_A_scale[0]);
564
+ this->warp_tile_iterator_B_.load(warp_frag_B[0]);
565
+
566
+ ++this->warp_tile_iterator_A_;
567
+ ++this->warp_tile_iterator_A_scale_;
568
+ ++this->warp_tile_iterator_B_;
569
+
570
+ Operator warp_mma;
571
+
572
+ int smem_write_stage_idx = 1;
573
+
574
+ // Avoid reading out of bounds
575
+ iterator_B.set_residual_tile(gemm_k_iterations == 2);
576
+ iterator_B.clear_mask(gemm_k_iterations <= 1);
577
+
578
+ // Issue loads during the first warp-level matrix multiply-add *AFTER*
579
+ // issuing shared memory loads (which have the tightest latency requirement).
580
+
581
+ //
582
+ // Mainloop
583
+ //
584
+
585
+ // Note: The main loop does not support Base::kWarpGemmIterations == 2.
586
+ CUTLASS_GEMM_LOOP
587
+ for (; gemm_k_iterations > 0; --gemm_k_iterations) {
588
+ //
589
+ // Loop over GEMM K dimension
590
+ //
591
+
592
+ CUTLASS_PRAGMA_UNROLL
593
+ for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) {
594
+ // Load warp-level tiles from shared memory, wrapping to k offset if
595
+ // this is the last group as the case may be.
596
+ bool hasNext = true;
597
+
598
+ if (warp_mma_k == Base::kWarpGemmIterations - 1) {
599
+ // Write fragments to shared memory
600
+ this->smem_iterator_B_.store(transform_B(tb_frag_B));
601
+
602
+ __syncthreads();
603
+
604
+ ++this->smem_iterator_B_;
605
+
606
+ // Add negative offsets to return iterators to the 'start' of the
607
+ // circular buffer in shared memory SMEM: Don't reset iterator A, as
608
+ // we are continuing our iteration at this point
609
+ if (smem_write_stage_idx == 1) {
610
+ this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0});
611
+ } else {
612
+ this->warp_tile_iterator_B_.add_tile_offset(
613
+ {-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0});
614
+ }
615
+
616
+ smem_write_stage_idx ^= 1;
617
+ hasNext = gemm_k_iterations > 1;
618
+ }
619
+
620
+ // Only read the next if we need to
621
+ if (hasNext) {
622
+ this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) %
623
+ Base::kWarpGemmIterations);
624
+
625
+ this->warp_tile_iterator_A_.load(warp_frag_A[(warp_mma_k + 1) % 2]);
626
+ this->warp_tile_iterator_A_scale_.load(warp_frag_A_scale[(warp_mma_k + 1) % 2]);
627
+ this->warp_tile_iterator_B_.load(warp_frag_B[(warp_mma_k + 1) % 2]);
628
+
629
+ ++this->warp_tile_iterator_A_;
630
+ ++this->warp_tile_iterator_A_scale_;
631
+ ++this->warp_tile_iterator_B_;
632
+
633
+ if (warp_mma_k == 0) {
634
+ iterator_B.load(tb_frag_B);
635
+
636
+ ++iterator_B;
637
+
638
+ // Avoid reading out of bounds if this was the last loop iteration
639
+ iterator_B.set_residual_tile(gemm_k_iterations == 3);
640
+ iterator_B.clear_mask(gemm_k_iterations <= 2);
641
+ }
642
+ }
643
+
644
+ warp_mma(accum,
645
+ FragmentAScaler::apply(warp_frag_A[warp_mma_k % 2],
646
+ warp_frag_A_scale[warp_mma_k % 2]),
647
+ warp_frag_B[warp_mma_k % 2],
648
+ accum);
649
+ }
650
+ }
651
+ }
652
+ };
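Because MmaPipelined is fixed at two stages, the shared-memory bookkeeping in the mainloop above is a toggle: one K iteration rewinds the smem write iterator by kStages tiles, the next rewinds the warp-level read iterator, and smem_write_stage_idx ^= 1 flips between the two. An index-only sketch of that alternation (no real iterators, purely illustrative):

    #include <cstdio>

    int main()
    {
        constexpr int kStages = 2;  // MmaPipelined requires exactly two stages
        int smem_write_stage_idx = 1;
        for (int k_iter = 4; k_iter > 0; --k_iter) {
            if (smem_write_stage_idx == 1) {
                std::printf("iter %d: rewind smem write iterator by %d tiles\n", k_iter, kStages);
            } else {
                std::printf("iter %d: rewind warp-level read iterator\n", k_iter);
            }
            smem_write_stage_idx ^= 1;  // alternate the two rewinds, as in the mainloop above
        }
        return 0;
    }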
+
654
+ ////////////////////////////////////////////////////////////////////////////////
655
+ // Taken from
656
+ // https://github.com/NVIDIA/cutlass/blob/master/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_multistage_smem_accumulator.h
657
+ ////////////////////////////////////////////////////////////////////////////////
658
+
659
+ /// Structure to compute the matrix product targeting CUDA cores and SIMT math
660
+ /// instructions.
661
+ template <
662
+ /// Size of the Gemm problem - concept: gemm::GemmShape<>
663
+ typename Shape1_,
664
+ /// Iterates over the intermediate accumulator tile in shared memory
665
+ typename WarpIteratorA1_,
666
+ /// whether or not to perform elementwise multiplication of A
667
+ // by another matrix (A_scale) that is also kept in shared memory prior
668
+ // to matmul A @ B
669
+ bool ScaleOperandA_,
670
+ // Accumulator type
671
+ typename AccumulatorSharedStorage,
672
+ /// Iterates over tiles of B operand in global memory
673
+ // (concept: ReadableTileIterator | ForwardTileIterator |
674
+ // MaskedTileIterator)
675
+ typename IteratorB1_,
676
+ /// Iterates over tiles of B operand in shared memory
677
+ /// (concept: WriteableTileIterator | RandomAccessTileIterator)
678
+ typename SmemIteratorB1_,
679
+ /// Cache operation for operand B
680
+ cutlass::arch::CacheOperation::Kind CacheOpB1,
681
+ /// Data type of accumulator matrix
682
+ typename ElementC_,
683
+ /// Data type of accumulator matrix
684
+ typename LayoutC_,
685
+ /// Policy describing tuning details (concept: MmaPolicy)
686
+ typename Policy1_,
687
+ /// Number of stages,
688
+ int Stages_,
689
+ int kMaxK_,
690
+ /// Used for partial specialization
691
+ typename Enable = bool>
692
+ class MmaMultistageFromSharedMemory
693
+ : public MmaBaseFromSharedMemory<Shape1_, kMaxK_, Policy1_, Stages_> {
694
+ public:
695
+ ///< Base class
696
+ using Base = MmaBaseFromSharedMemory<Shape1_, kMaxK_, Policy1_, Stages_>;
697
+
698
+ ///< Size of the Gemm problem - concept: gemm::GemmShape<>
699
+ using Shape1 = Shape1_;
700
+ ///< Iterates over tiles of B operand in global memory
701
+ using IteratorB1 = IteratorB1_;
702
+ using IteratorB = IteratorB1;
703
+ ///< Policy describing tuning details
704
+ using Policy1 = Policy1_;
705
+
706
+ using SmemIteratorB1 = SmemIteratorB1_;
707
+ using WarpIteratorA1 = WarpIteratorA1_; ///< Iterates over the intermediate
708
+ ///< accumulator tile in shared memory
709
+ static constexpr bool ScaleOperandA = ScaleOperandA_;
710
+
711
+ ///< warp level iterator over A_scale matrix tile kept in shared memory.
712
+ ///< if elementwise A scaling is disabled then everything this does is no-op.
713
+ using WarpIteratorAScale = typename cutlass::platform::conditional<
714
+ ScaleOperandA,
715
+ WarpIteratorA1,
716
+ NoOpWarpIteratorScale<typename WarpIteratorA1::TensorRef>>::type;
717
+ ///< Data type of accumulator matrix
718
+ using ElementC = ElementC_;
719
+ ///< Layout of accumulator matrix
720
+ using LayoutC = LayoutC_;
721
+
722
+ static cutlass::arch::CacheOperation::Kind const kCacheOpB1 = CacheOpB1;
723
+ static constexpr bool kSmemContainsEntireB = Base::kSmemContainsEntireB;
724
+
725
+ //
726
+ // Dependent types
727
+ //
728
+
729
+ /// Fragment of accumulator tile
730
+ using FragmentC1 = typename Policy1::Operator::FragmentC;
731
+ using FragmentC = FragmentC1;
732
+
733
+ /// Warp-level Mma
734
+ using Operator1 = typename Policy1::Operator;
735
+
736
+ /// Minimum architecture is Sm80 to support cp.async
737
+ using ArchTag = arch::Sm80;
738
+
739
+ /// Complex transform on B operand
740
+ static ComplexTransform const kTransformB1 = Operator1::kTransformB;
741
+
742
+ /// Internal structure exposed for introspection.
743
+ struct Detail {
744
+ static_assert(Base::kWarpGemmIterations1 > 1,
745
+ "The pipelined structure requires at least two warp-level "
746
+ "GEMM operations.");
747
+
748
+ /// Number of cp.async instructions to load one stage of operand B
749
+ static int const TBLoadIterationsB1 = IteratorB1::ThreadMap::Iterations::kCount;
750
+
751
+ /// Number of cp.async instructions to load one group of operand B
752
+ static int const kAccessesPerGroupB1 =
753
+ (TBLoadIterationsB1 + Base::kWarpGemmIterations1 - 1) / Base::kWarpGemmIterations1;
754
+ };
755
+
756
+ static constexpr int kNumStagesConcurrentLoad = kSmemContainsEntireB ? Base::kStages
757
+ : Base::kStages - 1;
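Detail::kAccessesPerGroupB1 is a ceiling division that spreads each stage's cp.async loads of B evenly over the warp-level K iterations, and kNumStagesConcurrentLoad is the number of stages the prologue keeps in flight (all of them only when B is fully resident in shared memory). A sketch with assumed counts:

    // Assumed counts, mirroring the expressions above (not taken from a real instantiation).
    constexpr int kTBLoadIterationsB1 = 8;   // cp.async instructions per stage of B
    constexpr int kWarpGemmIterations1 = 4;  // warp-level MMAs per threadblock K tile
    constexpr int kStages = 3;
    constexpr bool kSmemContainsEntireB = false;

    constexpr int kAccessesPerGroupB1 =
        (kTBLoadIterationsB1 + kWarpGemmIterations1 - 1) / kWarpGemmIterations1;  // ceil(8 / 4) = 2
    constexpr int kNumStagesConcurrentLoad = kSmemContainsEntireB ? kStages : kStages - 1;  // 2

    static_assert(kAccessesPerGroupB1 == 2, "two cp.async accesses issued per warp-level MMA");
    static_assert(kNumStagesConcurrentLoad == 2, "the prologue preloads all but one stage");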
+
759
+ private:
760
+ using WarpLoadedFragmentA1 = typename Operator1::FragmentA;
761
+ /// fragment of OperandA scale matrix. if operand A scaling is disabled this
762
+ /// is (almost) empty.
763
+ using WarpLoadedFragmentA1Scale = typename WarpIteratorAScale::Fragment;
764
+ using WarpLoadedFragmentB1 = typename Operator1::FragmentB;
765
+ using WarpTransformedFragmentA1 = typename Operator1::TransformedFragmentA;
766
+ using WarpTransformedFragmentB1 = typename Operator1::TransformedFragmentB;
767
+
768
+ /// applies elementwise scaling to fragment of A. if operand A scaling is
769
+ /// disabled this is a no-op.
770
+ using FragmentAScaler =
771
+ FragmentElementwiseScaler<WarpLoadedFragmentA1, WarpLoadedFragmentA1Scale, ScaleOperandA>;
772
+
773
+ private:
774
+ //
775
+ // Data members
776
+ //
777
+
778
+ /// Iterator to load a warp-scoped tile of A1 operand from intermediate
779
+ /// accumulator tile
780
+ WarpIteratorA1 warp_tile_iterator_A1_;
781
+
782
+ /// Iterator to load a warp-scoped tile of A1_scale operand from shared memory
783
+ /// if operand A scaling is disabled everything this does is a no-op.
784
+ WarpIteratorAScale warp_tile_iterator_A1_scale_;
785
+
786
+ /// Iterator to write threadblock-scoped tile of B operand to shared memory
787
+ SmemIteratorB1 smem_iterator_B1_;
788
+
789
+ bool prologue_done_;
790
+
791
+ public:
792
+ /// constructor for MMA with operand A scaling enabled.
793
+ CUTLASS_DEVICE
794
+ MmaMultistageFromSharedMemory(
795
+ // shared storage needed for internal use by threadblock-scoped GEMM
796
+ typename Base::SharedStorage& shared_storage,
797
+ // warp level iterator over operand A tile kept in shared memory
798
+ WarpIteratorA1 warp_tile_iterator_A1,
799
+ // warp level iterator over operand A elementwise scale tile kept in
800
+ // shared memory.
801
+ WarpIteratorAScale warp_tile_iterator_A1_scale,
802
+ int thread_idx,
803
+ int warp_idx,
804
+ int lane_idx)
805
+ : Base(shared_storage, thread_idx, warp_idx, lane_idx),
806
+ warp_tile_iterator_A1_(warp_tile_iterator_A1),
807
+ warp_tile_iterator_A1_scale_(warp_tile_iterator_A1_scale),
808
+ smem_iterator_B1_(shared_storage.operand_B_ref(), thread_idx),
809
+ prologue_done_(false)
810
+ {
811
+ // Compute warp location within threadblock tile by mapping the warp_id to
812
+ // three coordinates:
813
+ // _m: the warp's position within the threadblock along the M dimension
814
+ // _n: the warp's position within the threadblock along the N dimension
815
+ // _k: the warp's position within the threadblock along the K dimension
816
+ int warp_idx_mn_1 = warp_idx % (Base::WarpCount1::kM * Base::WarpCount1::kN);
817
+ int warp_idx_k_1 = warp_idx / (Base::WarpCount1::kM * Base::WarpCount1::kN);
818
+ int warp_idx_m_1 = warp_idx_mn_1 % Base::WarpCount1::kM;
819
+ int warp_idx_n_1 = warp_idx_mn_1 / Base::WarpCount1::kM;
820
+
821
+ // Add per-warp offsets in units of warp-level tiles
822
+ warp_tile_iterator_A1_.add_tile_offset(
823
+ {warp_idx_m_1, Base::kWarpGemmIterations1 * warp_idx_k_1});
824
+ warp_tile_iterator_A1_scale_.add_tile_offset(
825
+ {warp_idx_m_1, Base::kWarpGemmIterations1 * warp_idx_k_1});
826
+ this->warp_tile_iterator_B_.add_tile_offset(
827
+ {Base::kWarpGemmIterations1 * warp_idx_k_1, warp_idx_n_1});
828
+ }
829
+
830
+ /// Construct from tensor references
831
+ CUTLASS_DEVICE
832
+ MmaMultistageFromSharedMemory(
833
+ typename Base::SharedStorage& shared_storage, ///< Shared storage needed for internal use
834
+ ///< by threadblock-scoped GEMM
835
+ AccumulatorSharedStorage& accumulator_shared_storage,
836
+ ///< ID within the threadblock
837
+ int thread_idx,
838
+ ///< ID of warp
839
+ int warp_idx,
840
+ ///< ID of each thread within a warp
841
+ int lane_idx,
842
+ ///< GEMM0 N is used for accumulator extent
843
+ int problem_size_0_n)
844
+ : Base(shared_storage, thread_idx, warp_idx, lane_idx),
845
+ warp_tile_iterator_A1_(accumulator_shared_storage.accum_ref(), lane_idx),
846
+ smem_iterator_B1_(shared_storage.operand_B_ref(), thread_idx),
847
+ prologue_done_(false)
848
+ {
849
+ // Compute warp location within threadblock tile by mapping the warp_id to
850
+ // three coordinates:
851
+ // _m: the warp's position within the threadblock along the M dimension
852
+ // _n: the warp's position within the threadblock along the N dimension
853
+ // _k: the warp's position within the threadblock along the K dimension
854
+
855
+ int warp_idx_mn_1 = warp_idx % (Base::WarpCount1::kM * Base::WarpCount1::kN);
856
+ int warp_idx_k_1 = warp_idx / (Base::WarpCount1::kM * Base::WarpCount1::kN);
857
+
858
+ int warp_idx_m_1 = warp_idx_mn_1 % Base::WarpCount1::kM;
859
+ int warp_idx_n_1 = warp_idx_mn_1 / Base::WarpCount1::kM;
860
+
861
+ // Add per-warp offsets in units of warp-level tiles
862
+ warp_tile_iterator_A1_.add_tile_offset(
863
+ {warp_idx_m_1, Base::kWarpGemmIterations1 * warp_idx_k_1});
864
+ this->warp_tile_iterator_B_.add_tile_offset(
865
+ {Base::kWarpGemmIterations1 * warp_idx_k_1, warp_idx_n_1});
866
+ }
867
+
868
+ CUTLASS_DEVICE
869
+ void set_prologue_done(bool value) { prologue_done_ = value; }
870
+
871
+ CUTLASS_DEVICE
872
+ static void prologue(typename Base::SharedStorage& shared_storage,
873
+ IteratorB iterator_B1,
874
+ int thread_idx,
875
+ int problem_size_0_n)
876
+ {
877
+ SmemIteratorB1 smem_iterator_B1(shared_storage.operand_B_ref(), thread_idx);
878
+ _prologue(iterator_B1,
879
+ (problem_size_0_n + Base::Shape::kK - 1) / Base::Shape::kK,
880
+ smem_iterator_B1);
881
+ }
882
+
883
+ CUTLASS_DEVICE
884
+ static void drain_cp_asyncs()
885
+ {
886
+ // commit and drain all pending and predicated cp.async pnz from the GEMM
887
+ // mainloop
888
+ cutlass::arch::cp_async_fence();
889
+ cutlass::arch::cp_async_wait<0>();
890
+ __syncthreads();
891
+ }
892
+
893
+ CUTLASS_DEVICE
894
+ void copy_tiles_and_advance_1(IteratorB1& iterator_B1, int group_start_B1 = 0)
895
+ {
896
+ iterator_B1.set_iteration_index(group_start_B1 * IteratorB1::kAccessesPerVector);
897
+ this->smem_iterator_B1_.set_iteration_index(group_start_B1);
898
+
899
+ // Load for operand B
900
+ CUTLASS_PRAGMA_UNROLL
901
+ for (int j = 0; j < Detail::kAccessesPerGroupB1; ++j) {
902
+ if (group_start_B1 + j < Detail::TBLoadIterationsB1) {
903
+ typename IteratorB1::AccessType* dst_ptr =
904
+ reinterpret_cast<typename IteratorB1::AccessType*>(
905
+ this->smem_iterator_B1_.get());
906
+
907
+ int const kSrcBytes = sizeof_bits<typename IteratorB1::Element>::value *
908
+ IteratorB1::ThreadMap::kElementsPerAccess /
909
+ IteratorB1::kAccessesPerVector / 8;
910
+
911
+ CUTLASS_PRAGMA_UNROLL
912
+ for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) {
913
+ auto gmem_ptr = iterator_B1.get();
914
+
915
+ cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB1>(
916
+ dst_ptr + v, gmem_ptr, iterator_B1.valid());
917
+
918
+ ++iterator_B1;
919
+ }
920
+ ++this->smem_iterator_B1_;
921
+ }
922
+ }
923
+ }
924
+
925
+ CUTLASS_DEVICE
926
+ static void _prologue(IteratorB& iterator_B1,
927
+ int32_t gemm_k_iterations_1,
928
+ SmemIteratorB1& smem_iterator_B1_)
929
+ {
930
+ // Issue several complete stages
931
+ CUTLASS_PRAGMA_UNROLL
932
+ for (int stage = 0; stage < kNumStagesConcurrentLoad; ++stage, --gemm_k_iterations_1) {
933
+ iterator_B1.set_residual_tile(gemm_k_iterations_1 == 1);
934
+ iterator_B1.clear_mask(gemm_k_iterations_1 == 0);
935
+
936
+ iterator_B1.set_iteration_index(0);
937
+ smem_iterator_B1_.set_iteration_index(0);
938
+
939
+ // Load for operand B
940
+ CUTLASS_PRAGMA_UNROLL
941
+ for (int j = 0; j < Detail::TBLoadIterationsB1; ++j) {
942
+ typename IteratorB1::AccessType* dst_ptr =
943
+ reinterpret_cast<typename IteratorB1::AccessType*>(smem_iterator_B1_.get());
944
+
945
+ CUTLASS_PRAGMA_UNROLL
946
+ for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) {
947
+ int const kSrcBytes = sizeof_bits<typename IteratorB1::Element>::value *
948
+ IteratorB1::ThreadMap::kElementsPerAccess /
949
+ IteratorB1::kAccessesPerVector / 8;
950
+
951
+ cutlass::arch::cp_async_zfill<kSrcBytes, kCacheOpB1>(
952
+ dst_ptr + v, iterator_B1.get(), iterator_B1.valid());
953
+
954
+ ++iterator_B1;
955
+ }
956
+
957
+ ++smem_iterator_B1_;
958
+ }
959
+
960
+ // Move to the next stage
961
+ iterator_B1.add_tile_offset({1, 0});
962
+
963
+ smem_iterator_B1_.add_tile_offset({1, 0});
964
+
965
+ // Defines the boundary of a stage of cp.async.
966
+ cutlass::arch::cp_async_fence();
967
+ }
968
+ iterator_B1.set_residual_tile(gemm_k_iterations_1 == 1);
969
+ iterator_B1.clear_mask(gemm_k_iterations_1 == 0);
970
+ }
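_prologue issues kNumStagesConcurrentLoad complete stages of cp.async before the mainloop starts, decrementing the remaining K-iteration count as it goes; operator() then runs its counter from the remaining count down past zero to drain the preloaded stages. A small standalone tally of how the work splits, with hypothetical sizes:

    #include <cstdio>

    int main()
    {
        // Hypothetical problem: 5 threadblock K tiles of B, 3 pipeline stages.
        constexpr int kStages = 3;
        constexpr int kNumStagesConcurrentLoad = kStages - 1;  // stages preloaded by _prologue
        int gemm_k_iterations = 5;

        for (int stage = 0; stage < kNumStagesConcurrentLoad; ++stage, --gemm_k_iterations) {
            std::printf("prologue: cp.async for stage %d (K tile %d)\n", stage, 5 - gemm_k_iterations);
        }
        // Mainloop bounds as in operator(): start at the remaining count and stop once the
        // preloaded stages are consumed (the counter runs down to -kStages + 2).
        for (int it = gemm_k_iterations; it > (-kStages + 1); --it) {
            std::printf("mainloop: compute with iteration counter %d\n", it);
        }
        return 0;
    }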
+
972
+ /// Perform a threadblock-scoped matrix multiply-accumulate
973
+ CUTLASS_DEVICE
974
+ void operator()(
975
+ ///< problem size of GEMM
976
+ int gemm_k_iterations_1_,
977
+ ///< destination accumulator tile
978
+ FragmentC1& accum,
979
+ ///< iterator over B1 operand in global memory
980
+ IteratorB1 iterator_B1,
981
+ ///< initial value of accumulator
982
+ FragmentC1 const& src_accum)
983
+ {
984
+ // 2nd Gemm
985
+
986
+ //
987
+ // Prologue
988
+ //
989
+ // Perform accumulation in the 'd' output operand
990
+ accum = src_accum;
991
+
992
+ if (!prologue_done_) {
993
+ _prologue(iterator_B1, gemm_k_iterations_1_, smem_iterator_B1_);
994
+ } else if (!kSmemContainsEntireB) {
995
+ // Restore the iterators increments
996
+
997
+ int gemm_k_iterations_1 = gemm_k_iterations_1_;
998
+ // Issue several complete stages
999
+ CUTLASS_PRAGMA_UNROLL
1000
+ for (int stage = 0; stage < kNumStagesConcurrentLoad; ++stage, --gemm_k_iterations_1) {
1001
+ iterator_B1.set_iteration_index(0);
1002
+ this->smem_iterator_B1_.set_iteration_index(0);
1003
+
1004
+ // Load for operand B
1005
+ CUTLASS_PRAGMA_UNROLL
1006
+ for (int j = 0; j < Detail::TBLoadIterationsB1; ++j) {
1007
+ CUTLASS_PRAGMA_UNROLL
1008
+ for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) { ++iterator_B1; }
1009
+ ++this->smem_iterator_B1_;
1010
+ }
1011
+ iterator_B1.add_tile_offset({1, 0});
1012
+ this->smem_iterator_B1_.add_tile_offset({1, 0});
1013
+ }
1014
+ iterator_B1.set_residual_tile(gemm_k_iterations_1 <= 1);
1015
+ iterator_B1.clear_mask(gemm_k_iterations_1 <= 0);
1016
+ }
1017
+
1018
+ // DEPBAR+SYNC
1019
+ cutlass::arch::cp_async_wait<kNumStagesConcurrentLoad - 1>();
1020
+ __syncthreads();
1021
+
1022
+ // remember that WarpFragmentAScale and WarpIteratorAScale are no-op/empty
1023
+ // if scaling is disabled.
1024
+
1025
+ // Pair of fragments used to overlap shared memory loads and math
1026
+ // instructions
1027
+ WarpLoadedFragmentA1 warp_loaded_frag_A1[2];
1028
+ WarpLoadedFragmentA1Scale warp_loaded_frag_A1_scale[2];
1029
+ WarpLoadedFragmentB1 warp_loaded_frag_B1[2];
1030
+ WarpTransformedFragmentA1 warp_transformed_frag_A1[2];
1031
+ WarpTransformedFragmentB1 warp_transformed_frag_B1[2];
1032
+
1033
+ Operator1 warp_mma1;
1034
+
1035
+ warp_tile_iterator_A1_.load(warp_loaded_frag_A1[0]);
1036
+ ++warp_tile_iterator_A1_;
1037
+
1038
+ warp_tile_iterator_A1_scale_.load(warp_loaded_frag_A1_scale[0]);
1039
+ ++warp_tile_iterator_A1_scale_;
1040
+
1041
+ this->warp_tile_iterator_B_.set_kgroup_index(0);
1042
+ this->warp_tile_iterator_B_.load(warp_loaded_frag_B1[0]);
1043
+ ++this->warp_tile_iterator_B_;
1044
+
1045
+ int smem_write_stage_idx = Base::kStages - 1;
1046
+ int smem_read_stage_idx = 0;
1047
+
1048
+ warp_mma1.transform(
1049
+ warp_transformed_frag_A1[0],
1050
+ warp_transformed_frag_B1[0],
1051
+ FragmentAScaler::apply(warp_loaded_frag_A1[0], warp_loaded_frag_A1_scale[0]),
1052
+ warp_loaded_frag_B1[0]);
1053
+
1054
+ // tf32x3 kernels use staging accumulation. warp_mma uses a temporary
1055
+ // accumulator and this temporary accumulator is added to the final
1056
+ // accumulator once in every mainloop iteration.
1057
+ plus<FragmentC1> plus_accum;
1058
+
1059
+ FragmentC1 tmp_accum;
1060
+
1061
+ if (platform::is_same<typename Operator1::MathOperator,
1062
+ arch::OpMultiplyAddFastF32>::value ||
1063
+ platform::is_same<typename Operator1::MathOperator,
1064
+ arch::OpMultiplyAddComplexFastF32>::value) {
1065
+ tmp_accum.clear();
1066
+ }
1067
+
1068
+ //
1069
+ // Mainloop
1070
+ //
1071
+
1072
+ CUTLASS_PRAGMA_UNROLL
1073
+ for (int gemm_k_iterations_1 = gemm_k_iterations_1_ - (Base::kStages - 1);
1074
+ gemm_k_iterations_1 > (-Base::kStages + 1);
1075
+ gemm_k_iterations_1--) {
1076
+ //
1077
+ // Loop over GEMM K dimension
1078
+ //
1079
+
1080
+ // Computes a warp-level GEMM on data held in shared memory
1081
+ // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate
1082
+ CUTLASS_PRAGMA_UNROLL
1083
+ for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations1; ++warp_mma_k) {
1084
+ // Load warp-level tile from accumulator fragment (A)
1085
+ // or shared memory (operand B)
1086
+ this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) %
1087
+ Base::kWarpGemmIterations1);
1088
+ // skip warp tile loading for the last kgroup (we are out of the buf)
1089
+ if (gemm_k_iterations_1 > (-Base::kStages + 2) ||
1090
+ warp_mma_k < Base::kWarpGemmIterations1 - 1) {
1091
+ warp_tile_iterator_A1_.load(warp_loaded_frag_A1[(warp_mma_k + 1) % 2]);
1092
+ warp_tile_iterator_A1_scale_.load(
1093
+ warp_loaded_frag_A1_scale[(warp_mma_k + 1) % 2]);
1094
+ this->warp_tile_iterator_B_.load(warp_loaded_frag_B1[(warp_mma_k + 1) % 2]);
1095
+ }
1096
+ ++warp_tile_iterator_A1_;
1097
+ ++warp_tile_iterator_A1_scale_;
1098
+ ++this->warp_tile_iterator_B_;
1099
+
1100
+ if (warp_mma_k > 0)
1101
+ warp_mma1.transform(
1102
+ warp_transformed_frag_A1[warp_mma_k % 2],
1103
+ warp_transformed_frag_B1[warp_mma_k % 2],
1104
+ FragmentAScaler::apply(warp_loaded_frag_A1[warp_mma_k % 2],
1105
+ warp_loaded_frag_A1_scale[warp_mma_k % 2]),
1106
+ warp_loaded_frag_B1[warp_mma_k % 2]);
1107
+
1108
+ if (platform::is_same<typename Operator1::MathOperator,
1109
+ arch::OpMultiplyAddFastF32>::value ||
1110
+ platform::is_same<typename Operator1::MathOperator,
1111
+ arch::OpMultiplyAddComplexFastF32>::value) {
1112
+ warp_mma1(tmp_accum,
1113
+ warp_transformed_frag_A1[warp_mma_k % 2],
1114
+ warp_transformed_frag_B1[warp_mma_k % 2],
1115
+ tmp_accum);
1116
+
1117
+ if (warp_mma_k == 0) {
1118
+ accum = plus_accum(accum, tmp_accum);
1119
+ tmp_accum.clear();
1120
+ }
1121
+ } else {
1122
+ warp_mma1(accum,
1123
+ warp_transformed_frag_A1[warp_mma_k % 2],
1124
+ warp_transformed_frag_B1[warp_mma_k % 2],
1125
+ accum);
1126
+ }
1127
+
1128
+ // Issue global->shared copies for this stage
1129
+ if (warp_mma_k < Base::kWarpGemmIterations1 - 1) {
1130
+ int group_start_iteration_B1;
1131
+
1132
+ group_start_iteration_B1 = warp_mma_k * Detail::kAccessesPerGroupB1;
1133
+
1134
+ if (!kSmemContainsEntireB) {
1135
+ copy_tiles_and_advance_1(iterator_B1, group_start_iteration_B1);
1136
+ }
1137
+ }
1138
+
1139
+ if (warp_mma_k + 2 == Base::kWarpGemmIterations1) {
1140
+ int group_start_iteration_B1;
1141
+ group_start_iteration_B1 = (warp_mma_k + 1) * Detail::kAccessesPerGroupB1;
1142
+
1143
+ if (!kSmemContainsEntireB) {
1144
+ copy_tiles_and_advance_1(iterator_B1, group_start_iteration_B1);
1145
+ }
1146
+
1147
+ // Inserts a memory fence between stages of cp.async instructions.
1148
+ cutlass::arch::cp_async_fence();
1149
+
1150
+ // Waits until kStages-2 stages have committed.
1151
+ arch::cp_async_wait<kNumStagesConcurrentLoad - 1>();
1152
+ __syncthreads();
1153
+
1154
+ // Move to the next stage
1155
+ iterator_B1.add_tile_offset({1, 0});
1156
+
1157
+ this->smem_iterator_B1_.add_tile_offset({1, 0});
1158
+
1159
+ // Add negative offsets to return iterators to the 'start' of the
1160
+ // circular buffer in shared memory
1161
+ if (!kSmemContainsEntireB) {
1162
+ if (smem_write_stage_idx == (Base::kStages - 1)) {
1163
+ this->smem_iterator_B1_.add_tile_offset({-Base::kStages, 0});
1164
+ smem_write_stage_idx = 0;
1165
+ } else {
1166
+ ++smem_write_stage_idx;
1167
+ }
1168
+
1169
+ if (smem_read_stage_idx == (Base::kStages - 1)) {
1170
+ this->warp_tile_iterator_B_.add_tile_offset(
1171
+ {-Base::kStages * Policy1::kPartitionsK *
1172
+ Base::kWarpGemmIterations1,
1173
+ 0});
1174
+ smem_read_stage_idx = 0;
1175
+ } else {
1176
+ ++smem_read_stage_idx;
1177
+ }
1178
+ }
1179
+
1180
+ iterator_B1.set_residual_tile(gemm_k_iterations_1 == 2);
1181
+ iterator_B1.clear_mask(gemm_k_iterations_1 == 1);
1182
+ }
1183
+
1184
+ // Do any conversions feeding the first stage at the end of the loop so
1185
+ // we can start right away on mma instructions
1186
+ if (warp_mma_k + 1 == Base::kWarpGemmIterations1)
1187
+ warp_mma1.transform(
1188
+ warp_transformed_frag_A1[(warp_mma_k + 1) % 2],
1189
+ warp_transformed_frag_B1[(warp_mma_k + 1) % 2],
1190
+ FragmentAScaler::apply(warp_loaded_frag_A1[(warp_mma_k + 1) % 2],
1191
+ warp_loaded_frag_A1_scale[(warp_mma_k + 1) % 2]),
1192
+ warp_loaded_frag_B1[(warp_mma_k + 1) % 2]);
1193
+ }
1194
+ }
1195
+
1196
+ if (platform::is_same<typename Operator1::MathOperator,
1197
+ arch::OpMultiplyAddFastF32>::value ||
1198
+ platform::is_same<typename Operator1::MathOperator,
1199
+ arch::OpMultiplyAddComplexFastF32>::value) {
1200
+ accum = plus_accum(accum, tmp_accum);
1201
+ }
1202
+ }
1203
+ };
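For the FastF32 (3xTF32) math operators, the mainloop above accumulates warp-level MMA results into a temporary fragment and folds that into the final accumulator once per mainloop iteration (plus once more after the loop) via plus<FragmentC1>; every other operator accumulates in place. A scalar-level sketch of the staging pattern, purely illustrative:

    #include <cassert>

    int main()
    {
        // Scalar stand-ins for FragmentC1 fragments.
        float accum = 0.f, tmp_accum = 0.f;

        for (int k_iter = 0; k_iter < 3; ++k_iter) {                  // mainloop iterations
            for (int warp_mma_k = 0; warp_mma_k < 2; ++warp_mma_k) {  // warp-level MMAs
                tmp_accum += 1.f;          // warp_mma1(tmp_accum, ..., tmp_accum)
                if (warp_mma_k == 0) {
                    accum += tmp_accum;    // accum = plus_accum(accum, tmp_accum)
                    tmp_accum = 0.f;       // tmp_accum.clear()
                }
            }
        }
        accum += tmp_accum;                // final fold after the mainloop
        assert(accum == 6.f);              // all six partial products accounted for
        return 0;
    }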
+
1205
+ template <typename WarpShape,
1206
+ typename InstructionShape,
1207
+ typename RegularWarpIterator,
1208
+ typename Policy,
1209
+ typename Enable = void>
1210
+ struct DefaultWarpIteratorAFromSharedMemory {};
1211
+
1212
+ // TensorOp - Ampere half
1213
+ template <typename RegularWarpIterator, typename Policy>
1214
+ struct DefaultWarpIteratorAFromSharedMemory<
1215
+ cutlass::gemm::GemmShape<32, 32, 32>,
1216
+ cutlass::gemm::GemmShape<16, 8, 8>,
1217
+ RegularWarpIterator,
1218
+ Policy,
1219
+ typename platform::enable_if<(sizeof_bits<typename RegularWarpIterator::Element>::value == 16 &&
1220
+ Policy::Operator::Policy::OpDelta::kRow == 1)>::type> {
1221
+ static constexpr auto kWarpSize = 32;
1222
+ using OpDelta = typename Policy::Operator::Policy::OpDelta;
1223
+ using WarpShape = cutlass::MatrixShape<32, 32>;
1224
+
1225
+ using WarpIterator =
1226
+ cutlass::gemm::warp::WarpIteratorFromSmem<cutlass::gemm::Operand::kA,
1227
+ typename RegularWarpIterator::Element>;
1228
+ };
1229
+
1230
+ // TensorOp - Ampere f32
1231
+ template <typename WarpShape, typename RegularWarpIterator, typename Policy>
1232
+ struct DefaultWarpIteratorAFromSharedMemory<
1233
+ WarpShape,
1234
+ cutlass::gemm::GemmShape<16, 8, 8>,
1235
+ RegularWarpIterator,
1236
+ Policy,
1237
+ typename platform::enable_if<(sizeof_bits<typename RegularWarpIterator::Element>::value != 16 ||
1238
+ Policy::Operator::Policy::OpDelta::kRow != 1)>::type> {
1239
+ using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>;
1240
+ static constexpr auto kWarpSize = 32;
1241
+ using OpDelta = typename Policy::Operator::Policy::OpDelta;
1242
+
1243
+ using WarpIterator = cutlass::gemm::warp::MmaTensorOpMultiplicandTileAccessIterator<
1244
+ cutlass::MatrixShape<WarpShape::kM, WarpShape::kK>,
1245
+ cutlass::gemm::Operand::kA,
1246
+ typename RegularWarpIterator::Element,
1247
+ cutlass::layout::RowMajor,
1248
+ cutlass::MatrixShape<InstructionShape::kM, InstructionShape::kK>,
1249
+ OpDelta::kRow,
1250
+ kWarpSize>;
1251
+ };
1252
+
1253
+ // TensorOp - Volta
1254
+ template <typename WarpShape, typename RegularWarpIterator, typename Policy>
1255
+ struct DefaultWarpIteratorAFromSharedMemory<WarpShape,
1256
+ cutlass::gemm::GemmShape<16, 16, 4>,
1257
+ RegularWarpIterator,
1258
+ Policy> {
1259
+ using InstructionShape = cutlass::gemm::GemmShape<16, 16, 4>;
1260
+ static constexpr auto kWarpSize = 32;
1261
+ using OpDelta = typename Policy::Operator::Policy::OpDelta;
1262
+
1263
+ using WarpIterator = cutlass::gemm::warp::MmaVoltaTensorOpMultiplicandTileIterator<
1264
+ cutlass::MatrixShape<32, 32>, // MatrixShape<WarpShape::kM,
1265
+ // WarpShape::kK>,
1266
+ cutlass::gemm::Operand::kA,
1267
+ typename RegularWarpIterator::Element,
1268
+ cutlass::layout::RowMajorVoltaTensorOpMultiplicandCrosswise<16, 32>,
1269
+ cutlass::MatrixShape<16, 4>,
1270
+ OpDelta::kRow,
1271
+ kWarpSize>;
1272
+ };
1273
+
1274
+ // Simt
1275
+ template <typename WarpShape, typename RegularWarpIterator, typename Policy>
1276
+ struct DefaultWarpIteratorAFromSharedMemory<WarpShape,
1277
+ cutlass::gemm::GemmShape<1, 1, 1>,
1278
+ RegularWarpIterator,
1279
+ Policy> {
1280
+ using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>;
1281
+ static constexpr auto kWarpSize = 32;
1282
+
1283
+ // We just use the same iterator, as we reproduced the same shared-memory
1284
+ // schema. Just modify it to handle non-complete tiles.
1285
+ using WarpIterator = RegularWarpIterator;
1286
+ };
1287
+
1288
+ // Converts a "regular" Mma into their counterpart from shared memory
1289
+ template <typename Mma_,
1290
+ typename AccumulatorSharedStorage,
1291
+ /// whether or not to apply elementwise multiplication of operand A by
1292
+ /// another matrix in shared memory before usage in A @ B
1293
+ bool kScaleOperandA,
1294
+ bool kTransposeA = false>
1295
+ struct DefaultMmaFromSharedMemory;
1296
+
1297
+ // Mma pipelined
1298
+ template <
1299
+ /// Size of the Gemm problem - concept: gemm::GemmShape<>
1300
+ typename Shape_,
1301
+ /// Iterates over tiles of A operand in global memory
1302
+ // (concept: ReadableTileIterator | ForwardTileIterator |
1303
+ // MaskedTileIterator)
1304
+ typename IteratorA_,
1305
+ /// Iterates over tiles of A operand in shared memory
1306
+ /// (concept: WriteableTileIterator | RandomAccessTileIterator)
1307
+ typename SmemIteratorA_,
1308
+ /// Iterates over tiles of B operand in global memory
1309
+ // (concept: ReadableTileIterator | ForwardTileIterator |
1310
+ // MaskedTileIterator)
1311
+ typename IteratorB_,
1312
+ /// Iterates over tiles of B operand in shared memory
1313
+ /// (concept: WriteableTileIterator | RandomAccessTileIterator)
1314
+ typename SmemIteratorB_,
1315
+ /// Data type of accumulator matrix
1316
+ typename ElementC_,
1317
+ /// Data type of accumulator matrix
1318
+ typename LayoutC_,
1319
+ /// Policy describing tuning details (concept: MmaPolicy)
1320
+ typename Policy_,
1321
+ /// Transformation applied to A operand
1322
+ typename TransformA_,
1323
+ /// Transformation applied to B operand
1324
+ typename TransformB_,
1325
+ typename AccumulatorSharedStorage_,
1326
+ /// whether or not to apply elementwise multiplication of operand A by
1327
+ /// another matrix in shared memory before usage in A @ B
1328
+ bool kScaleOperandA,
1329
+ bool kTransposeA>
1330
+ struct DefaultMmaFromSharedMemory<MmaPipelined<Shape_,
1331
+ IteratorA_,
1332
+ SmemIteratorA_,
1333
+ IteratorB_,
1334
+ SmemIteratorB_,
1335
+ ElementC_,
1336
+ LayoutC_,
1337
+ Policy_,
1338
+ TransformA_,
1339
+ TransformB_>,
1340
+ AccumulatorSharedStorage_,
1341
+ kScaleOperandA,
1342
+ kTransposeA> {
1343
+ static constexpr int kWarpSize = 32;
1344
+ using SmemAccumulatorLayout = cutlass::layout::RowMajor;
1345
+
1346
+ using RegularMma = MmaPipelined<Shape_,
1347
+ IteratorA_,
1348
+ SmemIteratorA_,
1349
+ IteratorB_,
1350
+ SmemIteratorB_,
1351
+ ElementC_,
1352
+ LayoutC_,
1353
+ Policy_,
1354
+ TransformA_,
1355
+ TransformB_>;
1356
+
1357
+ using WarpShape = typename Policy_::Operator::Shape;
1358
+ using InstructionShape = typename Policy_::Operator::InstructionShape;
1359
+ using ArchMmaOperator = typename Policy_::Operator;
1360
+
1361
+ static constexpr bool kIsTransposedA = false;
1362
+ using WarpIteratorA =
1363
+ typename DefaultWarpIteratorAFromSharedMemory<WarpShape,
1364
+ InstructionShape,
1365
+ typename RegularMma::Operator::IteratorA,
1366
+ Policy_>::WarpIterator;
1367
+ using IteratorB =
1368
+ typename cutlass::transform::threadblock::MakeIteratorResidualLast<IteratorB_>::Iterator;
1369
+
1370
+ using Mma =
1371
+ typename cutlass::gemm::threadblock::MmaPipelinedFromSharedMemory<Shape_,
1372
+ WarpIteratorA,
1373
+ kScaleOperandA,
1374
+ AccumulatorSharedStorage_,
1375
+ IteratorB,
1376
+ SmemIteratorB_,
1377
+ ElementC_,
1378
+ LayoutC_,
1379
+ Policy_>;
1380
+ };
1381
+
1382
+ template <
1383
+ /// Size of the Gemm problem - concept: gemm::GemmShape<>
1384
+ typename Shape_,
1385
+ /// Iterates over tiles of A operand in global memory
1386
+ // (concept: ReadableTileIterator | ForwardTileIterator |
1387
+ // MaskedTileIterator)
1388
+ typename IteratorA_,
1389
+ /// Iterates over tiles of A operand in shared memory
1390
+ /// (concept: WriteableTileIterator | RandomAccessTileIterator)
1391
+ typename SmemIteratorA_,
1392
+ /// Cache operation for operand A
1393
+ cutlass::arch::CacheOperation::Kind CacheOpA,
1394
+ /// Iterates over tiles of B operand in global memory
1395
+ // (concept: ReadableTileIterator | ForwardTileIterator |
1396
+ // MaskedTileIterator)
1397
+ typename IteratorB_,
1398
+ /// Iterates over tiles of B operand in shared memory
1399
+ /// (concept: WriteableTileIterator | RandomAccessTileIterator)
1400
+ typename SmemIteratorB_,
1401
+ /// Cache operation for operand B
1402
+ cutlass::arch::CacheOperation::Kind CacheOpB,
1403
+ /// Data type of accumulator matrix
1404
+ typename ElementC_,
1405
+ /// Data type of accumulator matrix
1406
+ typename LayoutC_,
1407
+ /// Policy describing tuning details (concept: MmaPolicy)
1408
+ typename Policy_,
1409
+ /// Number of stages,
1410
+ int Stages,
1411
+ /// Use zfill or predicate for out-of-bound cp.async
1412
+ SharedMemoryClearOption SharedMemoryClear,
1413
+ typename AccumulatorSharedStorage_,
1414
+ /// whether or not to apply elementwise multiplication of operand A by
1415
+ /// another matrix in shared memory before usage in A @ B
1416
+ bool kScaleOperandA,
1417
+ bool kTransposeA>
1418
+ struct DefaultMmaFromSharedMemory<MmaMultistage<Shape_,
1419
+ IteratorA_,
1420
+ SmemIteratorA_,
1421
+ CacheOpA,
1422
+ IteratorB_,
1423
+ SmemIteratorB_,
1424
+ CacheOpB,
1425
+ ElementC_,
1426
+ LayoutC_,
1427
+ Policy_,
1428
+ Stages,
1429
+ SharedMemoryClear>,
1430
+ AccumulatorSharedStorage_,
1431
+ kScaleOperandA,
1432
+ kTransposeA> {
1433
+ static constexpr int kWarpSize = 32;
1434
+
1435
+ using RegularMma = MmaMultistage<Shape_,
1436
+ IteratorA_,
1437
+ SmemIteratorA_,
1438
+ CacheOpA,
1439
+ IteratorB_,
1440
+ SmemIteratorB_,
1441
+ CacheOpB,
1442
+ ElementC_,
1443
+ LayoutC_,
1444
+ Policy_,
1445
+ Stages,
1446
+ SharedMemoryClear>;
1447
+
1448
+ using WarpShape = typename Policy_::Operator::Shape;
1449
+ using InstructionShape = typename Policy_::Operator::InstructionShape;
1450
+ using WarpIteratorA_ =
1451
+ typename DefaultWarpIteratorAFromSharedMemory<WarpShape,
1452
+ InstructionShape,
1453
+ typename RegularMma::Operator::IteratorA,
1454
+ Policy_>::WarpIterator;
1455
+ using WarpIteratorTranspose = TransposeWarpIterator<WarpIteratorA_>;
1456
+ static constexpr bool kIsTransposedA = WarpIteratorTranspose::kSupportsTranspose && kTransposeA;
1457
+ using WarpIteratorA = typename platform::
1458
+ conditional<kIsTransposedA, typename WarpIteratorTranspose::Iterator, WarpIteratorA_>::type;
1459
+
1460
+ static int constexpr kMaxK = kIsTransposedA ? AccumulatorSharedStorage_::Shape::kM
1461
+ : AccumulatorSharedStorage_::Shape::kN;
1462
+ // Reduce the number of stages if we don't need that many
1463
+ static int constexpr kStagesMax = (kMaxK + int(Shape_::kK) - 1) / int(Shape_::kK);
1464
+ static int constexpr kStages = cutlass::const_min(Stages, kStagesMax);
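The stage reduction above is a ceiling division: if the accumulator's K extent spans only a couple of threadblock K tiles, there is no point allocating the full multistage pipeline. With hypothetical numbers:

    // Assumed extents (not from a real instantiation): kMaxK = 64, threadblock K tile = 32.
    constexpr int kMaxK = 64;
    constexpr int kShapeK = 32;
    constexpr int RequestedStages = 4;

    constexpr int kStagesMax = (kMaxK + kShapeK - 1) / kShapeK;  // ceil(64 / 32) = 2
    constexpr int kStages = RequestedStages < kStagesMax ? RequestedStages : kStagesMax;

    static_assert(kStages == 2, "the 4-stage request is clamped to the 2 stages actually needed");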
+
1466
+ using IteratorB =
1467
+ typename cutlass::transform::threadblock::MakeIteratorResidualLast<IteratorB_>::Iterator;
1468
+ using Mma = typename cutlass::gemm::threadblock::MmaMultistageFromSharedMemory<
1469
+ Shape_,
1470
+ WarpIteratorA,
1471
+ kScaleOperandA,
1472
+ AccumulatorSharedStorage_,
1473
+ IteratorB,
1474
+ SmemIteratorB_,
1475
+ RegularMma::kCacheOpB,
1476
+ ElementC_,
1477
+ LayoutC_,
1478
+ Policy_,
1479
+ kStages,
1480
+ kMaxK>;
1481
+ };
1482
+
1483
+ /////////////////////////////////////////////////////////////////////////////////////////////////
1484
+
1485
+ template <typename IteratorC,
1486
+ typename Operator,
1487
+ typename scalar_t,
1488
+ typename WarpShape_,
1489
+ typename ThreadblockShape_>
1490
+ struct B2bGemm;
1491
+
1492
+ // Tensor Cores >= Sm75 specialization (Ampere ...)
1493
+ template < /// Size of the matrix to load (concept: MatrixShape)
1494
+ typename Shape_,
1495
+ /// Element type
1496
+ typename Element_,
1497
+ /// Layout of operand in memory
1498
+ typename Layout_,
1499
+ /// Shape of one matrix product operation (concept: MatrixShape)
1500
+ typename InstructionShape_,
1501
+ /// Interval between adjacent *MMA instructions (in units of MMA
1502
+ /// instructions, concept: MatrixShape)
1503
+ typename OpDelta_,
1504
+ typename Operator,
1505
+ typename scalar_t,
1506
+ typename WarpShape_,
1507
+ typename ThreadblockShape_>
1508
+ struct B2bGemm<
1509
+ cutlass::gemm::warp::
1510
+ MmaTensorOpAccumulatorTileIterator<Shape_, Element_, Layout_, InstructionShape_, OpDelta_>,
1511
+ Operator,
1512
+ scalar_t,
1513
+ WarpShape_,
1514
+ ThreadblockShape_> {
1515
+ using IteratorC = typename cutlass::gemm::warp::
1516
+ MmaTensorOpAccumulatorTileIterator<Shape_, Element_, Layout_, InstructionShape_, OpDelta_>;
1517
+ using FragmentC = typename IteratorC::Fragment;
1518
+ using InstructionShape = InstructionShape_;
1519
+ using WarpShape = WarpShape_;
1520
+ using ThreadblockShape = ThreadblockShape_;
1521
+ using accum_t = Element_;
1522
+ using lse_scalar_t = float;
1523
+
1524
+ using SmemAccumulatorLayout = cutlass::layout::RowMajor;
1525
+
1526
+ // Iterator to load accumulators (results of matmul in registers)
1527
+ using FragmentIteratorAccumulator = cutlass::epilogue::warp::FragmentIteratorTensorOp<
1528
+ WarpShape,
1529
+ InstructionShape,
1530
+ accum_t,
1531
+ typename Operator::Policy::Operator::FragmentC,
1532
+ cutlass::layout::RowMajor>;
1533
+
1534
+ // Iterator to store to shared-memory
1535
+ using SmemIteratorD0 =
1536
+ typename cutlass::epilogue::warp::TileIteratorTensorOp<WarpShape,
1537
+ InstructionShape,
1538
+ scalar_t, // accum_t,
1539
+ SmemAccumulatorLayout>;
1540
+ using AccumulatorSharedStorage =
1541
+ cutlass::gemm::threadblock::AccumulatorSharedStorage<ThreadblockShape,
1542
+ typename SmemIteratorD0::Element,
1543
+ typename SmemIteratorD0::TensorLayout,
1544
+ typename SmemIteratorD0::Padding>;
1545
+ // We need to provide an operation for the epilogue. Let's create an
1546
+ // operation that does nothing (ScaleType::Nothing), just converts
1547
+ // from accum_t (float) -> scalar_t (can be half)
1548
+ using OutputOpNoOp = cutlass::epilogue::thread::LinearCombination<
1549
+ typename SmemIteratorD0::Element, // ElementOutput
1550
+ FragmentIteratorAccumulator::Fragment::kElements,
1551
+ accum_t, // ElementAccumulator
1552
+ typename SmemIteratorD0::Element, // ElementCompute
1553
+ cutlass::epilogue::thread::ScaleType::Nothing>;
1554
+ using Epilogue = cutlass::epilogue::threadblock::EpilogueSmemAccumulator<
1555
+ SmemIteratorD0,
1556
+ FragmentIteratorAccumulator,
1557
+ SmemIteratorD0, // ScaleBiasIterator
1558
+ // - not used
1559
+ OutputOpNoOp>;
1560
+
1561
+ // Epilogue 2: with LSE (for backwards pass)
1562
+ static int const kElementsPerAccess = 2; // TODO: Why 2?
1563
+ using IteratorAccumulatorLSE = cutlass::transform::threadblock::VectorIterator<
1564
+ cutlass::transform::threadblock::PredicatedVectorAccessIterator<
1565
+ // Shape
1566
+ cutlass::MatrixShape<ThreadblockShape::kM, ThreadblockShape::kN>,
1567
+ // WarpShape
1568
+ cutlass::MatrixShape<WarpShape::kM, WarpShape::kN>,
1569
+ lse_scalar_t,
1570
+ cutlass::layout::RowMajor,
1571
+ kElementsPerAccess>>;
1572
+ using EpilogueOpApplyLSE = cutlass::epilogue::thread::ApplyLogSumExp<
1573
+ scalar_t, // ElementOutput_
1574
+ lse_scalar_t, // ElementLSE_
1575
+ accum_t, // ElementAccumulator_
1576
+ accum_t, // ElementCompute_
1577
+ 128 / cutlass::sizeof_bits<scalar_t>::value
1578
+ // FragmentIteratorAccumulator::Fragment::kElements
1579
+ // InstructionShape::kM * InstructionShape::kN / 32
1580
+ >;
1581
+ using EpilogueWithLSE =
1582
+ cutlass::epilogue::threadblock::EpilogueSmemAccumulator<SmemIteratorD0,
1583
+ FragmentIteratorAccumulator,
1584
+ IteratorAccumulatorLSE,
1585
+ EpilogueOpApplyLSE>;
1586
+
1587
+ static void CUTLASS_DEVICE accumToSmem(AccumulatorSharedStorage& shared_storage,
1588
+ FragmentC const& accum,
1589
+ int lane_id,
1590
+ cutlass::MatrixCoord const& tile_coords)
1591
+ {
1592
+ SmemIteratorD0 smem_iterator_attn(shared_storage.accum_ref(), lane_id);
1593
+ smem_iterator_attn.add_tile_offset(
1594
+ tile_coords * cutlass::MatrixCoord{SmemIteratorD0::TileIterations::kRow,
1595
+ SmemIteratorD0::TileIterations::kColumn});
1596
+ Epilogue epilogue;
1597
+ epilogue(OutputOpNoOp({}), smem_iterator_attn, accum);
1598
+ }
1599
+
1600
+ static void CUTLASS_DEVICE accumApplyLSEToSmem(AccumulatorSharedStorage& shared_storage,
1601
+ FragmentC& accum,
1602
+ lse_scalar_t const* lse,
1603
+ int32_t lse_extents,
1604
+ int thread_id,
1605
+ int warp_id,
1606
+ int lane_id,
1607
+ cutlass::MatrixCoord const& tile_coords)
1608
+ {
1609
+ constexpr int32_t kAlignLSE = 32;
1610
+ IteratorAccumulatorLSE iterator_lse(
1611
+ lse,
1612
+ {(int32_t)0, (int32_t)ceil_div(lse_extents, kAlignLSE) * kAlignLSE},
1613
+ thread_id,
1614
+ warp_id,
1615
+ cutlass::MatrixCoord{0, 0} // offset
1616
+ );
1617
+
1618
+ SmemIteratorD0 smem_iterator_attn(shared_storage.accum_ref(), lane_id);
1619
+ smem_iterator_attn.add_tile_offset(
1620
+ tile_coords * cutlass::MatrixCoord{SmemIteratorD0::TileIterations::kRow,
1621
+ SmemIteratorD0::TileIterations::kColumn});
1622
+ EpilogueWithLSE epilogue;
1623
+ EpilogueOpApplyLSE minus_lse_exp({});
1624
+ epilogue(minus_lse_exp,
1625
+ smem_iterator_attn,
1626
+ accum,
1627
+ // scale - unused
1628
+ iterator_lse,
1629
+ // bias
1630
+ iterator_lse);
1631
+ }
1632
+ };
1633
+
1634
+ // Volta Specialization
1635
+ // only supported for f16
1636
+ template <typename Operator, typename WarpShape_, typename ThreadblockShape_>
1637
+ struct B2bGemm<cutlass::gemm::warp::MmaVoltaTensorOpAccumulatorTileIterator<
1638
+ cutlass::MatrixShape<32, 32>,
1639
+ float,
1640
+ cutlass::layout::RowMajor,
1641
+ cutlass::gemm::GemmShape<16, 16, 4>,
1642
+ cutlass::MatrixShape<1, 1>>,
1643
+ Operator,
1644
+ cutlass::half_t,
1645
+ WarpShape_,
1646
+ ThreadblockShape_> {
1647
+ using IteratorC = cutlass::gemm::warp::MmaVoltaTensorOpAccumulatorTileIterator<
1648
+ cutlass::MatrixShape<32, 32>,
1649
+ float,
1650
+ cutlass::layout::RowMajor,
1651
+ cutlass::gemm::GemmShape<16, 16, 4>,
1652
+ cutlass::MatrixShape<1, 1>>;
1653
+ using scalar_t = cutlass::half_t;
1654
+ using accum_t = IteratorC::Element;
1655
+ using WarpShape = WarpShape_;
1656
+ using ThreadblockShape = ThreadblockShape_;
1657
+ using FragmentC = IteratorC::Fragment;
1658
+ using lse_scalar_t = float;
1659
+
1660
+ using SmemAccumulatorLayout = cutlass::layout::RowMajor;
1661
+ using SmemIteratorD0 =
1662
+ cutlass::epilogue::warp::TileIteratorVoltaTensorOp<WarpShape,
1663
+ cutlass::gemm::GemmShape<32, 32, 4>,
1664
+ scalar_t,
1665
+ SmemAccumulatorLayout>;
1666
+
1667
+ // Storage in shared-memory for Q.Kt
1668
+ using AccumulatorSharedStorage = cutlass::gemm::threadblock::AccumulatorSharedStorage<
1669
+ ThreadblockShape,
1670
+ scalar_t,
1671
+ cutlass::layout::RowMajorVoltaTensorOpMultiplicandCrosswise<
1672
+ 16,
1673
+ 32>, // typename SmemIteratorD0::TensorLayout,
1674
+ cutlass::MatrixShape<0, 0> // Padding
1675
+ >;
1676
+
1677
+ using OutputLayout = cutlass::layout::RowMajorVoltaTensorOpMultiplicandCrosswise<16, 32>;
1678
+ using TensorRef = cutlass::TensorRef<scalar_t, OutputLayout>;
1679
+ using Policy = typename IteratorC::Policy;
1680
+ using Element = accum_t;
1681
+ // These are MmaVoltaTensorOpAccumulatorTileIterator private fields
1682
+ // Let's copy their values
1683
+ static int const kElementsPerPartial = 4;
1684
+ using EleShapePerPatial =
1685
+ typename cutlass::platform::conditional<cutlass::platform::is_same<Element, float>::value,
1686
+ cutlass::MatrixShape<2, 2>,
1687
+ cutlass::MatrixShape<1, 4>>::type;
1688
+ static int const kElementsPerMma = 8;
1689
+ static int const kAccumulatorPatials = 2;
1690
+ using QuadShapePerPatialMma = cutlass::MatrixShape<4, 4>;
1691
+
1692
+ static void CUTLASS_DEVICE accumToSmem(AccumulatorSharedStorage& shared_storage,
1693
+ FragmentC const& accum,
1694
+ int lane_id,
1695
+ cutlass::MatrixCoord const& tile_coords)
1696
+ {
1697
+ // ctor - from MmaVoltaTensorOpAccumulatorTileIterator
1698
+ TensorRef ref_(shared_storage.accum_ref());
1699
+ int quad = (lane_id >> 2);
1700
+ int lane_in_quad = (lane_id & 3);
1701
+ int accum_m, accum_n;
1702
+
1703
+ if (cutlass::platform::is_same<Element, float>::value) {
1704
+ // (quad[2],quad[0])+lane_in_quad[0]
1705
+ accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + (lane_in_quad & 1);
1706
+ // (quad[1])+lane_in_quad[1]
1707
+ accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials +
1708
+ (lane_in_quad & 2);
1709
+ } else {
1710
+ accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + lane_in_quad; // (quad[2],quad[0])
1711
+ accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials;
1712
+ }
1713
+ cutlass::MatrixCoord lane_offset(accum_m, accum_n);
1714
+
1715
+ // Tile offset
1716
+ ref_.add_coord_offset(tile_coords * cutlass::MatrixCoord({IteratorC::Shape::kRow,
1717
+ IteratorC::Shape::kColumn}));
1718
+
1719
+ using AccessType = cutlass::Array<scalar_t, EleShapePerPatial::kColumn>;
1720
+
1721
+ // store - from MmaVoltaTensorOpAccumulatorTileIterator
1722
+ CUTLASS_PRAGMA_UNROLL
1723
+ for (int tile_n = 0; tile_n < Policy::TileIterations::kColumn; ++tile_n) {
1724
+ CUTLASS_PRAGMA_UNROLL
1725
+ for (int tile_m = 0; tile_m < Policy::TileIterations::kRow; ++tile_m) {
1726
+ CUTLASS_PRAGMA_UNROLL
1727
+ for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) {
1728
+ CUTLASS_PRAGMA_UNROLL
1729
+ for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) {
1730
+ int mma_accum_start = (((tile_n * Policy::TileIterations::kRow + tile_m) *
1731
+ Policy::MmaIterations::kColumn +
1732
+ mma_n) *
1733
+ Policy::MmaIterations::kRow +
1734
+ mma_m) *
1735
+ kElementsPerMma;
1736
+
1737
+ CUTLASS_PRAGMA_UNROLL
1738
+ for (int p = 0; p < kAccumulatorPatials; ++p) {
1739
+ CUTLASS_PRAGMA_UNROLL
1740
+ for (int m = 0; m < EleShapePerPatial::kRow; ++m) {
1741
+ int accum_m = tile_m * Policy::InterleavedTile::kRow +
1742
+ mma_m * QuadShapePerPatialMma::kRow + m * 2;
1743
+ int accum_n = tile_n * Policy::InterleavedTile::kColumn +
1744
+ mma_n * QuadShapePerPatialMma::kColumn +
1745
+ p * Policy::InterleavedTile::kColumn / 2;
1746
+ int r = (accum_m + lane_offset.row());
1747
+ AccessType to_store;
1748
+ CUTLASS_PRAGMA_UNROLL
1749
+ for (int n = 0; n < EleShapePerPatial::kColumn; ++n) {
1750
+ int idx = mma_accum_start + p * kElementsPerPartial +
1751
+ m * EleShapePerPatial::kColumn + n;
1752
+ int c = (accum_n + n + lane_offset.column());
1753
+ to_store[n] = scalar_t(accum[idx]);
1754
+ }
1755
+ int c = (accum_n + lane_offset.column());
1756
+ assert(r < 32);
1757
+ assert(c < 32);
1758
+ *reinterpret_cast<AccessType*>(ref_.data() + ref_.offset({r, c})) =
1759
+ to_store;
1760
+ }
1761
+ }
1762
+ }
1763
+ }
1764
+ }
1765
+ }
1766
+ }
1767
+
1768
+ static void CUTLASS_DEVICE accumApplyLSEToSmem(AccumulatorSharedStorage& shared_storage,
1769
+ typename IteratorC::Fragment& accum,
1770
+ lse_scalar_t const* lse,
1771
+ int lse_extent,
1772
+ int thread_id,
1773
+ int warp_id,
1774
+ int lane_id,
1775
+ cutlass::MatrixCoord const& tile_coords)
1776
+ {
1777
+ // Non-optimized way to apply LSE to registers
1778
+ // NOTE: accum is attn.T
1779
+ // TODO: Optimize for each architecture
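+ // Illustrative note (added for clarity, not from the original source):
+ // subtracting the row's log-sum-exp before exponentiating recovers softmax
+ // probabilities from raw scores. With lse_i = log(sum_j exp(s_ij)),
+ //   exp(s_ij - lse_i) = exp(s_ij) / sum_j exp(s_ij),
+ // which is what the expf(accum[idx] - lse_prefetched[colIdx]) statement
+ // below computes, element by element.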
1780
+ static constexpr int WarpSize = 32;
1781
+ using AccumLambdaIterator =
1782
+ typename DefaultMmaAccumLambdaIterator<IteratorC, accum_t, WarpSize>::Iterator;
1783
+ auto lane_offset = AccumLambdaIterator::get_lane_offset(lane_id, warp_id, tile_coords);
1784
+
1785
+ cutlass::Array<lse_scalar_t, IteratorC::Fragment::kElements> lse_prefetched;
1786
+ lse_prefetched.clear();
1787
+ int rowIdx = 0;
1788
+ int colIdx = 0;
1789
+ AccumLambdaIterator::iterateRows(
1790
+ lane_offset,
1791
+ [&](int accum_m) {
1792
+ ++rowIdx;
1793
+ colIdx = 0;
1794
+ },
1795
+ [&](int accum_m, int accum_n, int idx) {
1796
+ if (rowIdx == 1) {
1797
+ lse_prefetched[colIdx] = accum_n < lse_extent
1798
+ ? lse[accum_n]
1799
+ : platform::numeric_limits<accum_t>::infinity();
1800
+ }
1801
+ accum[idx] = expf(accum[idx] - lse_prefetched[colIdx]);
1802
+ ++colIdx;
1803
+ },
1804
+ [&](int accum_m) {});
1805
+ accumToSmem(shared_storage, accum, lane_id, tile_coords);
1806
+ }
1807
+ };
1808
+
1809
+ // Simt Specialization
1810
+ // for f32 on Sm70-Sm75, and for f16/f32 on architectures below Sm70
1811
+
1812
+ template <typename Operator,
1813
+ typename OperatorPolicy,
1814
+ typename scalar_t,
1815
+ typename WarpShape_,
1816
+ typename ThreadblockShape_>
1817
+ struct B2bGemm<cutlass::gemm::warp::MmaSimtTileIterator<cutlass::MatrixShape<32, 32>,
1818
+ cutlass::gemm::Operand::kC,
1819
+ float,
1820
+ cutlass::layout::RowMajor,
1821
+ OperatorPolicy,
1822
+ 1,
1823
+ 1>,
1824
+ Operator,
1825
+ scalar_t,
1826
+ WarpShape_,
1827
+ ThreadblockShape_> {
1828
+ using IteratorC = cutlass::gemm::warp::MmaSimtTileIterator<cutlass::MatrixShape<32, 32>,
1829
+ cutlass::gemm::Operand::kC,
1830
+ float,
1831
+ cutlass::layout::RowMajor,
1832
+ OperatorPolicy,
1833
+ 1,
1834
+ 1>;
1835
+ using accum_t = typename IteratorC::Element;
1836
+ using WarpShape = WarpShape_;
1837
+ using ThreadblockShape = ThreadblockShape_;
1838
+ using FragmentC = typename IteratorC::Fragment;
1839
+ using lse_scalar_t = float;
1840
+
1841
+ // Storage in shared-memory for Q.Kt
1842
+ using AccumulatorSharedStorage =
1843
+ cutlass::gemm::threadblock::AccumulatorSharedStorage<ThreadblockShape,
1844
+ scalar_t,
1845
+ cutlass::layout::ColumnMajor,
1846
+ cutlass::MatrixShape<0, 0> // Padding
1847
+ >;
1848
+
1849
+ static void CUTLASS_DEVICE accumToSmem(AccumulatorSharedStorage& shared_storage,
1850
+ FragmentC const& accum,
1851
+ int lane_id,
1852
+ cutlass::MatrixCoord const& tile_coords)
1853
+ {
1854
+ using Policy = typename IteratorC::Policy;
1855
+ using Element = typename IteratorC::Element;
1856
+ using Iterations = typename IteratorC::Iterations;
1857
+ using Delta = typename IteratorC::Delta;
1858
+
1859
+ auto ref_ = shared_storage.accum_ref();
1860
+ // ctor - MmaSimtTileIterator
1861
+ // compute offset based on thread ID and lane layout
1862
+ typename Policy::LaneLayout lane_layout = Policy::get_lane_layout();
1863
+
1864
+ MatrixCoord lane_offset = lane_layout.inverse(lane_id) *
1865
+ MatrixCoord(Policy::LaneMmaShape::kM, Policy::LaneMmaShape::kN);
1866
+
1867
+ ref_.add_coord_offset(lane_offset);
1868
+
1869
+ // Tile offset
1870
+ ref_.add_coord_offset(tile_coords * cutlass::MatrixCoord({IteratorC::Shape::kRow,
1871
+ IteratorC::Shape::kColumn}));
1872
+
1873
+ // store - MmaSimtTileIterator
1874
+ CUTLASS_PRAGMA_UNROLL
1875
+ for (int mma_n = 0; mma_n < Iterations::kColumn; ++mma_n) {
1876
+ CUTLASS_PRAGMA_UNROLL
1877
+ for (int n = 0; n < Policy::LaneMmaShape::kN; ++n) {
1878
+ CUTLASS_PRAGMA_UNROLL
1879
+ for (int mma_m = 0; mma_m < Iterations::kRow; ++mma_m) {
1880
+ CUTLASS_PRAGMA_UNROLL
1881
+ for (int m = 0; m < Policy::LaneMmaShape::kM; ++m) {
1882
+ int r = Policy::LaneMmaShape::kM * (mma_m * Policy::WarpShape::kRow) + m;
1883
+ int c = mma_n * Delta::kColumn + n;
1884
+ int idx = n + Policy::LaneMmaShape::kN *
1885
+ (mma_n + Iterations::kColumn *
1886
+ (m + mma_m * Policy::LaneMmaShape::kM));
1887
+ ref_.at({r, c}) = scalar_t(accum[idx]);
1888
+ }
1889
+ }
1890
+ }
1891
+ }
1892
+ }
1893
+
1894
+ static void CUTLASS_DEVICE accumApplyLSEToSmem(AccumulatorSharedStorage& shared_storage,
1895
+ typename IteratorC::Fragment& accum,
1896
+ lse_scalar_t const* lse,
1897
+ int lse_extent,
1898
+ int thread_id,
1899
+ int warp_id,
1900
+ int lane_id,
1901
+ cutlass::MatrixCoord const& tile_coords)
1902
+ {
1903
+ // Non-optimized way to apply LSE to registers
1904
+ // NOTE: accum is attn.T
1905
+ // TODO: Optimize for each architecture
1906
+ static constexpr int WarpSize = 32;
1907
+ using AccumLambdaIterator =
1908
+ typename DefaultMmaAccumLambdaIterator<IteratorC, accum_t, WarpSize>::Iterator;
1909
+ auto lane_offset = AccumLambdaIterator::get_lane_offset(lane_id, warp_id, tile_coords);
1910
+
1911
+ cutlass::Array<lse_scalar_t, IteratorC::Fragment::kElements> lse_prefetched;
1912
+ lse_prefetched.clear();
1913
+ int rowIdx = 0;
1914
+ int colIdx = 0;
1915
+ AccumLambdaIterator::iterateRows(
1916
+ lane_offset,
1917
+ [&](int accum_m) {
1918
+ ++rowIdx;
1919
+ colIdx = 0;
1920
+ },
1921
+ [&](int accum_m, int accum_n, int idx) {
1922
+ if (rowIdx == 1) {
1923
+ lse_prefetched[colIdx] = accum_n < lse_extent
1924
+ ? lse[accum_n]
1925
+ : platform::numeric_limits<accum_t>::infinity();
1926
+ }
1927
+ accum[idx] = expf(accum[idx] - lse_prefetched[colIdx]);
1928
+ ++colIdx;
1929
+ },
1930
+ [&](int accum_m) {});
1931
+ accumToSmem(shared_storage, accum, lane_id, tile_coords);
1932
+ }
1933
+ };
1934
+
1935
+ } // namespace threadblock
1936
+ } // namespace gemm
1937
+ } // namespace cutlass
1938
+
1939
+ /////////////////////////////////////////////////////////////////////////////////////////////////
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/epilogue_predicated_tile_iterator.h ADDED
@@ -0,0 +1,691 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
3
+ *reserved. SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice,
9
+ *this list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
+ *ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23
+ *LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24
+ *CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25
+ *SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26
+ *INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27
+ *CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28
+ *ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29
+ *POSSIBILITY OF SUCH DAMAGE.
30
+ *
31
+ **************************************************************************************************/
32
+
33
+ // Copyright (c) Microsoft Corporation.
34
+ // SPDX-License-Identifier: Apache-2.0
35
+
36
+ // DeepSpeed Team
37
+
38
+ /*! \file
39
+ \brief Epilogue iterator that supports prefetching
40
+
41
+ Mostly copied from "cutlass/epilogue/threadblock/predicated_tile_iterator.h"
42
+ */
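+ // Illustrative note (added for clarity, not part of the upstream header):
+ // compared to the stock PredicatedTileIterator, this variant adds
+ // prefetch() / prefetch_all(), which issue L1 prefetches for the global
+ // memory the epilogue is about to touch. A caller would typically build a
+ // separate iterator instance just for prefetching, e.g. (hypothetical names):
+ //
+ //   PredicatedTileIteratorPrefetch<ThreadMap, float> it(
+ //       params, ptr_d, extent, thread_idx, tb_offset);
+ //   it.prefetch_all();  // walks every tile iteration, issuing prefetches
+ //
+ // prefetch_all() increments the iterator as it goes, so it is not meant to
+ // be the same instance later used for load()/store().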
43
+
44
+ #pragma once
45
+
46
+ #include "cutlass/arch/arch.h"
47
+ #include "cutlass/arch/memory.h"
48
+ #include "cutlass/array.h"
49
+ #include "cutlass/cutlass.h"
50
+ #include "cutlass/epilogue/threadblock/output_tile_thread_map.h"
51
+ #include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h"
52
+ #include "cutlass/layout/matrix.h"
53
+ #include "cutlass/layout/tensor.h"
54
+ #include "cutlass/matrix_shape.h"
55
+ #include "cutlass/numeric_types.h"
56
+ #include "cutlass/tensor_ref.h"
57
+ #include "cutlass/transform/pitch_linear_thread_map.h"
58
+
59
+ ////////////////////////////////////////////////////////////////////////////////
60
+
61
+ namespace cutlass {
62
+
63
+ ////////////////////////////////////////////////////////////////////////////////
64
+
65
+ namespace epilogue {
66
+ namespace threadblock {
67
+
68
+ ////////////////////////////////////////////////////////////////////////////////
69
+
70
+ /// Tile iterator used to load and store output tile from global memory in
71
+ /// epilogue.
72
+ ///
73
+ /// Satisfies: ReadableTileIterator | PredicatedTileIterator |
74
+ /// ForwardTileIterator
75
+ ///
76
+ template <typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
77
+ typename Element_, ///< Element data type
78
+ bool ScatterD = false, ///< Scatter D operand or not
79
+ bool UseCUDAStore = false>
80
+ class PredicatedTileIteratorPrefetch {
81
+ public:
82
+ using ThreadMap = ThreadMap_;
83
+ using Shape = typename ThreadMap::Shape;
84
+
85
+ using Element = Element_;
86
+
87
+ using Layout = layout::RowMajor;
88
+ using TensorRef = TensorRef<Element, Layout>;
89
+ using ConstTensorRef = typename TensorRef::ConstTensorRef;
90
+
91
+ using Index = typename Layout::Index;
92
+ using LongIndex = typename Layout::LongIndex;
93
+ using TensorCoord = MatrixCoord;
94
+
95
+ static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
96
+ static int const kThreads = ThreadMap::kThreads;
97
+ static int const kIterations = ThreadMap::Count::kTile;
98
+
99
+ static_assert(ThreadMap::Iterations::kRow > 0, "ThreadMap::Iterations::kRow must be > 0");
100
+ static_assert(ThreadMap::Iterations::kGroup > 0, "ThreadMap::Iterations::kGroup must be > 0");
101
+ static_assert(ThreadMap::Iterations::kCluster > 0,
102
+ "ThreadMap::Iterations::kCluster must be > 0");
103
+ static_assert(ThreadMap::Iterations::kColumn > 0, "ThreadMap::Iterations::kColumn must be > 0");
104
+
105
+ /// Fragment object
106
+ using Fragment = Array<Element,
107
+ ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow *
108
+ ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster *
109
+ ThreadMap::kElementsPerAccess>;
110
+
111
+ /// Memory access size
112
+ using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
113
+
114
+ //
115
+ // Parameters struct
116
+ //
117
+
118
+ /// Uses a non-template class
119
+ struct Params : PredicatedTileIteratorParams {
120
+ using Base = PredicatedTileIteratorParams;
121
+
122
+ CUTLASS_HOST_DEVICE
123
+ Params() {}
124
+
125
+ CUTLASS_HOST_DEVICE
126
+ Params(Layout const& layout)
127
+ : PredicatedTileIteratorParams(
128
+ layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
129
+ make_OutputTileThreadMapDesc<ThreadMap>())
130
+ {
131
+ }
132
+
133
+ CUTLASS_HOST_DEVICE
134
+ Params(Base const& base) : Base(base) {}
135
+ };
136
+
137
+ /// Mask object
138
+ struct Mask {
139
+ static int const kCount = ThreadMap::Iterations::kColumn;
140
+
141
+ /// Predicate state
142
+ bool predicates[kCount];
143
+
144
+ //
145
+ // Mask
146
+ //
147
+ CUTLASS_HOST_DEVICE
148
+ Mask() { enable(); }
149
+
150
+ ///< Efficiently disables all accesses guarded by mask
151
+ CUTLASS_HOST_DEVICE void clear()
152
+ {
153
+ CUTLASS_PRAGMA_UNROLL
154
+ for (int i = 0; i < kCount; ++i) { predicates[i] = false; }
155
+ }
156
+
157
+ ///< Efficiently enables all accesses guarded by mask
158
+ CUTLASS_DEVICE void enable()
159
+ {
160
+ CUTLASS_PRAGMA_UNROLL
161
+ for (int i = 0; i < kCount; ++i) { predicates[i] = true; }
162
+ }
163
+ };
164
+
165
+ private:
166
+ //
167
+ // Data members
168
+ //
169
+
170
+ /// Parameters structure containing reference and precomputed state.
171
+ PredicatedTileIteratorParams params_;
172
+
173
+ /// Byte-level pointer
174
+ uint8_t* byte_pointer_;
175
+
176
+ /// Array of boolean values to contain steady-state predicates
177
+ Mask mask_;
178
+
179
+ /// Extent of the matrix tile in rows
180
+ Index extent_row_;
181
+
182
+ /// Extent of the matrix tile in columns
183
+ Index extent_column_;
184
+
185
+ /// A thread's starting row position (assuming steady-state predicates have
186
+ /// been computed)
187
+ Index thread_start_row_;
188
+
189
+ /// A thread's starting column
190
+ Index thread_start_column_;
191
+
192
+ /// Internal state counter
193
+ int state_[3];
194
+
195
+ /// Scatter indices
196
+ int const* indices_;
197
+
198
+ //
199
+ // Static asserts about internal strides
200
+ //
201
+
202
+ static_assert(sizeof(extent_row_) == 4, "Expected 32b extents");
203
+ static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
204
+ static_assert(sizeof(PredicatedTileIteratorParams::stride) == 8, "Expected 64b strides");
205
+
206
+ private:
207
+ //
208
+ // Methods
209
+ //
210
+
211
+ public:
212
+ //
213
+ // Methods
214
+ //
215
+
216
+ /// Constructor
217
+ CUTLASS_DEVICE
218
+ PredicatedTileIteratorPrefetch(PredicatedTileIteratorParams const& params,
219
+ Element* pointer,
220
+ TensorCoord extent,
221
+ int thread_idx,
222
+ TensorCoord threadblock_offset = TensorCoord(),
223
+ int const* indices = nullptr)
224
+ : params_(params), indices_(indices)
225
+ {
226
+ TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;
227
+
228
+ extent_row_ = extent.row();
229
+ extent_column_ = extent.column();
230
+
231
+ thread_start_row_ = thread_offset.row();
232
+ thread_start_column_ = thread_offset.column();
233
+
234
+ // Initialize predicates
235
+ CUTLASS_PRAGMA_UNROLL
236
+ for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
237
+ mask_.predicates[c] =
238
+ ((thread_offset.column() + ThreadMap::Delta::kColumn * c) < extent.column());
239
+ }
240
+
241
+ // Null pointer performs no accesses
242
+ if (!pointer) { mask_.clear(); }
243
+
244
+ if (ScatterD && !indices) { mask_.clear(); }
245
+
246
+ // Initialize pointer
247
+ byte_pointer_ = reinterpret_cast<uint8_t*>(pointer) +
248
+ LongIndex(thread_offset.row()) * LongIndex(params_.stride) +
249
+ LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess;
250
+
251
+ if (ScatterD) {
252
+ byte_pointer_ =
253
+ reinterpret_cast<uint8_t*>(pointer) +
254
+ LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess;
255
+ }
256
+
257
+ // Initialize internal state counter
258
+ state_[0] = state_[1] = state_[2] = 0;
259
+ }
260
+
261
+ /// Adds a pointer offset in units of Element
262
+ CUTLASS_HOST_DEVICE
263
+ void add_pointer_offset(LongIndex pointer_offset)
264
+ {
265
+ byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
266
+ }
267
+
268
+ CUTLASS_DEVICE
269
+ void prefetch_all()
270
+ {
271
+ CUTLASS_PRAGMA_UNROLL
272
+ for (int iter = 0; iter < kIterations; ++iter) {
273
+ prefetch();
274
+ ++(*this);
275
+ }
276
+ }
277
+
278
+ CUTLASS_DEVICE
279
+ void prefetch()
280
+ {
281
+ uint8_t* byte_pointer = byte_pointer_;
282
+
283
+ CUTLASS_PRAGMA_UNROLL
284
+ for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
285
+ CUTLASS_PRAGMA_UNROLL
286
+ for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
287
+ CUTLASS_PRAGMA_UNROLL
288
+ for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
289
+ int row_offset = row * ThreadMap::Delta::kRow +
290
+ group * ThreadMap::Delta::kGroup +
291
+ cluster * ThreadMap::Delta::kCluster;
292
+
293
+ AccessType* memory_pointer = reinterpret_cast<AccessType*>(byte_pointer);
294
+
295
+ CUTLASS_PRAGMA_UNROLL
296
+ for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
297
+ // On Windows, using unsigned long here gives the error:
298
+ // error: asm operand type size(4) does not match
299
+ // type/size implied by constraint 'l'
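+ // Illustrative note (assumption, not from the original comment): the PTX
+ // instruction prefetch.global.L1 only hints the cache hierarchy to bring
+ // the line containing `addr` into L1; it returns no data, which is why the
+ // result operand of the inline asm is otherwise unused.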
300
+ uint64_t addr =
301
+ (uint64_t)((void*)&memory_pointer[column * ThreadMap::Delta::kColumn /
302
+ kElementsPerAccess]);
303
+ asm volatile("prefetch.global.L1 [ %1 ];" : "=l"(addr) : "l"(addr));
304
+ }
305
+
306
+ if (row + 1 < ThreadMap::Iterations::kRow) {
307
+ if (!ScatterD) { byte_pointer += params_.increment_row; }
308
+ }
309
+ }
310
+
311
+ if (group + 1 < ThreadMap::Iterations::kGroup) {
312
+ byte_pointer += params_.increment_group;
313
+ }
314
+ }
315
+
316
+ if (cluster + 1 < ThreadMap::Iterations::kCluster) {
317
+ byte_pointer += params_.increment_cluster;
318
+ }
319
+ }
320
+ }
321
+
322
+ /// Loads a fragment from memory
323
+ CUTLASS_DEVICE
324
+ void load_with_byte_offset(Fragment& frag, int64_t byte_offset) const
325
+ {
326
+ uint8_t* byte_pointer = byte_pointer_;
327
+ AccessType* frag_ptr = reinterpret_cast<AccessType*>(&frag);
328
+
329
+ CUTLASS_PRAGMA_UNROLL
330
+ for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
331
+ CUTLASS_PRAGMA_UNROLL
332
+ for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
333
+ CUTLASS_PRAGMA_UNROLL
334
+ for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
335
+ int frag_row_idx =
336
+ (row + ThreadMap::Iterations::kRow *
337
+ (group + ThreadMap::Iterations::kGroup * cluster));
338
+
339
+ int row_offset = row * ThreadMap::Delta::kRow +
340
+ group * ThreadMap::Delta::kGroup +
341
+ cluster * ThreadMap::Delta::kCluster;
342
+
343
+ bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
344
+
345
+ AccessType* memory_pointer =
346
+ reinterpret_cast<AccessType*>(byte_pointer + byte_offset);
347
+
348
+ if (ScatterD && row_guard) {
349
+ assert(indices_);
350
+
351
+ memory_pointer = reinterpret_cast<AccessType*>(
352
+ byte_pointer + byte_offset +
353
+ LongIndex(indices_[row_offset + thread_start_row_]) *
354
+ LongIndex(params_.stride));
355
+ }
356
+
357
+ CUTLASS_PRAGMA_UNROLL
358
+ for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
359
+ bool guard = row_guard && mask_.predicates[column];
360
+
361
+ cutlass::arch::global_load<AccessType, sizeof(AccessType)>(
362
+ frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
363
+ (void*)&memory_pointer[column * ThreadMap::Delta::kColumn /
364
+ kElementsPerAccess],
365
+ guard);
366
+ }
367
+
368
+ if (row + 1 < ThreadMap::Iterations::kRow) {
369
+ if (!ScatterD) { byte_pointer += params_.increment_row; }
370
+ }
371
+ }
372
+
373
+ if (group + 1 < ThreadMap::Iterations::kGroup) {
374
+ byte_pointer += params_.increment_group;
375
+ }
376
+ }
377
+
378
+ if (cluster + 1 < ThreadMap::Iterations::kCluster) {
379
+ byte_pointer += params_.increment_cluster;
380
+ }
381
+ }
382
+ }
383
+
384
+ /// Loads a fragment from memory
385
+ CUTLASS_DEVICE
386
+ void load(Fragment& frag) const { load_with_byte_offset(frag, 0); }
387
+
388
+ /// Stores a fragment to memory
389
+ CUTLASS_DEVICE
390
+ void store_with_byte_offset(Fragment const& frag, int64_t byte_offset) const
391
+ {
392
+ uint8_t* byte_pointer = byte_pointer_;
393
+ AccessType const* frag_ptr = reinterpret_cast<AccessType const*>(&frag);
394
+
395
+ CUTLASS_PRAGMA_UNROLL
396
+ for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
397
+ CUTLASS_PRAGMA_UNROLL
398
+ for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
399
+ CUTLASS_PRAGMA_UNROLL
400
+ for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
401
+ int frag_row_idx =
402
+ (row + ThreadMap::Iterations::kRow *
403
+ (group + ThreadMap::Iterations::kGroup * cluster));
404
+
405
+ int row_offset = row * ThreadMap::Delta::kRow +
406
+ group * ThreadMap::Delta::kGroup +
407
+ cluster * ThreadMap::Delta::kCluster;
408
+
409
+ bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
410
+
411
+ AccessType* memory_pointer =
412
+ reinterpret_cast<AccessType*>(byte_pointer + byte_offset);
413
+
414
+ if (ScatterD && row_guard) {
415
+ assert(indices_);
416
+
417
+ memory_pointer = reinterpret_cast<AccessType*>(
418
+ byte_pointer + byte_offset +
419
+ LongIndex(indices_[row_offset + thread_start_row_]) *
420
+ LongIndex(params_.stride));
421
+ }
422
+
423
+ CUTLASS_PRAGMA_UNROLL
424
+ for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
425
+ bool guard = row_guard && mask_.predicates[column];
426
+
427
+ if (UseCUDAStore) {
428
+ if (guard) {
429
+ memory_pointer[column * ThreadMap::Delta::kColumn /
430
+ kElementsPerAccess] =
431
+ frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn +
432
+ column];
433
+ }
434
+ } else {
435
+ cutlass::arch::global_store<AccessType, sizeof(AccessType)>(
436
+ frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
437
+ (void*)&memory_pointer[column * ThreadMap::Delta::kColumn /
438
+ kElementsPerAccess],
439
+ guard);
440
+ }
441
+ }
442
+
443
+ if (row + 1 < ThreadMap::Iterations::kRow) {
444
+ if (!ScatterD) { byte_pointer += params_.increment_row; }
445
+ }
446
+ }
447
+
448
+ if (group + 1 < ThreadMap::Iterations::kGroup) {
449
+ byte_pointer += params_.increment_group;
450
+ }
451
+ }
452
+
453
+ if (cluster + 1 < ThreadMap::Iterations::kCluster) {
454
+ byte_pointer += params_.increment_cluster;
455
+ }
456
+ }
457
+ }
458
+
459
+ /// Stores a fragment to memory
460
+ CUTLASS_DEVICE
461
+ void store(Fragment const& frag) const { store_with_byte_offset(frag, 0); }
462
+
463
+ /// Loads a fragment from memory
464
+ CUTLASS_DEVICE
465
+ void downsample_load_with_byte_offset(Fragment& frag,
466
+ int64_t byte_offset,
467
+ int convolution_P,
468
+ int convolution_Q,
469
+ int add_P,
470
+ int add_Q,
471
+ int problem_N) const
472
+ {
473
+ uint8_t* byte_pointer = byte_pointer_;
474
+ AccessType* frag_ptr = reinterpret_cast<AccessType*>(&frag);
475
+
476
+ CUTLASS_PRAGMA_UNROLL
477
+ for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
478
+ CUTLASS_PRAGMA_UNROLL
479
+ for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
480
+ CUTLASS_PRAGMA_UNROLL
481
+ for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
482
+ int frag_row_idx =
483
+ (row + ThreadMap::Iterations::kRow *
484
+ (group + ThreadMap::Iterations::kGroup * cluster));
485
+
486
+ int row_offset = row * ThreadMap::Delta::kRow +
487
+ group * ThreadMap::Delta::kGroup +
488
+ cluster * ThreadMap::Delta::kCluster;
489
+
490
+ bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
491
+
492
+ int output_row = row_offset + thread_start_row_;
493
+ int output_N = output_row / (convolution_P * convolution_Q);
494
+ int output_PQ = output_row % (convolution_P * convolution_Q);
495
+ int output_P = output_PQ / convolution_Q;
496
+ int output_Q = output_PQ % convolution_Q;
497
+
498
+ int input_row = output_N * 2 * convolution_P * 2 * convolution_Q +
499
+ (2 * output_P + add_P) * 2 * convolution_Q + 2 * output_Q +
500
+ add_Q;
501
+
502
+ int64_t byte_offset = (input_row - output_row) * problem_N * sizeof(float);
503
+
504
+ AccessType* memory_pointer =
505
+ reinterpret_cast<AccessType*>(byte_pointer + byte_offset);
506
+
507
+ CUTLASS_PRAGMA_UNROLL
508
+ for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
509
+ bool guard = row_guard && mask_.predicates[column];
510
+
511
+ cutlass::arch::global_load<AccessType, sizeof(AccessType)>(
512
+ frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
513
+ (void*)&memory_pointer[column * ThreadMap::Delta::kColumn /
514
+ kElementsPerAccess],
515
+ guard);
516
+ }
517
+
518
+ if (row + 1 < ThreadMap::Iterations::kRow) {
519
+ byte_pointer += params_.increment_row;
520
+ }
521
+ }
522
+
523
+ if (group + 1 < ThreadMap::Iterations::kGroup) {
524
+ byte_pointer += params_.increment_group;
525
+ }
526
+ }
527
+
528
+ if (cluster + 1 < ThreadMap::Iterations::kCluster) {
529
+ byte_pointer += params_.increment_cluster;
530
+ }
531
+ }
532
+ }
533
+
534
+ /// Loads a fragment from memory
535
+ CUTLASS_DEVICE
536
+ void upsample_load_with_byte_offset(Fragment& frag,
537
+ int64_t byte_offset,
538
+ int convolution_P,
539
+ int convolution_Q,
540
+ int add_P,
541
+ int add_Q,
542
+ int problem_N) const
543
+ {
544
+ uint8_t* byte_pointer = byte_pointer_;
545
+ AccessType* frag_ptr = reinterpret_cast<AccessType*>(&frag);
546
+
547
+ CUTLASS_PRAGMA_UNROLL
548
+ for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
549
+ CUTLASS_PRAGMA_UNROLL
550
+ for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
551
+ CUTLASS_PRAGMA_UNROLL
552
+ for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
553
+ int frag_row_idx =
554
+ (row + ThreadMap::Iterations::kRow *
555
+ (group + ThreadMap::Iterations::kGroup * cluster));
556
+
557
+ int row_offset = row * ThreadMap::Delta::kRow +
558
+ group * ThreadMap::Delta::kGroup +
559
+ cluster * ThreadMap::Delta::kCluster;
560
+
561
+ bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
562
+
563
+ int output_row = row_offset + thread_start_row_;
564
+ int output_N = output_row / (convolution_P * convolution_Q);
565
+ int output_PQ = output_row % (convolution_P * convolution_Q);
566
+ int output_P = output_PQ / convolution_Q;
567
+ int output_Q = output_PQ % convolution_Q;
568
+ int row_add_P = add_P;
569
+ int row_add_Q = add_Q;
570
+ if (output_P > convolution_P - 2) row_add_P = 0;
571
+ if (output_Q > convolution_Q - 2) row_add_Q = 0;
572
+
573
+ int input_row = output_N * (convolution_P / 2) * (convolution_Q / 2) +
574
+ ((output_P + row_add_P) / 2) * (convolution_Q / 2) +
575
+ (output_Q + row_add_Q) / 2;
576
+
577
+ int64_t byte_offset = (input_row - output_row) * problem_N * sizeof(float);
578
+
579
+ AccessType* memory_pointer =
580
+ reinterpret_cast<AccessType*>(byte_pointer + byte_offset);
581
+
582
+ CUTLASS_PRAGMA_UNROLL
583
+ for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
584
+ bool guard = row_guard && mask_.predicates[column];
585
+
586
+ cutlass::arch::global_load<AccessType, sizeof(AccessType)>(
587
+ frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
588
+ (void*)&memory_pointer[column * ThreadMap::Delta::kColumn /
589
+ kElementsPerAccess],
590
+ guard);
591
+ }
592
+
593
+ if (row + 1 < ThreadMap::Iterations::kRow) {
594
+ byte_pointer += params_.increment_row;
595
+ }
596
+ }
597
+
598
+ if (group + 1 < ThreadMap::Iterations::kGroup) {
599
+ byte_pointer += params_.increment_group;
600
+ }
601
+ }
602
+
603
+ if (cluster + 1 < ThreadMap::Iterations::kCluster) {
604
+ byte_pointer += params_.increment_cluster;
605
+ }
606
+ }
607
+ }
608
+
609
+ CUTLASS_DEVICE
610
+ MatrixCoord thread_start() const
611
+ {
612
+ return MatrixCoord(thread_start_row_, thread_start_column_);
613
+ }
614
+
615
+ /// Need to get the thread start row from the tile iterator
616
+ CUTLASS_DEVICE
617
+ int32_t thread_start_row() const { return thread_start_row_; }
618
+
619
+ /// Need to get the thread start column from the tile iterator
620
+ CUTLASS_DEVICE
621
+ int32_t thread_start_column() const { return thread_start_column_; }
622
+
623
+ /// Extent of the matrix in rows
624
+ CUTLASS_DEVICE
625
+ Index extent_row() const { return extent_row_; }
626
+
627
+ /// Extent of the matrix in columns
628
+ CUTLASS_DEVICE
629
+ Index extent_column() const { return extent_column_; }
630
+
631
+ /// Advances to the next position to load or store
632
+ CUTLASS_HOST_DEVICE
633
+ PredicatedTileIteratorPrefetch& operator++()
634
+ {
635
+ ++state_[0];
636
+
637
+ if (!ScatterD) { byte_pointer_ += params_.advance_row; }
638
+
639
+ thread_start_row_ += ThreadMap::Shape::kRow;
640
+
641
+ if (state_[0] == ThreadMap::Count::kRow) {
642
+ state_[0] = 0;
643
+ ++state_[1];
644
+ byte_pointer_ += params_.advance_group;
645
+
646
+ thread_start_row_ +=
647
+ (ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * ThreadMap::Count::kRow;
648
+
649
+ if (state_[1] == ThreadMap::Count::kGroup) {
650
+ state_[1] = 0;
651
+ ++state_[2];
652
+ byte_pointer_ += params_.advance_cluster;
653
+
654
+ thread_start_row_ += ThreadMap::Count::kGroup * ThreadMap::Shape::kGroup *
655
+ ThreadMap::Count::kRow * ThreadMap::Shape::kRow;
656
+
657
+ if (state_[2] == ThreadMap::Count::kCluster) {
658
+ state_[2] = 0;
659
+ byte_pointer_ += params_.advance_tile;
660
+ }
661
+ }
662
+ }
663
+
664
+ return *this;
665
+ }
666
+
667
+ ///< Efficiently disables all accesses guarded by mask
668
+ CUTLASS_DEVICE void clear_mask() { mask_.clear(); }
669
+
670
+ ///< Efficiently enables all accesses guarded by mask
671
+ CUTLASS_DEVICE void enable_mask() { mask_.enable(); }
672
+
673
+ ///< Gets the mask
674
+ CUTLASS_DEVICE void get_mask(Mask& mask) const { mask = mask_; }
675
+
676
+ ///< Sets the mask
677
+ CUTLASS_DEVICE void set_mask(Mask const& mask) { mask_ = mask; }
678
+ };
679
+
680
+ template <typename IT>
681
+ struct MakePrefetchableIterator {
682
+ using Iterator = PredicatedTileIteratorPrefetch<typename IT::ThreadMap, typename IT::Element>;
683
+ };
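+ // Usage sketch (illustrative, hypothetical alias names): given an epilogue
+ // output iterator type `It` exposing the usual ThreadMap/Element members,
+ // the prefetch-capable counterpart is obtained as
+ //   using PrefetchIt = typename MakePrefetchableIterator<It>::Iterator;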
684
+
685
+ ///////////////////////////////////////////////////////////////////////////////
686
+
687
+ } // namespace threadblock
688
+ } // namespace epilogue
689
+ } // namespace cutlass
690
+
691
+ ////////////////////////////////////////////////////////////////////////////////
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/make_residual_last.h ADDED
@@ -0,0 +1,91 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ // Copyright (c) Microsoft Corporation.
33
+ // SPDX-License-Identifier: Apache-2.0
34
+
35
+ // DeepSpeed Team
36
+
37
+ #pragma once
38
+
39
+ #include "predicated_tile_access_iterator_residual_last.h"
40
+ #include "predicated_tile_iterator_residual_last.h"
41
+
42
+ namespace cutlass {
43
+ namespace transform {
44
+ namespace threadblock {
45
+
46
+ template <typename BaseIterator>
47
+ struct MakeIteratorResidualLast;
48
+
49
+ template <typename Shape,
50
+ typename Element,
51
+ typename Layout,
52
+ int AdvanceRank,
53
+ typename ThreadMap,
54
+ int AccessSize,
55
+ bool Gather>
56
+ struct MakeIteratorResidualLast<
57
+ PredicatedTileIterator<Shape, Element, Layout, AdvanceRank, ThreadMap, AccessSize, Gather>> {
58
+ using Iterator = PredicatedTileIteratorResidualLast<Shape,
59
+ Element,
60
+ Layout,
61
+ AdvanceRank,
62
+ ThreadMap,
63
+ AccessSize,
64
+ Gather>;
65
+ };
66
+
67
+ template <typename Shape,
68
+ typename Element,
69
+ typename Layout,
70
+ int AdvanceRank,
71
+ typename ThreadMap,
72
+ typename AccessType,
73
+ bool Gather>
74
+ struct MakeIteratorResidualLast<PredicatedTileAccessIterator<Shape,
75
+ Element,
76
+ Layout,
77
+ AdvanceRank,
78
+ ThreadMap,
79
+ AccessType,
80
+ Gather>> {
81
+ using Iterator = PredicatedTileAccessIteratorResidualLast<Shape,
82
+ Element,
83
+ Layout,
84
+ AdvanceRank,
85
+ ThreadMap,
86
+ AccessType,
87
+ Gather>;
88
+ };
89
+ } // namespace threadblock
90
+ } // namespace transform
91
+ } // namespace cutlass
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/predicated_tile_access_iterator_residual_last.h ADDED
@@ -0,0 +1,1964 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
3
+ *reserved. SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice,
9
+ *this list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
+ *ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23
+ *LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24
+ *CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25
+ *SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26
+ *INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27
+ *CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28
+ *ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29
+ *POSSIBILITY OF SUCH DAMAGE.
30
+ *
31
+ **************************************************************************************************/
32
+
33
+ // Copyright (c) Microsoft Corporation.
34
+ // SPDX-License-Identifier: Apache-2.0
35
+
36
+ // DeepSpeed Team
37
+
38
+ /*! \file
39
+ \brief Templates calculating the address and predicates to the load of tiles
40
+ from pitch-linear rank=2 tensors.
41
+
42
+ This iterator uses masks to guard out-of-bounds accesses. The first tile
43
+ this iterator visits may be partial, then the remaining tiles are complete.
44
+ So, we only need to compute the predicates twice, once before the first tile
45
+ and once for the remaining full tiles which can share the same predicates.
46
+
47
+ A precomputed "Params" object minimizes the amount of state that must be
48
+ stored in registers, and integer addition is used to advance the pointer
49
+ through memory.
50
+ */
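+ // Illustrative note (added for clarity, not part of the upstream header):
+ // the constructor captures the mask computed for the partial ("residual")
+ // tile and then recomputes steady-state predicates for the full tiles. A
+ // mainloop can therefore run most iterations with the cheaper steady-state
+ // mask and call set_residual_tile(true) only when the single partial tile is
+ // about to be visited, which swaps the residual mask back in.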
51
+
52
+ #pragma once
53
+
54
+ #include "cutlass/array.h"
55
+ #include "cutlass/coord.h"
56
+ #include "cutlass/cutlass.h"
57
+ #include "cutlass/layout/matrix.h"
58
+ #include "cutlass/layout/pitch_linear.h"
59
+ #include "cutlass/matrix_shape.h"
60
+ #include "cutlass/predicate_vector.h"
61
+ #include "cutlass/tensor_ref.h"
62
+ #include "cutlass/tensor_view.h"
63
+ #include "cutlass/transform/threadblock/predicated_tile_access_iterator_params.h"
64
+
65
+ ////////////////////////////////////////////////////////////////////////////////
66
+
67
+ ////////////////////////////////////////////////////////////////////////////////
68
+
69
+ namespace cutlass {
70
+ namespace transform {
71
+ namespace threadblock {
72
+
73
+ ////////////////////////////////////////////////////////////////////////////////
74
+
75
+ /// PredicatedTileAccessIteratorResidualLast
76
+ ///
77
+ template <typename Shape,
78
+ typename Element,
79
+ typename Layout,
80
+ int AdvanceRank,
81
+ typename ThreadMap,
82
+ typename AccessType,
83
+ bool Gather = false>
84
+ class PredicatedTileAccessIteratorResidualLast;
85
+
86
+ ////////////////////////////////////////////////////////////////////////////////
87
+
88
+ /// Specialization of PredicatedTileAccessIteratorResidualLast for pitch-linear
89
+ /// data.
90
+ ///
91
+ template <typename Shape_,
92
+ typename Element_,
93
+ int AdvanceRank,
94
+ typename ThreadMap_,
95
+ typename AccessType_,
96
+ bool Gather>
97
+ class PredicatedTileAccessIteratorResidualLast<Shape_,
98
+ Element_,
99
+ layout::PitchLinear,
100
+ AdvanceRank,
101
+ ThreadMap_,
102
+ AccessType_,
103
+ Gather> {
104
+ public:
105
+ static_assert(AdvanceRank == 0 || AdvanceRank == 1,
106
+ "Specialization for pitch-linear iterator may along advance along the "
107
+ "contiguous(rank=0) or strided(rank=1) dimension.");
108
+
109
+ using Shape = Shape_;
110
+ using Element = Element_;
111
+ using Layout = layout::PitchLinear;
112
+ static int const kAdvanceRank = AdvanceRank;
113
+ using ThreadMap = ThreadMap_;
114
+ using AccessType = AccessType_;
115
+
116
+ using Index = typename Layout::Index;
117
+ using LongIndex = typename Layout::LongIndex;
118
+
119
+ using TensorRef = TensorRef<Element, Layout>;
120
+ using TensorView = TensorView<Element, Layout>;
121
+ using TensorCoord = typename Layout::TensorCoord;
122
+
123
+ using Pointer = Element*;
124
+ using NonConstPointer = typename platform::remove_const<Element>::type*;
125
+
126
+ using UnderlyingPredicates = PredicatedTileAccessIteratorPredicates<Shape,
127
+ Element,
128
+ Layout,
129
+ AdvanceRank,
130
+ ThreadMap,
131
+ AccessType>;
132
+
133
+ static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
134
+
135
+ static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
136
+ "Vectors implied by the thread map must be divisible by the access type.");
137
+
138
+ using Mask = typename UnderlyingPredicates::Mask;
139
+
140
+ /// Uses a non-template class
141
+ struct Params : PredicatedTileAccessIteratorParams {
142
+ using Base = PredicatedTileAccessIteratorParams;
143
+
144
+ // Default ctor
145
+ CUTLASS_HOST_DEVICE
146
+ Params() {}
147
+
148
+ /// Construct the Params object given a pitch-linear tensor's layout
149
+ CUTLASS_HOST_DEVICE
150
+ Params(Layout const& layout)
151
+ : Base(layout.stride(0),
152
+ MakePredicatedTileAccessIteratorDesc<Shape,
153
+ Element,
154
+ Layout,
155
+ kAdvanceRank,
156
+ ThreadMap>()())
157
+ {
158
+ }
159
+
160
+ CUTLASS_HOST_DEVICE
161
+ Params(Base const& base) : Base(base) {}
162
+ };
163
+
164
+ private:
165
+ /// Internal pointer type permits fast address arithmetic
166
+ using BytePointer = char*;
167
+
168
+ private:
169
+ //
170
+ // Data members
171
+ //
172
+
173
+ UnderlyingPredicates the_predicates;
174
+ Mask residual_tile_mask;
175
+
176
+ /// Parameters object with precomputed internal state
177
+ Params params_;
178
+
179
+ /// Internal pointer to first access of tile
180
+ BytePointer pointer_;
181
+
182
+ /// Below is used when Gather is turned on. We need to record strided_offset
183
+ /// and contiguous_offset separated to compute the offset by using
184
+ ///
185
+ /// offset = contiguous_offset + indices[strided_offset]
186
+ ///
187
+
188
+ /// Gather indices
189
+ int const* indices_;
190
+
191
+ Index gather_offset_strided;
192
+
193
+ private:
194
+ /// Computes predicates based on internally tracked per-thread offset.
195
+ CUTLASS_DEVICE
196
+ void compute_predicates_(
197
+ /// Extent of the matrix window
198
+ TensorCoord extent,
199
+ /// optionally, simplify predicate calculation during 'steady state' phase
200
+ bool is_steady_state = false)
201
+ {
202
+ the_predicates.compute_predicates_(extent, is_steady_state);
203
+ }
204
+
205
+ public:
206
+ /// Constructs a TileIterator from its precomputed state, threadblock offset,
207
+ /// and thread ID
208
+ CUTLASS_HOST_DEVICE
209
+ PredicatedTileAccessIteratorResidualLast(
210
+ /// Precomputed parameters object
211
+ Params const& params,
212
+ /// Pointer to start of tensor
213
+ Pointer pointer,
214
+ /// Extent of tensor
215
+ TensorCoord extent,
216
+ /// ID of each participating thread
217
+ int thread_id,
218
+ /// Initial offset of threadblock
219
+ TensorCoord const& threadblock_offset,
220
+ /// Gather indices
221
+ int const* indices = nullptr)
222
+ : params_(params),
223
+ pointer_(reinterpret_cast<BytePointer>(const_cast<NonConstPointer>(pointer))),
224
+ the_predicates(extent),
225
+ indices_(indices)
226
+ {
227
+ the_predicates.set_predicates(thread_id, threadblock_offset);
228
+ the_predicates.get_mask(residual_tile_mask);
229
+
230
+ // Working around a weird compiler bug happening on P100 for the backward.
231
+ // I've seen together: the_predicates.predicates_[0] = 14 (instead of 15)
232
+ // residual_tile_mask[0] = 15 (correct)
233
+ //
234
+ // Adding prints when the value is calculated (in `compute_predicates_`)
235
+ // sometimes removes the bug. The consequence is that we skip some
236
+ // element of a tensor, leading to wrong results
237
+ // Setting `compute_predicates_`'s second argument (`is_steady_state`) to
238
+ // true also seems to get rid of the bug - at the cost of twice as many
239
+ // comparisons.
240
+ #if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700)
241
+ constexpr bool kWorkAroundCompilerBug = false;
242
+ #else
243
+ constexpr bool kWorkAroundCompilerBug = true;
244
+ #endif
245
+ the_predicates.compute_predicates_(extent, true && !kWorkAroundCompilerBug);
246
+
247
+ // update internal pointers
248
+ Layout layout(params_.stride_);
249
+
250
+ if (!Gather) {
251
+ add_pointer_offset(layout(the_predicates.thread_offset_));
252
+ } else {
253
+ gather_offset_strided = the_predicates.thread_offset_.strided();
254
+ add_pointer_offset(layout(make_Coord(the_predicates.thread_offset_.contiguous(), 0)));
255
+ }
256
+ }
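+ // Typical driving pattern (a hedged sketch; `consume` and `num_full_tiles`
+ // are illustrative names, not symbols from this file): full tiles run with
+ // the steady-state mask computed above, and set_residual_tile(true) swaps in
+ // the mask captured in residual_tile_mask right before the final, possibly
+ // partial tile so its out-of-bounds accesses are predicated off.
+ //
+ //   for (int k = 0; k < num_full_tiles; ++k) { consume(it); }
+ //   it.set_residual_tile(true);   // the last tile may be partial
+ //   consume(it);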
257
+
258
+ /// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
259
+ /// offset
260
+ CUTLASS_HOST_DEVICE
261
+ PredicatedTileAccessIteratorResidualLast(
262
+ /// Precomputed parameters object
263
+ Params const& params,
264
+ /// Pointer to start of tensor
265
+ Pointer pointer,
266
+ /// Extent of tensor
267
+ TensorCoord extent,
268
+ ///< ID of each participating thread
269
+ int thread_id)
270
+ : PredicatedTileAccessIteratorResidualLast(params,
271
+ pointer,
272
+ extent,
273
+ thread_id,
274
+ make_Coord(0, 0))
275
+ {
276
+ }
277
+
278
+ /// Overrides the internal iteration index
279
+ CUTLASS_HOST_DEVICE
280
+ void set_iteration_index(int index) { the_predicates.set_iteration_index(index); }
281
+
282
+ CUTLASS_HOST_DEVICE
283
+ void set_residual_tile(bool is_residual_tile)
284
+ {
285
+ if (is_residual_tile) { the_predicates.set_mask(residual_tile_mask); }
286
+ }
287
+
288
+ /// Adds a pointer offset in units of Element
289
+ CUTLASS_HOST_DEVICE
290
+ void add_pointer_offset(LongIndex pointer_offset)
291
+ {
292
+ pointer_ += sizeof_bits<Element>::value * pointer_offset / 8;
293
+ }
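+ // Note: pointer_ is a byte pointer, so the element offset above is scaled by
+ // sizeof_bits<Element>::value / 8. Illustrative example (numbers assumed, not
+ // taken from this file): with a 16-bit Element, an offset of 8 elements
+ // advances the pointer by 8 * 16 / 8 = 16 bytes.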
294
+
295
+ /// Advances an iterator along logical dimensions of matrix in units of whole
296
+ /// tiles
297
+ CUTLASS_DEVICE
298
+ void add_tile_offset(TensorCoord const& tile_offset)
299
+ {
300
+ if (!Gather) {
301
+ if (kAdvanceRank) {
302
+ pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided());
303
+ pointer_ += Shape::kContiguous * tile_offset.contiguous();
304
+ } else {
305
+ pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous());
306
+ pointer_ += Shape::kStrided * tile_offset.strided();
307
+ }
308
+ } else {
309
+ add_pointer_offset(Shape::kContiguous * tile_offset.contiguous());
310
+ gather_offset_strided += Shape::kStrided * tile_offset.strided();
311
+ }
312
+ }
313
+
314
+ /// Returns a pointer
315
+ CUTLASS_HOST_DEVICE
316
+ AccessType* get() const
317
+ {
318
+ if (Gather) {
319
+ assert(indices_);
320
+
321
+ if (!valid()) { return nullptr; }
322
+
323
+ LongIndex contiguous_offset =
324
+ the_predicates.iteration_contiguous_ *
325
+ (ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value / 8) +
326
+ the_predicates.iteration_vector_;
327
+ int strided_index = gather_offset_strided +
328
+ the_predicates.iteration_strided_ * ThreadMap::Delta::kStrided;
329
+
330
+ LongIndex strided_offset = indices_[strided_index] * LongIndex(params_.stride_) *
331
+ sizeof_bits<Element>::value / 8;
332
+
333
+ return reinterpret_cast<AccessType*>(pointer_ + contiguous_offset + strided_offset);
334
+ }
335
+
336
+ return reinterpret_cast<AccessType*>(
337
+ pointer_ + the_predicates.iteration_contiguous_ *
338
+ (ThreadMap::Delta::kContiguous * sizeof_bits<Element>::value) /
339
+ 8) +
340
+ the_predicates.iteration_vector_;
341
+ }
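+ // Gather addressing recap: the contiguous byte offset comes from this
+ // thread's contiguous/vector position, while the strided component is an
+ // indirect lookup, indices_[gather_offset_strided + iteration_strided_ *
+ // ThreadMap::Delta::kStrided], scaled by the tensor stride in bytes. Rows can
+ // therefore be visited in whatever order the caller's index array specifies.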
342
+
343
+ /// Pre-increment: advances to the next access and returns a reference to self.
344
+ CUTLASS_HOST_DEVICE
345
+ PredicatedTileAccessIteratorResidualLast& operator++()
346
+ {
347
+ the_predicates.operator++();
348
+
349
+ ++the_predicates.iteration_vector_;
350
+ if (the_predicates.iteration_vector_ < kAccessesPerVector) { return *this; }
351
+
352
+ the_predicates.iteration_vector_ = 0;
353
+ ++the_predicates.iteration_contiguous_;
354
+
355
+ if (the_predicates.iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
356
+ return *this;
357
+ }
358
+
359
+ // Enter here only if (iteration_contiguous_ ==
360
+ // ThreadMap::Iterations::kContiguous)
361
+ the_predicates.iteration_contiguous_ = 0;
362
+ ++the_predicates.iteration_strided_;
363
+
364
+ if (the_predicates.iteration_strided_ < ThreadMap::Iterations::kStrided) {
365
+ if (!Gather) { pointer_ += params_.inc_strided_; }
366
+
367
+ return *this;
368
+ }
369
+
370
+ // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided)
371
+ // which means we enter the next tile.
372
+ the_predicates.iteration_strided_ = 0;
373
+
374
+ if (!Gather) {
375
+ // advance to next tile
376
+ pointer_ += params_.inc_next_;
377
+
378
+ // now return to start tile - if the iterator is subsequently advanced,
379
+ // this subtraction as well as the subsequent integer addition are both
380
+ // elided by the compiler.
381
+ pointer_ -= params_.inc_advance_;
382
+ }
383
+
384
+ return *this;
385
+ }
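+ // Iteration order of operator++ above: vector slots within an access, then
+ // the contiguous dimension, then the strided dimension. After the last
+ // per-tile access, adding inc_next_ and then subtracting inc_advance_ nets
+ // out to returning the pointer to the start of the current tile; advancing to
+ // the next tile is left to an explicit add_tile_offset() by the caller.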
386
+
387
+ /// Post-increment: advances the iterator and returns a copy of the prior state.
388
+ CUTLASS_HOST_DEVICE
389
+ PredicatedTileAccessIteratorResidualLast operator++(int)
390
+ {
391
+ PredicatedTileAccessIteratorResidualLast self(*this);
392
+ operator++();
393
+ return self;
394
+ }
395
+
396
+ /// Clears the predicate set efficiently
397
+ CUTLASS_HOST_DEVICE
398
+ void clear_mask(bool enable = true) { the_predicates.clear_mask(enable); }
399
+
400
+ /// Clears the predicate set efficiently
401
+ CUTLASS_HOST_DEVICE
402
+ void enable_mask() { the_predicates.enable_mask(); }
403
+
404
+ /// Sets the predicate mask, overriding value stored in predicate iterator
405
+ CUTLASS_HOST_DEVICE
406
+ void set_mask(Mask const& mask) { the_predicates.set_mask(mask); }
407
+
408
+ /// Gets the mask
409
+ CUTLASS_HOST_DEVICE
410
+ void get_mask(Mask& mask) { the_predicates.get_mask(mask); }
411
+
412
+ /// Returns whether access is valid or not
413
+ CUTLASS_HOST_DEVICE
414
+ bool valid() const { return the_predicates.valid(); }
415
+ };
416
+
417
+ ////////////////////////////////////////////////////////////////////////////////
418
+
419
+ /// Specialization of PredicatedTileAccessIteratorResidualLast for column-major
420
+ /// data.
421
+ ///
422
+ /// Satisfies: ForwardTileIteratorConcept |
423
+ /// ReadableContiguousTileIteratorConcept |
424
+ /// WriteableContiguousTileIteratorConcept |
425
+ /// MaskedTileIteratorConcept
426
+ ///
427
+ template <typename Shape_,
428
+ typename Element_,
429
+ int AdvanceRank,
430
+ typename ThreadMap_,
431
+ typename AccessType_,
432
+ bool Gather>
433
+ class PredicatedTileAccessIteratorResidualLast<Shape_,
434
+ Element_,
435
+ layout::ColumnMajor,
436
+ AdvanceRank,
437
+ ThreadMap_,
438
+ AccessType_,
439
+ Gather> {
440
+ public:
441
+ static_assert(AdvanceRank == 0 || AdvanceRank == 1,
442
+ "Specialization for pitch-linear iterator may advance along the "
443
+ "contiguous(rank=0) or strided(rank=1) dimension.");
444
+
445
+ using Shape = Shape_;
446
+ using Element = Element_;
447
+ using Layout = layout::ColumnMajor;
448
+ static int const kAdvanceRank = AdvanceRank;
449
+ using ThreadMap = ThreadMap_;
450
+ using AccessType = AccessType_;
451
+
452
+ using Index = typename Layout::Index;
453
+ using LongIndex = typename Layout::LongIndex;
454
+
455
+ using TensorRef = TensorRef<Element, Layout>;
456
+ using TensorView = TensorView<Element, Layout>;
457
+ using TensorCoord = typename Layout::TensorCoord;
458
+
459
+ using Pointer = Element*;
460
+ using NonConstPointer = typename platform::remove_const<Element>::type*;
461
+
462
+ using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast<
463
+ layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
464
+ Element,
465
+ layout::PitchLinear,
466
+ (kAdvanceRank == 0 ? 0 : 1),
467
+ ThreadMap,
468
+ AccessType,
469
+ Gather>;
470
+
471
+ /// Predicate vector stores mask to guard accesses
472
+ using Mask = typename UnderlyingIterator::Mask;
473
+
474
+ static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
475
+
476
+ /// Parameters object is precomputed state and is host-constructible
477
+ class Params {
478
+ private:
479
+ friend PredicatedTileAccessIteratorResidualLast;
480
+
481
+ /// Parameters object
482
+ typename UnderlyingIterator::Params params_;
483
+
484
+ public:
485
+ /// Default ctor
486
+ CUTLASS_HOST_DEVICE
487
+ Params() {}
488
+
489
+ /// Construct the Params object given a pitch-linear tensor's layout
490
+ CUTLASS_HOST_DEVICE
491
+ Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))){};
492
+
493
+ /// Construct the Params object given a pitch-linear tensor's layout
494
+ CUTLASS_HOST_DEVICE
495
+ Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {}
496
+ };
497
+
498
+ private:
499
+ //
500
+ // Data members
501
+ //
502
+
503
+ /// Underlying pitch-linear tile iterator
504
+ UnderlyingIterator iterator_;
505
+
506
+ public:
507
+ /// Constructs a TileIterator from its precomputed state, threadblock offset,
508
+ /// and thread ID
509
+ CUTLASS_HOST_DEVICE
510
+ PredicatedTileAccessIteratorResidualLast(
511
+ ///< Precomputed parameters object
512
+ Params const& params,
513
+ ///< Pointer to start of tensor
514
+ Pointer pointer,
515
+ ///< Extent of tensor
516
+ TensorCoord extent,
517
+ ///< ID of each participating thread
518
+ int thread_id,
519
+ ///< Initial offset of threadblock
520
+ TensorCoord const& threadblock_offset,
521
+ int const* indices = nullptr ///< gather/scatter indices, note no support for
522
+ ///< gather/scatter at this specialization
523
+ )
524
+ : iterator_(params.params_,
525
+ pointer,
526
+ layout::PitchLinearCoord(extent.row(), extent.column()),
527
+ thread_id,
528
+ layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column()),
529
+ indices)
530
+ {
531
+ }
532
+
533
+ /// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
534
+ /// offset
535
+ CUTLASS_HOST_DEVICE
536
+ PredicatedTileAccessIteratorResidualLast(
537
+ Params const& params, ///< Precomputed parameters object
538
+ Pointer pointer, ///< Pointer to start of tensor
539
+ TensorCoord extent, ///< Extent of tensor
540
+ int thread_id ///< ID of each participating thread
541
+ )
542
+ : PredicatedTileAccessIteratorResidualLast(params,
543
+ pointer,
544
+ extent,
545
+ thread_id,
546
+ make_Coord(0, 0))
547
+ {
548
+ }
549
+
550
+ /// Overrides the internal iteration index
551
+ CUTLASS_HOST_DEVICE
552
+ void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
553
+
554
+ CUTLASS_HOST_DEVICE
555
+ void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); }
556
+
557
+ /// Adds a pointer offset in units of Element
558
+ CUTLASS_HOST_DEVICE
559
+ void add_pointer_offset(LongIndex pointer_offset)
560
+ {
561
+ iterator_.add_pointer_offset(pointer_offset);
562
+ }
563
+
564
+ /// Advances an iterator along logical dimensions of matrix in units of whole
565
+ /// tiles
566
+ CUTLASS_HOST_DEVICE
567
+ void add_tile_offset(TensorCoord const& tile_offset)
568
+ {
569
+ iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
570
+ }
571
+
572
+ /// Returns a pointer
573
+ CUTLASS_HOST_DEVICE
574
+ AccessType* get() const { return reinterpret_cast<AccessType*>(iterator_.get()); }
575
+
576
+ /// Advances to the next tile in memory.
577
+ ///
578
+ /// The first time this method is called, predicates are updated, and the
579
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
580
+ /// Subsequent calls are lightweight and must only update the internal
581
+ /// pointer.
582
+ CUTLASS_HOST_DEVICE
583
+ PredicatedTileAccessIteratorResidualLast& operator++()
584
+ {
585
+ ++iterator_;
586
+ return *this;
587
+ }
588
+
589
+ /// Advances to the next tile in memory.
590
+ ///
591
+ /// The first time this method is called, predicates are updated, and the
592
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
593
+ /// Subsequent calls are lightweight and must only update the internal
594
+ /// pointer.
595
+ CUTLASS_HOST_DEVICE
596
+ PredicatedTileAccessIteratorResidualLast operator++(int)
597
+ {
598
+ PredicatedTileAccessIteratorResidualLast self(*this);
599
+ operator++();
600
+ return self;
601
+ }
602
+
603
+ /// Clears the predicate set efficiently
604
+ CUTLASS_HOST_DEVICE
605
+ void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
606
+
607
+ /// Clears the predicate set efficiently
608
+ CUTLASS_HOST_DEVICE
609
+ void enable_mask() { iterator_.enable_mask(); }
610
+
611
+ /// Sets the predicate mask, overriding value stored in predicate iterator
612
+ CUTLASS_HOST_DEVICE
613
+ void set_mask(Mask const& mask) { iterator_.set_mask(mask); }
614
+
615
+ /// Gets the mask
616
+ CUTLASS_HOST_DEVICE
617
+ void get_mask(Mask& mask) { iterator_.get_mask(mask); }
618
+
619
+ /// Returns whether access is valid or not
620
+ CUTLASS_HOST_DEVICE
621
+ bool valid() { return iterator_.valid(); }
622
+ };
623
+
624
+ ////////////////////////////////////////////////////////////////////////////////
625
+
626
+ /// Specialization of PredicatedTileAccessIteratorResidualLast for row-major
627
+ /// data.
628
+ ///
629
+ /// Satisfies: ForwardTileIteratorConcept |
630
+ /// ReadableContiguousTileIteratorConcept |
631
+ /// WriteableContiguousTileIteratorConcept |
632
+ /// MaskedTileIteratorConcept
633
+ ///
634
+ template <typename Shape_,
635
+ typename Element_,
636
+ int AdvanceRank,
637
+ typename ThreadMap_,
638
+ typename AccessType_,
639
+ bool Gather>
640
+ class PredicatedTileAccessIteratorResidualLast<Shape_,
641
+ Element_,
642
+ layout::RowMajor,
643
+ AdvanceRank,
644
+ ThreadMap_,
645
+ AccessType_,
646
+ Gather> {
647
+ public:
648
+ static_assert(AdvanceRank == 0 || AdvanceRank == 1,
649
+ "Specialization for pitch-linear iterator may advance along the "
650
+ "contiguous(rank=0) or strided(rank=1) dimension.");
651
+
652
+ using Shape = Shape_;
653
+ using Element = Element_;
654
+ using Layout = layout::RowMajor;
655
+ static int const kAdvanceRank = AdvanceRank;
656
+ using ThreadMap = ThreadMap_;
657
+ using AccessType = AccessType_;
658
+
659
+ using Index = typename Layout::Index;
660
+ using LongIndex = typename Layout::LongIndex;
661
+
662
+ using TensorRef = TensorRef<Element, Layout>;
663
+ using TensorView = TensorView<Element, Layout>;
664
+ using TensorCoord = typename Layout::TensorCoord;
665
+
666
+ using Pointer = Element*;
667
+ using NonConstPointer = typename platform::remove_const<Element>::type*;
668
+
669
+ using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast<
670
+ layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
671
+ Element,
672
+ layout::PitchLinear,
673
+ (kAdvanceRank == 0 ? 1 : 0),
674
+ ThreadMap,
675
+ AccessType,
676
+ Gather>;
677
+
678
+ static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
679
+
680
+ /// Predicate vector stores mask to guard accesses
681
+ using Mask = typename UnderlyingIterator::Mask;
682
+
683
+ /// Parameters object is precomputed state and is host-constructible
684
+ class Params {
685
+ private:
686
+ friend PredicatedTileAccessIteratorResidualLast;
687
+
688
+ /// Parameters object
689
+ typename UnderlyingIterator::Params params_;
690
+
691
+ public:
692
+ /// Default ctor
693
+ CUTLASS_HOST_DEVICE
694
+ Params() {}
695
+
696
+ /// Construct the Params object given a pitch-linear tensor's layout
697
+ CUTLASS_HOST_DEVICE
698
+ Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))){};
699
+
700
+ /// Construct the Params object given a pitch-linear tensor's layout
701
+ CUTLASS_HOST_DEVICE
702
+ Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {}
703
+ };
704
+
705
+ private:
706
+ //
707
+ // Data members
708
+ //
709
+
710
+ /// Underlying pitch-linear tile iterator
711
+ UnderlyingIterator iterator_;
712
+
713
+ public:
714
+ /// Constructs a TileIterator from its precomputed state, threadblock offset,
715
+ /// and thread ID
716
+ CUTLASS_HOST_DEVICE
717
+ PredicatedTileAccessIteratorResidualLast(
718
+ ///< Precomputed parameters object
719
+ Params const& params,
720
+ ///< Pointer to start of tensor
721
+ Pointer pointer,
722
+ ///< Extent of tensor
723
+ TensorCoord extent,
724
+ ///< ID of each participating thread
725
+ int thread_id,
726
+ ///< Initial offset of threadblock
727
+ TensorCoord const& threadblock_offset,
728
+ /// Gather indices
729
+ int const* indices = nullptr)
730
+ : iterator_(params.params_,
731
+ pointer,
732
+ layout::PitchLinearCoord(extent.column(), extent.row()),
733
+ thread_id,
734
+ layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row()),
735
+ indices)
736
+ {
737
+ }
738
+
739
+ /// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
740
+ /// offset
741
+ CUTLASS_HOST_DEVICE
742
+ PredicatedTileAccessIteratorResidualLast(
743
+ Params const& params, ///< Precomputed parameters object
744
+ Pointer pointer, ///< Pointer to start of tensor
745
+ TensorCoord extent, ///< Extent of tensor
746
+ int thread_id ///< ID of each participating thread
747
+ )
748
+ : PredicatedTileAccessIteratorResidualLast(params,
749
+ pointer,
750
+ extent,
751
+ thread_id,
752
+ make_Coord(0, 0))
753
+ {
754
+ }
755
+
756
+ /// Overrides the internal iteration index
757
+ CUTLASS_HOST_DEVICE
758
+ void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
759
+
760
+ CUTLASS_HOST_DEVICE
761
+ void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); }
762
+
763
+ /// Adds a pointer offset in units of Element
764
+ CUTLASS_HOST_DEVICE
765
+ void add_pointer_offset(LongIndex pointer_offset)
766
+ {
767
+ iterator_.add_pointer_offset(pointer_offset);
768
+ }
769
+
770
+ /// Advances an iterator along logical dimensions of matrix in units of whole
771
+ /// tiles
772
+ CUTLASS_HOST_DEVICE
773
+ void add_tile_offset(TensorCoord const& tile_offset)
774
+ {
775
+ iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
776
+ }
777
+
778
+ /// Returns a pointer
779
+ CUTLASS_HOST_DEVICE
780
+ AccessType* get() const { return reinterpret_cast<AccessType*>(iterator_.get()); }
781
+
782
+ /// Advances to the next tile in memory.
783
+ ///
784
+ /// The first time this method is called, predicates are updated, and the
785
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
786
+ /// Subsequent calls are lightweight and must only update the internal
787
+ /// pointer.
788
+ CUTLASS_HOST_DEVICE
789
+ PredicatedTileAccessIteratorResidualLast& operator++()
790
+ {
791
+ ++iterator_;
792
+ return *this;
793
+ }
794
+
795
+ /// Advances to the next tile in memory.
796
+ ///
797
+ /// The first time this method is called, predicates are updated, and the
798
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
799
+ /// Subsequent calls are lightweight and must only update the internal
800
+ /// pointer.
801
+ CUTLASS_HOST_DEVICE
802
+ PredicatedTileAccessIteratorResidualLast operator++(int)
803
+ {
804
+ PredicatedTileAccessIteratorResidualLast self(*this);
805
+ operator++();
806
+ return self;
807
+ }
808
+
809
+ /// Clears the predicate set efficiently
810
+ CUTLASS_HOST_DEVICE
811
+ void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
812
+
813
+ /// Clears the predicate set efficiently
814
+ CUTLASS_HOST_DEVICE
815
+ void enable_mask() { iterator_.enable_mask(); }
816
+
817
+ /// Sets the predicate mask, overriding value stored in predicate iterator
818
+ CUTLASS_HOST_DEVICE
819
+ void set_mask(Mask const& mask) { iterator_.set_mask(mask); }
820
+
821
+ /// Gets the mask
822
+ CUTLASS_HOST_DEVICE
823
+ void get_mask(Mask& mask) { iterator_.get_mask(mask); }
824
+
825
+ /// Returns whether access is valid or not
826
+ CUTLASS_HOST_DEVICE
827
+ bool valid() { return iterator_.valid(); }
828
+ };
829
+
830
+ ////////////////////////////////////////////////////////////////////////////////
831
+
832
+ /// Specialization of PredicatedTileAccessIteratorResidualLast for affine rank 2
833
+ /// data.
834
+ ///
835
+ /// Satisfies: ForwardTileIteratorConcept |
836
+ /// ReadableContiguousTileIteratorConcept |
837
+ /// WriteableContiguousTileIteratorConcept |
838
+ /// MaskedTileIteratorConcept
839
+ ///
840
+ template <typename Shape_,
841
+ typename Element_,
842
+ int AdvanceRank,
843
+ typename ThreadMap_,
844
+ typename AccessType_>
845
+ class PredicatedTileAccessIteratorResidualLast<Shape_,
846
+ Element_,
847
+ layout::AffineRankN<2>,
848
+ AdvanceRank,
849
+ ThreadMap_,
850
+ AccessType_,
851
+ false> {
852
+ public:
853
+ static_assert(AdvanceRank == 0 || AdvanceRank == 1,
854
+ "Specialization for pitch-linear iterator may advance along the "
855
+ "contiguous(rank=0) or strided(rank=1) dimension.");
856
+
857
+ using Shape = Shape_;
858
+ using Element = Element_;
859
+ using Layout = layout::AffineRankN<2>;
860
+ static int const kAdvanceRank = AdvanceRank;
861
+ using ThreadMap = ThreadMap_;
862
+ using AccessType = AccessType_;
863
+
864
+ using Index = typename Layout::Index;
865
+ using LongIndex = typename Layout::LongIndex;
866
+
867
+ using TensorRef = TensorRef<Element, Layout>;
868
+ using TensorView = TensorView<Element, Layout>;
869
+ using TensorCoord = typename Layout::TensorCoord;
870
+
871
+ using Pointer = Element*;
872
+ using NonConstPointer = typename platform::remove_const<Element>::type*;
873
+
874
+ using UnderlyingPredicates = PredicatedTileAccessIteratorPredicates<Shape,
875
+ Element,
876
+ layout::PitchLinear,
877
+ AdvanceRank,
878
+ ThreadMap,
879
+ AccessType>;
880
+
881
+ static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements;
882
+
883
+ static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements),
884
+ "Vectors implied by the thread map must be divisible by the access type.");
885
+
886
+ /// Predicate vector stores mask to guard accesses
887
+ using Mask = typename UnderlyingPredicates::Mask;
888
+
889
+ /// Parameters object is precomputed state and is host-constructible
890
+ class Params {
891
+ public:
892
+ friend PredicatedTileAccessIteratorResidualLast;
893
+
894
+ private:
895
+ /// stride of pitch-linear layout (units of Element)
896
+ Coord<Layout::kStrideRank, Layout::LongIndex> stride_;
897
+ /// amount (in bytes) to increment pointer to move to next access along
898
+ /// contiguous dimension
899
+ LongIndex inc_contiguous_;
900
+ /// amount (in bytes) to increment pointer from first access of current
901
+ /// contiguous dimension to first access of next one.
902
+ LongIndex inc_strided_;
903
+ /// amount (in bytes) to increment pointer from last access of current
904
+ /// contiguous dimension to first access of next one.
905
+ LongIndex inc_next_strided_;
906
+ /// amount (in bytes) to increment pointer from last access to first access
907
+ /// of next tile
908
+ LongIndex inc_next_;
909
+ /// amount (in bytes) to increment pointer from first access of current tile
910
+ /// to first access of next tile
911
+ LongIndex inc_advance_;
912
+
913
+ public:
914
+ // Default ctor
915
+ CUTLASS_HOST_DEVICE
916
+ Params() : stride_(0), inc_contiguous_(0), inc_strided_(0), inc_next_(0), inc_advance_(0) {}
917
+
918
+ /// Construct the Params object given a pitch-linear tensor's layout
919
+ CUTLASS_HOST_DEVICE
920
+ Params(Layout const& layout) : stride_({layout.stride(0), layout.stride(1)})
921
+ {
922
+ inc_contiguous_ = (LongIndex(stride_[0]) * ThreadMap::Delta::kContiguous) *
923
+ sizeof_bits<Element>::value / 8;
924
+
925
+ inc_strided_ = (LongIndex(stride_[1]) * ThreadMap::Delta::kStrided) *
926
+ sizeof_bits<Element>::value / 8;
927
+
928
+ inc_next_strided_ =
929
+ inc_strided_ - LongIndex(ThreadMap::Iterations::kContiguous - 1) * inc_contiguous_;
930
+
931
+ if (kAdvanceRank) {
932
+ // advance along strided dimension
933
+ inc_advance_ =
934
+ Shape::kStrided * LongIndex(stride_[1]) * sizeof_bits<Element>::value / 8;
935
+ } else {
936
+ // advance along contiguous dimension
937
+ inc_advance_ = Shape::kContiguous * stride_[0] * sizeof_bits<Element>::value / 8;
938
+ }
939
+
940
+ inc_next_ = inc_advance_ -
941
+ LongIndex(ThreadMap::Iterations::kContiguous - 1) * inc_contiguous_ -
942
+ LongIndex(ThreadMap::Iterations::kStrided - 1) * inc_strided_;
943
+ };
944
+ };
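+ // Worked example for the increments above (illustrative numbers only, not
+ // taken from this file): with a 32-bit Element, stride_ = {1, 128} and
+ // ThreadMap::Delta = {kContiguous: 8, kStrided: 4},
+ //   inc_contiguous_ = 1   * 8 * 32 / 8 =   32 bytes
+ //   inc_strided_    = 128 * 4 * 32 / 8 = 2048 bytes
+ // so the hot loop never has to re-multiply affine strides.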
945
+
946
+ private:
947
+ /// Internal pointer type permits fast address arithmetic
948
+ using BytePointer = char*;
949
+
950
+ //
951
+ // Data members
952
+ //
953
+
954
+ /// Parameters object with precomputed internal state
955
+ Params params_;
956
+
957
+ /// Internal pointer to first access of tile
958
+ BytePointer pointer_;
959
+
960
+ UnderlyingPredicates the_predicates;
961
+ Mask residual_tile_mask;
962
+
963
+ private:
964
+ /// Computes predicates based on internally tracked per-thread offset.
965
+ CUTLASS_DEVICE
966
+ void compute_predicates_(
967
+ /// Extent of the matrix window
968
+ TensorCoord extent,
969
+ /// optionally, simplify predicate calculation during 'steady state' phase
970
+ bool is_steady_state = false)
971
+ {
972
+ the_predicates.compute_predicates_(extent, is_steady_state);
973
+ }
974
+
975
+ public:
976
+ /// Constructs a TileIterator from its precomputed state, threadblock offset,
977
+ /// and thread ID
978
+ CUTLASS_HOST_DEVICE
979
+ PredicatedTileAccessIteratorResidualLast(
980
+ ///< Precomputed parameters object
981
+ Params const& params,
982
+ ///< Pointer to start of tensor
983
+ Pointer pointer,
984
+ ///< Extent of tensor
985
+ TensorCoord extent,
986
+ ///< ID of each participating thread
987
+ int thread_id,
988
+ ///< Initial offset of threadblock
989
+ TensorCoord const& threadblock_offset,
990
+ int const* indices = nullptr ///< gather/scatter indices, note no support for
991
+ ///< gather/scatter at this specialization
992
+ )
993
+ : params_(params),
994
+ pointer_(reinterpret_cast<BytePointer>(const_cast<NonConstPointer>(pointer))),
995
+ the_predicates(extent)
996
+ {
997
+ the_predicates.set_predicates(thread_id, threadblock_offset);
998
+
999
+ // update internal pointers
1000
+ Layout layout(params_.stride_);
1001
+ add_pointer_offset(layout(the_predicates.thread_offset_));
1002
+ }
1003
+
1004
+ /// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
1005
+ /// offset
1006
+ CUTLASS_HOST_DEVICE
1007
+ PredicatedTileAccessIteratorResidualLast(
1008
+ Params const& params, ///< Precomputed parameters object
1009
+ Pointer pointer, ///< Pointer to start of tensor
1010
+ TensorCoord extent, ///< Extent of tensor
1011
+ int thread_id ///< ID of each participating thread
1012
+ )
1013
+ : PredicatedTileAccessIteratorResidualLast(params,
1014
+ pointer,
1015
+ extent,
1016
+ thread_id,
1017
+ make_Coord(0, 0))
1018
+ {
1019
+ }
1020
+
1021
+ /// Overrides the internal iteration index
1022
+ CUTLASS_HOST_DEVICE
1023
+ void set_iteration_index(int index) { the_predicates.set_iteration_index(index); }
1024
+
1025
+ CUTLASS_HOST_DEVICE
1026
+ void set_residual_tile(bool is_residual_tile)
1027
+ {
1028
+ if (is_residual_tile) { the_predicates.set_mask(residual_tile_mask); }
1029
+ }
1030
+
1031
+ /// Adds a pointer offset in units of Element
1032
+ CUTLASS_HOST_DEVICE
1033
+ void add_pointer_offset(LongIndex pointer_offset)
1034
+ {
1035
+ pointer_ += sizeof_bits<Element>::value * pointer_offset / 8;
1036
+ }
1037
+
1038
+ /// Advances an iterator along logical dimensions of matrix in units of whole
1039
+ /// tiles
1040
+ CUTLASS_HOST_DEVICE
1041
+ void add_tile_offset(TensorCoord const& tile_offset)
1042
+ {
1043
+ if (kAdvanceRank) {
1044
+ pointer_ += params_.inc_advance_ * LongIndex(tile_offset[1]);
1045
+ pointer_ += Shape::kContiguous * tile_offset[0];
1046
+ } else {
1047
+ pointer_ += params_.inc_advance_ * LongIndex(tile_offset[0]);
1048
+ pointer_ += Shape::kStrided * tile_offset[1];
1049
+ }
1050
+ }
1051
+
1052
+ /// Returns a pointer
1053
+ CUTLASS_HOST_DEVICE
1054
+ AccessType* get() const
1055
+ {
1056
+ return reinterpret_cast<AccessType*>(pointer_) + the_predicates.iteration_vector_;
1057
+ }
1058
+
1059
+ /// Advances to the next tile in memory.
1060
+ ///
1061
+ /// The first time this method is called, predicates are updated, and the
1062
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
1063
+ /// Subsequent calls are lightweight and must only update the internal
1064
+ /// pointer.
1065
+ CUTLASS_HOST_DEVICE
1066
+ PredicatedTileAccessIteratorResidualLast& operator++()
1067
+ {
1068
+ the_predicates.operator++();
1069
+ ++the_predicates.iteration_vector_;
1070
+ if (the_predicates.iteration_vector_ < kAccessesPerVector) { return *this; }
1071
+
1072
+ the_predicates.iteration_vector_ = 0;
1073
+ ++the_predicates.iteration_contiguous_;
1074
+
1075
+ if (the_predicates.iteration_contiguous_ < ThreadMap::Iterations::kContiguous) {
1076
+ pointer_ += params_.inc_contiguous_;
1077
+ return *this;
1078
+ }
1079
+
1080
+ // Enter here only if (iteration_contiguous_ ==
1081
+ // ThreadMap::Iterations::kContiguous)
1082
+ the_predicates.iteration_contiguous_ = 0;
1083
+ ++the_predicates.iteration_strided_;
1084
+
1085
+ if (the_predicates.iteration_strided_ < ThreadMap::Iterations::kStrided) {
1086
+ pointer_ += params_.inc_next_strided_;
1087
+ return *this;
1088
+ }
1089
+
1090
+ // Enter here only if (iteration_strided_ == ThreadMap::Iterations::kStrided)
1091
+ // which means we enter the next tile.
1092
+ the_predicates.iteration_strided_ = 0;
1093
+
1094
+ // advance to next tile
1095
+ pointer_ += params_.inc_next_;
1096
+
1097
+ // now return to start tile - if the iterator is subsequently advanced, this
1098
+ // subtraction as well as the subsequent integer addition are both elided by
1099
+ // the compiler.
1100
+ pointer_ -= params_.inc_advance_;
1101
+
1102
+ return *this;
1103
+ }
1104
+
1105
+ /// Advances to the next tile in memory.
1106
+ ///
1107
+ /// The first time this method is called, predicates are updated, and the
1108
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
1109
+ /// Subsequent calls are lightweight and must only update the internal
1110
+ /// pointer.
1111
+ CUTLASS_HOST_DEVICE
1112
+ PredicatedTileAccessIteratorResidualLast operator++(int)
1113
+ {
1114
+ PredicatedTileAccessIteratorResidualLast self(*this);
1115
+ operator++();
1116
+ return self;
1117
+ }
1118
+
1119
+ /// Clears the predicate set efficiently
1120
+ CUTLASS_HOST_DEVICE
1121
+ void clear_mask(bool enable = true) { the_predicates.clear_mask(enable); }
1122
+
1123
+ /// Clears the predicate set efficiently
1124
+ CUTLASS_HOST_DEVICE
1125
+ void enable_mask() { the_predicates.enable_mask(); }
1126
+
1127
+ /// Sets the predicate mask, overriding value stored in predicate iterator
1128
+ CUTLASS_HOST_DEVICE
1129
+ void set_mask(Mask const& mask) { the_predicates.set_mask(mask); }
1130
+
1131
+ /// Gets the mask
1132
+ CUTLASS_HOST_DEVICE
1133
+ void get_mask(Mask& mask) { the_predicates.get_mask(mask); }
1134
+
1135
+ /// Returns whether access is valid or not
1136
+ CUTLASS_HOST_DEVICE
1137
+ bool valid() { return the_predicates.valid(); }
1138
+ };
1139
+
1140
+ ////////////////////////////////////////////////////////////////////////////////
1141
+
1142
+ /// Specialization of PredicatedTileAccessIteratorResidualLast for affine rank 2
1143
+ /// column-major data.
1144
+ ///
1145
+ /// Satisfies: ForwardTileIteratorConcept |
1146
+ /// ReadableContiguousTileIteratorConcept |
1147
+ /// WriteableContiguousTileIteratorConcept |
1148
+ /// MaskedTileIteratorConcept
1149
+ ///
1150
+ template <typename Shape_,
1151
+ typename Element_,
1152
+ int AdvanceRank,
1153
+ typename ThreadMap_,
1154
+ typename AccessType_>
1155
+ class PredicatedTileAccessIteratorResidualLast<Shape_,
1156
+ Element_,
1157
+ layout::AffineRank2ColumnMajor,
1158
+ AdvanceRank,
1159
+ ThreadMap_,
1160
+ AccessType_,
1161
+ false> {
1162
+ public:
1163
+ static_assert(AdvanceRank == 0 || AdvanceRank == 1,
1164
+ "Specialization for pitch-linear iterator may advance along the "
1165
+ "contiguous(rank=0) or strided(rank=1) dimension.");
1166
+
1167
+ using Shape = Shape_;
1168
+ using Element = Element_;
1169
+ using Layout = layout::AffineRank2ColumnMajor;
1170
+ static int const kAdvanceRank = AdvanceRank;
1171
+ using ThreadMap = ThreadMap_;
1172
+ using AccessType = AccessType_;
1173
+
1174
+ using Index = typename Layout::Index;
1175
+ using LongIndex = typename Layout::LongIndex;
1176
+
1177
+ using TensorRef = TensorRef<Element, Layout>;
1178
+ using TensorView = TensorView<Element, Layout>;
1179
+ using TensorCoord = typename Layout::TensorCoord;
1180
+
1181
+ using Pointer = Element*;
1182
+ using NonConstPointer = typename platform::remove_const<Element>::type*;
1183
+
1184
+ // Map to the underlying AffineRankN<2> layout
1185
+ using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast<
1186
+ layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
1187
+ Element,
1188
+ layout::AffineRankN<2>,
1189
+ (kAdvanceRank == 0 ? 0 : 1),
1190
+ ThreadMap,
1191
+ AccessType>;
1192
+
1193
+ static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
1194
+
1195
+ /// Predicate vector stores mask to guard accesses
1196
+ using Mask = typename UnderlyingIterator::Mask;
1197
+
1198
+ /// Parameters object is precomputed state and is host-constructible
1199
+ class Params {
1200
+ private:
1201
+ friend PredicatedTileAccessIteratorResidualLast;
1202
+
1203
+ /// Parameters object
1204
+ typename UnderlyingIterator::Params params_;
1205
+
1206
+ public:
1207
+ /// Default ctor
1208
+ CUTLASS_HOST_DEVICE
1209
+ Params() {}
1210
+
1211
+ /// Construct the Params object given an AffineRankN<2> tensor's layout
1212
+ CUTLASS_HOST_DEVICE
1213
+ Params(Layout const& layout)
1214
+ : params_(layout::AffineRankN<2>(layout.stride(0), layout.stride(1))){};
1215
+ };
1216
+
1217
+ private:
1218
+ //
1219
+ // Data members
1220
+ //
1221
+
1222
+ /// Underlying AffineRankN<2> tile iterator
1223
+ UnderlyingIterator iterator_;
1224
+
1225
+ public:
1226
+ /// Constructs a TileIterator from its precomputed state, threadblock offset,
1227
+ /// and thread ID
1228
+ CUTLASS_HOST_DEVICE
1229
+ PredicatedTileAccessIteratorResidualLast(
1230
+ ///< Precomputed parameters object
1231
+ Params const& params,
1232
+ ///< Pointer to start of tensor
1233
+ Pointer pointer,
1234
+ ///< Extent of tensor
1235
+ TensorCoord extent,
1236
+ ///< ID of each participating thread
1237
+ int thread_id,
1238
+ ///< Initial offset of threadblock
1239
+ TensorCoord const& threadblock_offset,
1240
+ int const* indices = nullptr ///< gather/scatter indices, note no support for
1241
+ ///< gather/scatter at this specialization
1242
+ )
1243
+ : iterator_(params.params_,
1244
+ pointer,
1245
+ layout::PitchLinearCoord(extent.row(), extent.column()),
1246
+ thread_id,
1247
+ layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column()))
1248
+ {
1249
+ }
1250
+
1251
+ /// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
1252
+ /// offset
1253
+ CUTLASS_HOST_DEVICE
1254
+ PredicatedTileAccessIteratorResidualLast(
1255
+ Params const& params, ///< Precomputed parameters object
1256
+ Pointer pointer, ///< Pointer to start of tensor
1257
+ TensorCoord extent, ///< Extent of tensor
1258
+ int thread_id ///< ID of each participating thread
1259
+ )
1260
+ : PredicatedTileAccessIteratorResidualLast(params,
1261
+ pointer,
1262
+ extent,
1263
+ thread_id,
1264
+ make_Coord(0, 0))
1265
+ {
1266
+ }
1267
+
1268
+ /// Overrides the internal iteration index
1269
+ CUTLASS_HOST_DEVICE
1270
+ void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
1271
+
1272
+ CUTLASS_HOST_DEVICE
1273
+ void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); }
1274
+
1275
+ /// Adds a pointer offset in units of Element
1276
+ CUTLASS_HOST_DEVICE
1277
+ void add_pointer_offset(LongIndex pointer_offset)
1278
+ {
1279
+ iterator_.add_pointer_offset(pointer_offset);
1280
+ }
1281
+
1282
+ /// Advances an iterator along logical dimensions of matrix in units of whole
1283
+ /// tiles
1284
+ CUTLASS_HOST_DEVICE
1285
+ void add_tile_offset(TensorCoord const& tile_offset)
1286
+ {
1287
+ iterator_.add_tile_offset(make_Coord(tile_offset.row(), tile_offset.column()));
1288
+ }
1289
+
1290
+ /// Returns a pointer
1291
+ CUTLASS_HOST_DEVICE
1292
+ AccessType* get() const { return reinterpret_cast<AccessType*>(iterator_.get()); }
1293
+
1294
+ /// Advances to the next tile in memory.
1295
+ ///
1296
+ /// The first time this method is called, predicates are updated, and the
1297
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
1298
+ /// Subsequent calls are lightweight and must only update the internal
1299
+ /// pointer.
1300
+ CUTLASS_HOST_DEVICE
1301
+ PredicatedTileAccessIteratorResidualLast& operator++()
1302
+ {
1303
+ ++iterator_;
1304
+ return *this;
1305
+ }
1306
+
1307
+ /// Advances to the next tile in memory.
1308
+ ///
1309
+ /// The first time this method is called, predicates are updated, and the
1310
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
1311
+ /// Subsequent calls are lightweight and must only update the internal
1312
+ /// pointer.
1313
+ CUTLASS_HOST_DEVICE
1314
+ PredicatedTileAccessIteratorResidualLast operator++(int)
1315
+ {
1316
+ PredicatedTileAccessIteratorResidualLast self(*this);
1317
+ operator++();
1318
+ return self;
1319
+ }
1320
+
1321
+ /// Clears the predicate set efficiently
1322
+ CUTLASS_HOST_DEVICE
1323
+ void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
1324
+
1325
+ /// Clears the predicate set efficiently
1326
+ CUTLASS_HOST_DEVICE
1327
+ void enable_mask() { iterator_.enable_mask(); }
1328
+
1329
+ /// Sets the predicate mask, overriding value stored in predicate iterator
1330
+ CUTLASS_HOST_DEVICE
1331
+ void set_mask(Mask const& mask) { iterator_.set_mask(mask); }
1332
+
1333
+ /// Gets the mask
1334
+ CUTLASS_HOST_DEVICE
1335
+ void get_mask(Mask& mask) { iterator_.get_mask(mask); }
1336
+
1337
+ /// Returns whether access is valid or not
1338
+ CUTLASS_HOST_DEVICE
1339
+ bool valid() { return iterator_.valid(); }
1340
+ };
1341
+
1342
+ ////////////////////////////////////////////////////////////////////////////////
1343
+
1344
+ /// Specialization of PredicatedTileAccessIteratorResidualLast for affine rank 2
1345
+ /// row-major data.
1346
+ ///
1347
+ /// Satisfies: ForwardTileIteratorConcept |
1348
+ /// ReadableContiguousTileIteratorConcept |
1349
+ /// WriteableContiguousTileIteratorConcept |
1350
+ /// MaskedTileIteratorConcept
1351
+ ///
1352
+ template <typename Shape_,
1353
+ typename Element_,
1354
+ int AdvanceRank,
1355
+ typename ThreadMap_,
1356
+ typename AccessType_>
1357
+ class PredicatedTileAccessIteratorResidualLast<Shape_,
1358
+ Element_,
1359
+ layout::AffineRank2RowMajor,
1360
+ AdvanceRank,
1361
+ ThreadMap_,
1362
+ AccessType_,
1363
+ false> {
1364
+ public:
1365
+ static_assert(AdvanceRank == 0 || AdvanceRank == 1,
1366
+ "Specialization for pitch-linear iterator may advance along the "
1367
+ "contiguous(rank=0) or strided(rank=1) dimension.");
1368
+
1369
+ using Shape = Shape_;
1370
+ using Element = Element_;
1371
+ using Layout = layout::AffineRank2RowMajor;
1372
+ static int const kAdvanceRank = AdvanceRank;
1373
+ using ThreadMap = ThreadMap_;
1374
+ using AccessType = AccessType_;
1375
+
1376
+ using Index = typename Layout::Index;
1377
+ using LongIndex = typename Layout::LongIndex;
1378
+
1379
+ using TensorRef = TensorRef<Element, Layout>;
1380
+ using TensorView = TensorView<Element, Layout>;
1381
+ using TensorCoord = typename Layout::TensorCoord;
1382
+
1383
+ using Pointer = Element*;
1384
+ using NonConstPointer = typename platform::remove_const<Element>::type*;
1385
+
1386
+ // Map to the underlying AffineRankN<2> layout
1387
+ using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast<
1388
+ layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
1389
+ Element,
1390
+ layout::AffineRankN<2>,
1391
+ (kAdvanceRank == 0 ? 1 : 0),
1392
+ ThreadMap,
1393
+ AccessType>;
1394
+
1395
+ static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
1396
+
1397
+ /// Predicate vector stores mask to guard accesses
1398
+ using Mask = typename UnderlyingIterator::Mask;
1399
+
1400
+ /// Parameters object is precomputed state and is host-constructible
1401
+ class Params {
1402
+ private:
1403
+ friend PredicatedTileAccessIteratorResidualLast;
1404
+
1405
+ /// Parameters object
1406
+ typename UnderlyingIterator::Params params_;
1407
+
1408
+ public:
1409
+ /// Default ctor
1410
+ CUTLASS_HOST_DEVICE
1411
+ Params() {}
1412
+
1413
+ /// Construct the Params object given an AffineRankN<2> tensor's layout
1414
+ CUTLASS_HOST_DEVICE
1415
+ Params(Layout const& layout)
1416
+ : params_(layout::AffineRankN<2>(layout.stride(1), layout.stride(0))){};
1417
+ };
1418
+
1419
+ private:
1420
+ //
1421
+ // Data members
1422
+ //
1423
+
1424
+ /// Underlying AffineRankN<2> tile iterator
1425
+ UnderlyingIterator iterator_;
1426
+
1427
+ public:
1428
+ /// Constructs a TileIterator from its precomputed state, threadblock offset,
1429
+ /// and thread ID
1430
+ CUTLASS_HOST_DEVICE
1431
+ PredicatedTileAccessIteratorResidualLast(
1432
+ ///< Precomputed parameters object
1433
+ Params const& params,
1434
+ ///< Pointer to start of tensor
1435
+ Pointer pointer,
1436
+ ///< Extent of tensor
1437
+ TensorCoord extent,
1438
+ ///< ID of each participating thread
1439
+ int thread_id,
1440
+ ///< Initial offset of threadblock
1441
+ TensorCoord const& threadblock_offset,
1442
+ int const* indices = nullptr ///< gather/scatter indices, note no support for
1443
+ ///< gather/scatter at this specialization
1444
+ )
1445
+ : iterator_(params.params_,
1446
+ pointer,
1447
+ layout::PitchLinearCoord(extent.column(), extent.row()),
1448
+ thread_id,
1449
+ layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row()))
1450
+ {
1451
+ }
1452
+
1453
+ /// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
1454
+ /// offset
1455
+ CUTLASS_HOST_DEVICE
1456
+ PredicatedTileAccessIteratorResidualLast(
1457
+ Params const& params, ///< Precomputed parameters object
1458
+ Pointer pointer, ///< Pointer to start of tensor
1459
+ TensorCoord extent, ///< Extent of tensor
1460
+ int thread_id ///< ID of each participating thread
1461
+ )
1462
+ : PredicatedTileAccessIteratorResidualLast(params,
1463
+ pointer,
1464
+ extent,
1465
+ thread_id,
1466
+ make_Coord(0, 0))
1467
+ {
1468
+ }
1469
+
1470
+ /// Overrides the internal iteration index
1471
+ CUTLASS_HOST_DEVICE
1472
+ void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
1473
+
1474
+ CUTLASS_HOST_DEVICE
1475
+ void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); }
1476
+
1477
+ /// Adds a pointer offset in units of Element
1478
+ CUTLASS_HOST_DEVICE
1479
+ void add_pointer_offset(LongIndex pointer_offset)
1480
+ {
1481
+ iterator_.add_pointer_offset(pointer_offset);
1482
+ }
1483
+
1484
+ /// Advances an iterator along logical dimensions of matrix in units of whole
1485
+ /// tiles
1486
+ CUTLASS_HOST_DEVICE
1487
+ void add_tile_offset(TensorCoord const& tile_offset)
1488
+ {
1489
+ iterator_.add_tile_offset(make_Coord(tile_offset.column(), tile_offset.row()));
1490
+ }
1491
+
1492
+ /// Returns a pointer
1493
+ CUTLASS_HOST_DEVICE
1494
+ AccessType* get() const { return reinterpret_cast<AccessType*>(iterator_.get()); }
1495
+
1496
+ /// Advances to the next tile in memory.
1497
+ ///
1498
+ /// The first time this method is called, predicates are updated, and the
1499
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
1500
+ /// Subsequent calls are lightweight and must only update the internal
1501
+ /// pointer.
1502
+ CUTLASS_HOST_DEVICE
1503
+ PredicatedTileAccessIteratorResidualLast& operator++()
1504
+ {
1505
+ ++iterator_;
1506
+ return *this;
1507
+ }
1508
+
1509
+ /// Advances to the next tile in memory.
1510
+ ///
1511
+ /// The first time this method is called, predicates are updated, and the
1512
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
1513
+ /// Subsequent calls are lightweight and must only update the internal
1514
+ /// pointer.
1515
+ CUTLASS_HOST_DEVICE
1516
+ PredicatedTileAccessIteratorResidualLast operator++(int)
1517
+ {
1518
+ PredicatedTileAccessIteratorResidualLast self(*this);
1519
+ operator++();
1520
+ return self;
1521
+ }
1522
+
1523
+ /// Clears the predicate set efficiently
1524
+ CUTLASS_HOST_DEVICE
1525
+ void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
1526
+
1527
+ /// Clears the predicate set efficiently
1528
+ CUTLASS_HOST_DEVICE
1529
+ void enable_mask() { iterator_.enable_mask(); }
1530
+
1531
+ /// Sets the predicate mask, overriding value stored in predicate iterator
1532
+ CUTLASS_HOST_DEVICE
1533
+ void set_mask(Mask const& mask) { iterator_.set_mask(mask); }
1534
+
1535
+ /// Gets the mask
1536
+ CUTLASS_HOST_DEVICE
1537
+ void get_mask(Mask& mask) { iterator_.get_mask(mask); }
1538
+
1539
+ /// Returns whether access is valid or not
1540
+ CUTLASS_HOST_DEVICE
1541
+ bool valid() { return iterator_.valid(); }
1542
+ };
1543
+
1544
+ ////////////////////////////////////////////////////////////////////////////////
1545
+
1546
+ /// Specialization of PredicatedTileAccessIteratorResidualLast for column-major
1547
+ /// interleaved data. It is mapped to the congruous layout.
1548
+ ///
1549
+ /// Satisfies: ForwardTileIteratorConcept |
1550
+ /// ReadableContiguousTileIteratorConcept |
1551
+ /// WriteableContiguousTileIteratorConcept |
1552
+ /// MaskedTileIteratorConcept
1553
+ ///
1554
+
1555
+ template <typename Shape_,
1556
+ typename Element_,
1557
+ int AdvanceRank,
1558
+ typename ThreadMap_,
1559
+ typename AccessType_,
1560
+ int InterleavedK>
1561
+ class PredicatedTileAccessIteratorResidualLast<Shape_,
1562
+ Element_,
1563
+ layout::ColumnMajorInterleaved<InterleavedK>,
1564
+ AdvanceRank,
1565
+ ThreadMap_,
1566
+ AccessType_,
1567
+ false> {
1568
+ public:
1569
+ static_assert(AdvanceRank == 0 || AdvanceRank == 1,
1570
+ "Specialization for pitch-linear iterator may advance along the "
1571
+ "contiguous(rank=0) or strided(rank=1) dimension.");
1572
+
1573
+ using Shape = Shape_;
1574
+ using Element = Element_;
1575
+ static int const kInterleavedK = InterleavedK;
1576
+ using Layout = layout::ColumnMajorInterleaved<kInterleavedK>;
1577
+ static int const kAdvanceRank = AdvanceRank;
1578
+ using ThreadMap = ThreadMap_;
1579
+ using AccessType = AccessType_;
1580
+
1581
+ using Index = typename Layout::Index;
1582
+ using LongIndex = typename Layout::LongIndex;
1583
+
1584
+ using TensorRef = TensorRef<Element, Layout>;
1585
+ using TensorView = TensorView<Element, Layout>;
1586
+ using TensorCoord = typename Layout::TensorCoord;
1587
+
1588
+ using Pointer = Element*;
1589
+ using NonConstPointer = typename platform::remove_const<Element>::type*;
1590
+
1591
+ using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast<
1592
+ layout::PitchLinearShape<Shape::kRow * kInterleavedK, Shape::kColumn / kInterleavedK>,
1593
+ Element,
1594
+ layout::PitchLinear,
1595
+ (kAdvanceRank == 0 ? 0 : 1),
1596
+ ThreadMap,
1597
+ AccessType>;
1598
+
1599
+ static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
1600
+
1601
+ /// Predicate vector stores mask to guard accesses
1602
+ using Mask = typename UnderlyingIterator::Mask;
1603
+
1604
+ /// Parameters object is precomputed state and is host-constructible
1605
+ class Params {
1606
+ private:
1607
+ friend PredicatedTileAccessIteratorResidualLast;
1608
+
1609
+ /// Parameters object
1610
+ typename UnderlyingIterator::Params params_;
1611
+
1612
+ public:
1613
+ CUTLASS_HOST_DEVICE
1614
+ Params() {}
1615
+
1616
+ /// Construct the Params object given a pitch-linear tensor's layout
1617
+ CUTLASS_HOST_DEVICE
1618
+ Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))) {}
1619
+
1620
+ CUTLASS_HOST_DEVICE
1621
+ Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {}
1622
+ };
1623
+
1624
+ private:
1625
+ //
1626
+ // Data members
1627
+ //
1628
+
1629
+ /// Underlying pitch-linear tile iterator
1630
+ UnderlyingIterator iterator_;
1631
+
1632
+ public:
1633
+ /// Constructs a TileIterator from its precomputed state, threadblock offset,
1634
+ /// and thread ID
1635
+ CUTLASS_HOST_DEVICE
1636
+ PredicatedTileAccessIteratorResidualLast(
1637
+ /// Precomputed parameters object
1638
+ Params const& params,
1639
+ /// Pointer to start of tensor
1640
+ Pointer pointer,
1641
+ /// Extent of tensor
1642
+ TensorCoord extent,
1643
+ /// ID of each participating thread
1644
+ int thread_id,
1645
+ /// Initial offset of threadblock
1646
+ TensorCoord const& threadblock_offset,
1647
+ int const* indices = nullptr ///< gather/scatter indices, note no support for
1648
+ ///< gather/scatter at this specialization
1649
+ )
1650
+ : iterator_(params.params_,
1651
+ pointer,
1652
+ layout::PitchLinearCoord(extent.row() * kInterleavedK,
1653
+ extent.column() / kInterleavedK),
1654
+ thread_id,
1655
+ layout::PitchLinearCoord(threadblock_offset.row() * kInterleavedK,
1656
+ threadblock_offset.column() / kInterleavedK))
1657
+ {
1658
+ }
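+ // The interleaved layout is folded into an equivalent pitch-linear problem:
+ // the contiguous extent is multiplied by kInterleavedK while the strided
+ // extent is divided by it (same for the threadblock offset), so the plain
+ // pitch-linear iterator can be reused unchanged.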
1659
+
1660
+ /// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
1661
+ /// offset
1662
+ CUTLASS_HOST_DEVICE
1663
+ PredicatedTileAccessIteratorResidualLast(
1664
+ Params const& params, ///< Precomputed parameters object
1665
+ Pointer pointer, ///< Pointer to start of tensor
1666
+ TensorCoord extent, ///< Extent of tensor
1667
+ int thread_id ///< ID of each participating thread
1668
+ )
1669
+ : PredicatedTileAccessIteratorResidualLast(params,
1670
+ pointer,
1671
+ extent,
1672
+ thread_id,
1673
+ make_Coord(0, 0))
1674
+ {
1675
+ }
1676
+
1677
+ /// Overrides the internal iteration index
1678
+ CUTLASS_HOST_DEVICE
1679
+ void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
1680
+
1681
+ CUTLASS_HOST_DEVICE
1682
+ void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); }
1683
+
1684
+ /// Adds a pointer offset in units of Element
1685
+ CUTLASS_HOST_DEVICE
1686
+ void add_pointer_offset(LongIndex pointer_offset)
1687
+ {
1688
+ iterator_.add_pointer_offset(pointer_offset);
1689
+ }
1690
+
1691
+ /// Advances an iterator along logical dimensions of matrix in units of whole
1692
+ /// tiles
1693
+ CUTLASS_HOST_DEVICE
1694
+ void add_tile_offset(TensorCoord const& tile_offset)
1695
+ {
1696
+ iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()});
1697
+ }
1698
+
1699
+ /// Returns a pointer
1700
+ CUTLASS_HOST_DEVICE
1701
+ AccessType* get() const { return reinterpret_cast<AccessType*>(iterator_.get()); }
1702
+
1703
+ /// Advances to the next tile in memory.
1704
+ ///
1705
+ /// The first time this method is called, predicates are updated, and the
1706
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
1707
+ /// Subsequent calls are lightweight and must only update the internal
1708
+ /// pointer.
1709
+ CUTLASS_HOST_DEVICE
1710
+ PredicatedTileAccessIteratorResidualLast& operator++()
1711
+ {
1712
+ ++iterator_;
1713
+ return *this;
1714
+ }
1715
+
1716
+ /// Advances to the next tile in memory.
1717
+ ///
1718
+ /// The first time this method is called, predicates are updated, and the
1719
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
1720
+ /// Subsequent calls are lightweight and must only update the internal
1721
+ /// pointer.
1722
+ CUTLASS_HOST_DEVICE
1723
+ PredicatedTileAccessIteratorResidualLast operator++(int)
1724
+ {
1725
+ PredicatedTileAccessIteratorResidualLast self(*this);
1726
+ operator++();
1727
+ return self;
1728
+ }
1729
+
1730
+ /// Clears the predicate set efficiently
1731
+ CUTLASS_HOST_DEVICE
1732
+ void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
1733
+
1734
+ /// Clears the predicate set efficiently
1735
+ CUTLASS_HOST_DEVICE
1736
+ void enable_mask() { iterator_.enable_mask(); }
1737
+
1738
+ /// Sets the predicate mask, overriding value stored in predicate iterator
1739
+ CUTLASS_HOST_DEVICE
1740
+ void set_mask(Mask const& mask) { iterator_.set_mask(mask); }
1741
+
1742
+ /// Gets the mask
1743
+ CUTLASS_HOST_DEVICE
1744
+ void get_mask(Mask& mask) { iterator_.get_mask(mask); }
1745
+
1746
+ /// Returns whether access is valid or not
1747
+ CUTLASS_HOST_DEVICE
1748
+ bool valid() { return iterator_.valid(); }
1749
+ };
1750
+
1751
+ ////////////////////////////////////////////////////////////////////////////////
1752
+
1753
+ /// Specialization of PredicatedTileAccessIteratorResidualLast for row-major
1754
+ /// interleaved data.
1755
+ /// It is mapped to the congruous layout.
1756
+ ///
1757
+ /// Satisfies: ForwardTileIteratorConcept |
1758
+ /// ReadableContiguousTileIteratorConcept |
1759
+ /// WriteableContiguousTileIteratorConcept |
1760
+ /// MaskedTileIteratorConcept
1761
+ ///
1762
+ template <typename Shape_,
1763
+ typename Element_,
1764
+ int AdvanceRank,
1765
+ typename ThreadMap_,
1766
+ typename AccessType_,
1767
+ int InterleavedK>
1768
+ class PredicatedTileAccessIteratorResidualLast<Shape_,
1769
+ Element_,
1770
+ layout::RowMajorInterleaved<InterleavedK>,
1771
+ AdvanceRank,
1772
+ ThreadMap_,
1773
+ AccessType_,
1774
+ false> {
1775
+ public:
1776
+ static_assert(AdvanceRank == 0 || AdvanceRank == 1,
1777
+ "Specialization for pitch-linear iterator may advance along the "
1778
+ "contiguous(rank=0) or strided(rank=1) dimension.");
1779
+
1780
+ using Shape = Shape_;
1781
+ using Element = Element_;
1782
+ static int const kInterleavedK = InterleavedK;
1783
+ using Layout = layout::RowMajorInterleaved<kInterleavedK>;
1784
+ static int const kAdvanceRank = AdvanceRank;
1785
+ using ThreadMap = ThreadMap_;
1786
+ using AccessType = AccessType_;
1787
+
1788
+ using Index = typename Layout::Index;
1789
+ using LongIndex = typename Layout::LongIndex;
1790
+
1791
+ using TensorRef = TensorRef<Element, Layout>;
1792
+ using TensorView = TensorView<Element, Layout>;
1793
+ using TensorCoord = typename Layout::TensorCoord;
1794
+
1795
+ using Pointer = Element*;
1796
+ using NonConstPointer = typename platform::remove_const<Element>::type*;
1797
+
1798
+ using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast<
1799
+ layout::PitchLinearShape<Shape::kColumn * kInterleavedK, Shape::kRow / kInterleavedK>,
1800
+ Element,
1801
+ layout::PitchLinear,
1802
+ (kAdvanceRank == 0 ? 1 : 0),
1803
+ ThreadMap,
1804
+ AccessType>;
1805
+
1806
+ static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector;
1807
+
1808
+ /// Predicate vector stores mask to guard accesses
1809
+ using Mask = typename UnderlyingIterator::Mask;
1810
+
1811
+ /// Parameters object is precomputed state and is host-constructible
1812
+ class Params {
1813
+ private:
1814
+ friend PredicatedTileAccessIteratorResidualLast;
1815
+
1816
+ /// Parameters object
1817
+ typename UnderlyingIterator::Params params_;
1818
+
1819
+ public:
1820
+ CUTLASS_HOST_DEVICE
1821
+ Params() {}
1822
+
1823
+ /// Construct the Params object given a pitch-linear tensor's layout
1824
+ CUTLASS_HOST_DEVICE
1825
+ Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))) {}
1826
+
1827
+ CUTLASS_HOST_DEVICE
1828
+ Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {}
1829
+ };
1830
+
1831
+ private:
1832
+ //
1833
+ // Data members
1834
+ //
1835
+
1836
+ /// Underlying pitch-linear tile iterator
1837
+ UnderlyingIterator iterator_;
1838
+
1839
+ public:
1840
+ /// Constructs a TileIterator from its precomputed state, threadblock offset,
1841
+ /// and thread ID
1842
+ CUTLASS_HOST_DEVICE
1843
+ PredicatedTileAccessIteratorResidualLast(
1844
+ /// Precomputed parameters object
1845
+ Params const& params,
1846
+ /// Pointer to start of tensor
1847
+ Pointer pointer,
1848
+ /// Extent of tensor
1849
+ TensorCoord extent,
1850
+ /// ID of each participating thread
1851
+ int thread_id,
1852
+ /// Initial offset of threadblock
1853
+ TensorCoord const& threadblock_offset,
1854
+ int const* indices = nullptr ///< gather/scatter indices, note no support for
1855
+ ///< gather/scatter at this specialization
1856
+ )
1857
+ : iterator_(params.params_,
1858
+ pointer,
1859
+ layout::PitchLinearCoord(extent.column() * kInterleavedK,
1860
+ extent.row() / kInterleavedK),
1861
+ thread_id,
1862
+ layout::PitchLinearCoord(threadblock_offset.column() * kInterleavedK,
1863
+ threadblock_offset.row() / kInterleavedK))
1864
+ {
1865
+ }
1866
+
1867
+ /// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock
1868
+ /// offset
1869
+ CUTLASS_HOST_DEVICE
1870
+ PredicatedTileAccessIteratorResidualLast(
1871
+ Params const& params, ///< Precomputed parameters object
1872
+ Pointer pointer, ///< Pointer to start of tensor
1873
+ TensorCoord extent, ///< Extent of tensor
1874
+ int thread_id ///< ID of each participating thread
1875
+ )
1876
+ : PredicatedTileAccessIteratorResidualLast(params,
1877
+ pointer,
1878
+ extent,
1879
+ thread_id,
1880
+ make_Coord(0, 0))
1881
+ {
1882
+ }
1883
+
1884
+ /// Overrides the internal iteration index
1885
+ CUTLASS_HOST_DEVICE
1886
+ void set_iteration_index(int index) { iterator_.set_iteration_index(index); }
1887
+
1888
+ CUTLASS_HOST_DEVICE
1889
+ void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); }
1890
+
1891
+ /// Adds a pointer offset in units of Element
1892
+ CUTLASS_HOST_DEVICE
1893
+ void add_pointer_offset(LongIndex pointer_offset)
1894
+ {
1895
+ iterator_.add_pointer_offset(pointer_offset);
1896
+ }
1897
+
1898
+ /// Advances an iterator along logical dimensions of matrix in units of whole
1899
+ /// tiles
1900
+ CUTLASS_HOST_DEVICE
1901
+ void add_tile_offset(TensorCoord const& tile_offset)
1902
+ {
1903
+ iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()});
1904
+ }
1905
+
1906
+ /// Returns a pointer
1907
+ CUTLASS_HOST_DEVICE
1908
+ AccessType* get() const { return reinterpret_cast<AccessType*>(iterator_.get()); }
1909
+
1910
+ /// Advances to the next tile in memory.
1911
+ ///
1912
+ /// The first time this method is called, predicates are updated, and the
1913
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
1914
+ /// Subsequent calls are lightweight and must only update the internal
1915
+ /// pointer.
1916
+ CUTLASS_HOST_DEVICE
1917
+ PredicatedTileAccessIteratorResidualLast& operator++()
1918
+ {
1919
+ ++iterator_;
1920
+ return *this;
1921
+ }
1922
+
1923
+ /// Advances to the next tile in memory.
1924
+ ///
1925
+ /// The first time this method is called, predicates are updated, and the
1926
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
1927
+ /// Subsequent calls are lightweight and must only update the internal
1928
+ /// pointer.
1929
+ CUTLASS_HOST_DEVICE
1930
+ PredicatedTileAccessIteratorResidualLast operator++(int)
1931
+ {
1932
+ PredicatedTileAccessIteratorResidualLast self(*this);
1933
+ operator++();
1934
+ return self;
1935
+ }
1936
+
1937
+ /// Clears the predicate set efficiently
1938
+ CUTLASS_HOST_DEVICE
1939
+ void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
1940
+
1941
+ /// Clears the predicate set efficiently
1942
+ CUTLASS_HOST_DEVICE
1943
+ void enable_mask() { iterator_.enable_mask(); }
1944
+
1945
+ /// Sets the predicate mask, overriding value stored in predicate iterator
1946
+ CUTLASS_HOST_DEVICE
1947
+ void set_mask(Mask const& mask) { iterator_.set_mask(mask); }
1948
+
1949
+ /// Gets the mask
1950
+ CUTLASS_HOST_DEVICE
1951
+ void get_mask(Mask& mask) { iterator_.get_mask(mask); }
1952
+
1953
+ /// Returns whether access is valid or not
1954
+ CUTLASS_HOST_DEVICE
1955
+ bool valid() { return iterator_.valid(); }
1956
+ };
1957
+
1958
+ ////////////////////////////////////////////////////////////////////////////////
1959
+
1960
+ } // namespace threadblock
1961
+ } // namespace transform
1962
+ } // namespace cutlass
1963
+
1964
+ ////////////////////////////////////////////////////////////////////////////////
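A minimal worked example of the coordinate remapping performed by the RowMajorInterleaved constructor above, assuming InterleavedK = 32 and a hypothetical extent of 64 rows x 128 columns (illustrative values, not taken from the header):

//  RowMajorInterleaved<32> tile, extent = (row = 64, column = 128)
//    underlying pitch-linear contiguous extent = extent.column() * kInterleavedK = 128 * 32 = 4096
//    underlying pitch-linear strided extent    = extent.row() / kInterleavedK    = 64 / 32   = 2
//  the threadblock offset is remapped the same way:
//    PitchLinearCoord(threadblock_offset.column() * kInterleavedK, threadblock_offset.row() / kInterleavedK)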
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/predicated_tile_iterator_atomic.h ADDED
@@ -0,0 +1,886 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+ #include <cutlass/cutlass.h>
8
+ #include <cutlass/epilogue/threadblock/predicated_tile_iterator.h>
9
+ #include <cutlass/tensor_coord.h>
10
+ namespace cutlass {
11
+ namespace epilogue {
12
+ namespace threadblock {
13
+
14
+ template <class AccessType, class Enable = void>
15
+ struct atomic_store {};
16
+
17
+ template <class AccessType>
18
+ struct atomic_store<AccessType,
19
+ typename platform::enable_if<
20
+ platform::is_same<typename AccessType::Element, half_t>::value>::type> {
21
+ using Element = typename AccessType::Element;
22
+ static const int kCount = AccessType::kElements;
23
+
24
+ CUTLASS_DEVICE
25
+ atomic_store(AccessType const& D, void* ptr, bool pred_guard)
26
+ {
27
+ static_assert(!(kCount % 2), "kCount must be even");
28
+ half2* p = reinterpret_cast<half2*>(ptr);
29
+ uint const* data = reinterpret_cast<uint const*>(&D);
30
+ asm volatile(
31
+ "{\n"
32
+ " .reg .pred p;\n"
33
+ " setp.ne.b32 p, %0, 0;\n"
34
+ :
35
+ : "r"((int)pred_guard));
36
+ for (int i = 0; i < kCount / 2; i++) {
37
+ asm volatile(" @p red.relaxed.global.add.noftz.f16x2 [%0], %1;\n"
38
+ :
39
+ : "l"(p + i), "r"(data[i]));
40
+ }
41
+ asm volatile("}\n" ::);
42
+ }
43
+ };
44
+
45
+ template <class AccessType>
46
+ struct atomic_store<AccessType,
47
+ typename platform::enable_if<
48
+ platform::is_same<typename AccessType::Element, float>::value>::type> {
49
+ using Element = typename AccessType::Element;
50
+ static const int kCount = AccessType::kElements;
51
+
52
+ CUTLASS_DEVICE
53
+ atomic_store(AccessType const& D, void* ptr, bool pred_guard)
54
+ {
55
+ Element* p = reinterpret_cast<Element*>(ptr);
56
+ uint const* data = reinterpret_cast<uint const*>(&D);
57
+ asm volatile(
58
+ "{\n"
59
+ " .reg .pred p;\n"
60
+ " setp.ne.b32 p, %0, 0;\n"
61
+ :
62
+ : "r"((int)pred_guard));
63
+ for (int i = 0; i < kCount; i++) {
64
+ asm volatile(" @p red.relaxed.global.add.f32 [%0], %1;\n"
65
+ :
66
+ : "l"(p + i), "r"(data[i]));
67
+ }
68
+ asm volatile("}\n" ::);
69
+ }
70
+ };
71
+
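Both specializations above issue a predicated red.global.add so the fragment is accumulated into global memory instead of overwriting it. A rough plain-CUDA sketch of the float path, assuming a hypothetical scalar fallback (the helper name and loop are illustrative and not part of this header):

// __device__ void atomic_store_fallback(float const* src, float* dst, int n, bool pred)
// {
//     if (!pred) return;               // mirrors the "@p" predicate guard in the inline PTX
//     for (int i = 0; i < n; ++i) {    // one add per fragment element (kCount)
//         atomicAdd(dst + i, src[i]);  // red.add.f32 is an atomicAdd whose return value is discarded
//     }
// }

The half_t specialization is analogous but packs two fp16 values into each f16x2 instruction, which is why it asserts that kCount is even.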
72
+ template <typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
73
+ typename Element_, ///< Element data type
74
+ int Rank>
75
+ class PredicatedTileIteratorAffineRankNAtomic {
76
+ public:
77
+ using ThreadMap = ThreadMap_;
78
+ using Shape = typename ThreadMap::Shape;
79
+
80
+ using Element = Element_;
81
+
82
+ using Layout = layout::AffineRankN<Rank>;
83
+ using TensorRef = TensorRef<Element, Layout>;
84
+ using TensorView = TensorView<Element, Layout>;
85
+ using ConstTensorRef = typename TensorRef::ConstTensorRef;
86
+
87
+ using Index = typename Layout::Index;
88
+ using LongIndex = typename Layout::LongIndex;
89
+ using TensorCoord = typename Layout::TensorCoord;
90
+
91
+ static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
92
+ static int const kThreads = ThreadMap::kThreads;
93
+ static int const kIterations = ThreadMap::Count::kTile;
94
+
95
+ static_assert(ThreadMap::Iterations::kRow > 0, "ThreadMap::Iterations::kRow must be > 0");
96
+ static_assert(ThreadMap::Iterations::kGroup > 0, "ThreadMap::Iterations::kGroup must be > 0");
97
+ static_assert(ThreadMap::Iterations::kCluster > 0,
98
+ "ThreadMap::Iterations::kCluster must be > 0");
99
+ static_assert(ThreadMap::Iterations::kColumn > 0, "ThreadMap::Iterations::kColumn must be > 0");
100
+ static_assert(!(Layout::kRank % 2),
101
+ "Layout rank must be even. This assumes the first half of the "
102
+ "modes correspond to the 'row' "
103
+ "and the second half of the modes correspond to the 'column'");
104
+
105
+ static bool const kBigEndian = false;
106
+
107
+ /// Fragment object
108
+ using Fragment = Array<Element,
109
+ ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow *
110
+ ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster *
111
+ ThreadMap::kElementsPerAccess>;
112
+
113
+ /// Memory access size
114
+ using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
115
+
116
+ //
117
+ // Parameters struct
118
+ //
119
+
120
+ /// Parameters structure
121
+ struct Params {
122
+ //
123
+ // Data members
124
+ //
125
+
126
+ Layout layout;
127
+
128
+ /// Stride in units of bytes along M modes
129
+ Coord<Layout::kRank / 2, typename Layout::LongIndex> stride_m;
130
+
131
+ /// Stride in units of bytes along N modes
132
+ Coord<Layout::kRank / 2, typename Layout::LongIndex> stride_n;
133
+
134
+ /// Fast divmod objects divided by tensor extents
135
+ FastDivmod divmod_m[(Layout::kRank == 2) ? 1 : (Layout::kRank / 2 - 1)];
136
+
137
+ /// Fast divmod objects divided by tensor extents
138
+ FastDivmod divmod_n[(Layout::kRank == 2) ? 1 : (Layout::kRank / 2 - 1)];
139
+
140
+ int64_t rank2_inc_col;
141
+ int64_t rank2_inc_row;
142
+
143
+ //
144
+ // Methods
145
+ //
146
+ CUTLASS_HOST_DEVICE
147
+ Params() {}
148
+
149
+ CUTLASS_HOST_DEVICE
150
+ Params(TensorCoord const& extent, Layout const& layout_) : layout(layout_)
151
+ {
152
+ CUTLASS_PRAGMA_UNROLL
153
+ for (int i = 0; i < Layout::kRank / 2; ++i) {
154
+ stride_m[i] = OffsetBytes<Element>(layout_.stride()[i]);
155
+ stride_n[i] = OffsetBytes<Element>(layout_.stride()[i + Layout::kRank / 2]);
156
+ }
157
+
158
+ if (kBigEndian) {
159
+ // "Big Endian" scheme
160
+ CUTLASS_PRAGMA_UNROLL
161
+ for (int i = 0; i < Layout::kRank / 2 - 1; ++i) {
162
+ divmod_m[i] = FastDivmod(extent[i + 1]);
163
+ divmod_n[i] = FastDivmod(extent[i + Layout::kRank / 2 + 1]);
164
+ }
165
+ } else {
166
+ // "Little Endian" scheme
167
+ CUTLASS_PRAGMA_UNROLL
168
+ for (int i = 0; i < Layout::kRank / 2 - 1; ++i) {
169
+ divmod_m[i] = FastDivmod(extent[i]);
170
+ divmod_n[i] = FastDivmod(extent[i + Layout::kRank / 2]);
171
+ }
172
+ }
173
+ }
174
+
175
+ CUTLASS_HOST_DEVICE
176
+ Params(Layout const& layout_) : layout(layout_)
177
+ {
178
+ CUTLASS_PRAGMA_UNROLL
179
+ for (int i = 0; i < Layout::kRank / 2; ++i) {
180
+ stride_m[i] = OffsetBytes<Element>(layout_.stride()[i]);
181
+ stride_n[i] = OffsetBytes<Element>(layout_.stride()[i + Layout::kRank / 2]);
182
+ }
183
+
184
+ rank2_inc_col = ThreadMap::Delta::kColumn * stride_n[0];
185
+ rank2_inc_row = ThreadMap::Delta::kRow * stride_m[0];
186
+ }
187
+ };
188
+
189
+ /// Mask object
190
+ struct Mask {
191
+ static int const kCount = ThreadMap::Iterations::kColumn;
192
+
193
+ /// Predicate state
194
+ bool predicates[kCount];
195
+
196
+ //
197
+ // Mask
198
+ //
199
+ CUTLASS_HOST_DEVICE
200
+ Mask() { enable(); }
201
+
202
+ ///< Efficiently disables all accesses guarded by mask
203
+ CUTLASS_HOST_DEVICE void clear()
204
+ {
205
+ CUTLASS_PRAGMA_UNROLL
206
+ for (int i = 0; i < kCount; ++i) { predicates[i] = false; }
207
+ }
208
+
209
+ ///< Efficiently enables all accesses guarded by mask
210
+ CUTLASS_DEVICE void enable()
211
+ {
212
+ CUTLASS_PRAGMA_UNROLL
213
+ for (int i = 0; i < kCount; ++i) { predicates[i] = true; }
214
+ }
215
+ };
216
+
217
+ private:
218
+ //
219
+ // Data members
220
+ //
221
+
222
+ /// Parameters structure containing reference and precomputed state.
223
+ Params params_;
224
+
225
+ /// Byte-level pointer
226
+ uint8_t* byte_pointer_;
227
+
228
+ /// Array of boolean values to contain steady-state predicates
229
+ Mask mask_;
230
+
231
+ /// Extent of the matrix tile in rows
232
+ Index extent_row_;
233
+
234
+ /// Extent of the matrix tile in columns
235
+ Index extent_col_;
236
+
237
+ /// A thread's starting row position (assuming steady-state predicates have
238
+ /// been computed)
239
+ Index thread_start_row_;
240
+
241
+ /// A thread's starting column position (assuming steady-state predicates have
242
+ /// been computed)
243
+ Index thread_start_column_;
244
+
245
+ /// Internal state counter
246
+ int state_[3];
247
+
248
+ /// Offsets in columns, cached for performance
249
+ int64_t offset_modes_n_[ThreadMap::Iterations::kColumn];
250
+
251
+ //
252
+ // Static asserts about internal strides
253
+ //
254
+
255
+ static_assert(sizeof(extent_row_) == 4, "Expected 32b extents");
256
+ static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
257
+
258
+ private:
259
+ //
260
+ // Methods
261
+ //
262
+
263
+ public:
264
+ //
265
+ // Methods
266
+ //
267
+
268
+ /// Constructor
269
+ CUTLASS_DEVICE
270
+ PredicatedTileIteratorAffineRankNAtomic(
271
+ Params const& params,
272
+ Element* pointer,
273
+ MatrixCoord extent,
274
+ int thread_idx,
275
+ MatrixCoord threadblock_offset = MatrixCoord(),
276
+ int const* indices = nullptr ///< gather/scatter indices, note no support for
277
+ ///< gather/scatter at this specialization
278
+ )
279
+ : params_(params)
280
+ {
281
+ MatrixCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;
282
+
283
+ extent_row_ = extent.row();
284
+ extent_col_ = extent.column();
285
+
286
+ thread_start_row_ = thread_offset.row();
287
+ thread_start_column_ = thread_offset.column();
288
+
289
+ if (Layout::kRank > 2) {
290
+ // Initialize predicates
291
+ CUTLASS_PRAGMA_UNROLL
292
+ for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
293
+ //
294
+ // Compute coordinate and decompose into N modes
295
+ //
296
+
297
+ int coord_n = thread_start_column_ + c * ThreadMap::Delta::kColumn;
298
+
299
+ mask_.predicates[c] = coord_n < extent.column();
300
+
301
+ Coord<Layout::kRank / 2, Index> modes_n;
302
+
303
+ int64_t offset_modes_n = 0;
304
+
305
+ if (kBigEndian) {
306
+ modes_n = CoordinateDecomposition<Layout::kRank / 2>(coord_n, params_.divmod_n);
307
+
308
+ offset_modes_n = dot(modes_n, params_.stride_n);
309
+ } else {
310
+ modes_n = CoordinateDecompositionLittleEndian<Layout::kRank / 2>(
311
+ coord_n, params_.divmod_n);
312
+
313
+ offset_modes_n = dot(modes_n, params_.stride_n);
314
+ }
315
+
316
+ offset_modes_n_[c] = offset_modes_n;
317
+ }
318
+
319
+ if (!pointer) { mask_.clear(); }
320
+ }
321
+
322
+ // Initialize pointer
323
+ byte_pointer_ = reinterpret_cast<uint8_t*>(pointer);
324
+
325
+ // Initialize internal state counter
326
+ state_[0] = state_[1] = state_[2] = 0;
327
+ }
328
+
329
+ /// Adds a pointer offset in units of Element
330
+ CUTLASS_HOST_DEVICE
331
+ void add_pointer_offset(LongIndex pointer_offset)
332
+ {
333
+ byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
334
+ }
335
+
336
+ /// Stores a fragment to memory
337
+ CUTLASS_DEVICE
338
+ void store_with_byte_offset(Fragment const& frag, int64_t byte_offset)
339
+ {
340
+ uint8_t* byte_pointer = byte_pointer_;
341
+ AccessType const* frag_ptr = reinterpret_cast<AccessType const*>(&frag);
342
+
343
+ CUTLASS_PRAGMA_UNROLL
344
+ for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
345
+ CUTLASS_PRAGMA_UNROLL
346
+ for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
347
+ int row_begin = thread_start_row_ + group * ThreadMap::Delta::kGroup +
348
+ cluster * ThreadMap::Delta::kCluster;
349
+ int64_t offset_modes_m = row_begin * params_.stride_m[0];
350
+
351
+ CUTLASS_PRAGMA_UNROLL
352
+ for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
353
+ int frag_row_idx =
354
+ (row + ThreadMap::Iterations::kRow *
355
+ (group + ThreadMap::Iterations::kGroup * cluster));
356
+
357
+ //
358
+ // Compute coordinate and decompose into M modes
359
+ //
360
+
361
+ int coord_m = row * ThreadMap::Delta::kRow + row_begin;
362
+
363
+ Coord<Layout::kRank / 2, Index> modes_m;
364
+
365
+ if (Layout::kRank > 2) {
366
+ if (kBigEndian) {
367
+ modes_m = CoordinateDecomposition<Layout::kRank / 2>(coord_m,
368
+ params_.divmod_m);
369
+ } else {
370
+ modes_m = CoordinateDecompositionLittleEndian<Layout::kRank / 2>(
371
+ coord_m, params_.divmod_m);
372
+ }
373
+
374
+ offset_modes_m = dot(modes_m, params_.stride_m);
375
+ }
376
+
377
+ //
378
+ // Compute the offset due to modes M
379
+ //
380
+
381
+ bool row_guard = (coord_m < extent_row_);
382
+ int64_t offset_modes_n = thread_start_column_ * params_.stride_n[0];
383
+
384
+ CUTLASS_PRAGMA_UNROLL
385
+ for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
386
+ //
387
+ // Compute coordinate and decompose into N modes
388
+ //
389
+
390
+ if (Layout::kRank > 2) { offset_modes_n = offset_modes_n_[column]; }
391
+
392
+ //
393
+ // Compute the pointer and access
394
+ //
395
+ bool guard;
396
+ if (Layout::kRank > 2) {
397
+ guard = row_guard && mask_.predicates[column];
398
+ } else {
399
+ guard = (coord_m < extent_row_) &&
400
+ ((thread_start_column_ + ThreadMap::Delta::kColumn * column) <
401
+ extent_col_);
402
+ }
403
+
404
+ atomic_store<AccessType>(
405
+ frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
406
+ (void*)(byte_pointer + offset_modes_m + offset_modes_n + byte_offset),
407
+ guard);
408
+
409
+ if (Layout::kRank == 2) { offset_modes_n += params_.rank2_inc_col; }
410
+ }
411
+
412
+ if (Layout::kRank == 2) { offset_modes_m += params_.rank2_inc_row; }
413
+ }
414
+ }
415
+ }
416
+ }
417
+
418
+ /// Stores a fragment to memory
419
+ CUTLASS_DEVICE
420
+ void store(Fragment const& frag) { store_with_byte_offset(frag, 0); }
421
+
422
+ CUTLASS_DEVICE
423
+ void load(Fragment& frag) {}
424
+
425
+ /// Advances to the next position to load or store
426
+ CUTLASS_HOST_DEVICE
427
+ PredicatedTileIteratorAffineRankNAtomic& operator++()
428
+ {
429
+ ++state_[0];
430
+ thread_start_row_ += ThreadMap::Shape::kRow;
431
+
432
+ if (state_[0] == ThreadMap::Count::kRow) {
433
+ state_[0] = 0;
434
+ ++state_[1];
435
+
436
+ thread_start_row_ +=
437
+ (ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * ThreadMap::Count::kRow;
438
+
439
+ if (state_[1] == ThreadMap::Count::kGroup) {
440
+ state_[1] = 0;
441
+ ++state_[2];
442
+
443
+ thread_start_row_ += ThreadMap::Count::kGroup * ThreadMap::Shape::kGroup *
444
+ ThreadMap::Count::kRow * ThreadMap::Shape::kRow;
445
+
446
+ if (state_[2] == ThreadMap::Count::kCluster) { state_[2] = 0; }
447
+ }
448
+ }
449
+
450
+ return *this;
451
+ }
452
+
453
+ ///< Efficiently disables all accesses guarded by mask
454
+ CUTLASS_DEVICE void clear_mask() { mask_.clear(); }
455
+
456
+ ///< Efficiently enables all accesses guarded by mask
457
+ CUTLASS_DEVICE void enable_mask() { mask_.enable(); }
458
+
459
+ ///< Gets the mask
460
+ CUTLASS_DEVICE void get_mask(Mask& mask) { mask = mask_; }
461
+
462
+ ///< Sets the mask
463
+ CUTLASS_DEVICE void set_mask(Mask const& mask) { mask_ = mask; }
464
+ };
465
+
466
+ template <typename ThreadMap_, ///< Thread map (concept: OutputTileThreadMap)
467
+ typename Element_, ///< Element data type
468
+ bool ScatterD = false, ///< Scatter D operand or not
469
+ typename PermuteDLayout = layout::NoPermute, ///< Permute D operand or not
470
+ bool UseCUDAStore = false>
471
+ class PredicatedTileIteratorAtomic {
472
+ public:
473
+ using ThreadMap = ThreadMap_;
474
+ using Shape = typename ThreadMap::Shape;
475
+
476
+ using Element = Element_;
477
+
478
+ using Layout = layout::RowMajor;
479
+ using TensorRef = TensorRef<Element, Layout>;
480
+ using ConstTensorRef = typename TensorRef::ConstTensorRef;
481
+
482
+ using Index = typename Layout::Index;
483
+ using LongIndex = typename Layout::LongIndex;
484
+ using TensorCoord = MatrixCoord;
485
+
486
+ static int const kElementsPerAccess = ThreadMap::kElementsPerAccess;
487
+ static int const kThreads = ThreadMap::kThreads;
488
+ static int const kIterations = ThreadMap::Count::kTile;
489
+
490
+ static bool constexpr PermuteD = !layout::is_trivial_permute<PermuteDLayout>;
491
+
492
+ static_assert(ThreadMap::Iterations::kRow > 0, "ThreadMap::Iterations::kRow must be > 0");
493
+ static_assert(ThreadMap::Iterations::kGroup > 0, "ThreadMap::Iterations::kGroup must be > 0");
494
+ static_assert(ThreadMap::Iterations::kCluster > 0,
495
+ "ThreadMap::Iterations::kCluster must be > 0");
496
+ static_assert(ThreadMap::Iterations::kColumn > 0, "ThreadMap::Iterations::kColumn must be > 0");
497
+
498
+ /// Fragment object
499
+ using Fragment = Array<Element,
500
+ ThreadMap::Iterations::kColumn * ThreadMap::Iterations::kRow *
501
+ ThreadMap::Iterations::kGroup * ThreadMap::Iterations::kCluster *
502
+ ThreadMap::kElementsPerAccess>;
503
+
504
+ /// Memory access size
505
+ using AccessType = AlignedArray<Element, ThreadMap::kElementsPerAccess>;
506
+
507
+ //
508
+ // Parameters struct
509
+ //
510
+
511
+ /// Uses a non-template class
512
+ struct Params : PredicatedTileIteratorParams {
513
+ using Base = PredicatedTileIteratorParams;
514
+
515
+ CUTLASS_HOST_DEVICE
516
+ Params() {}
517
+
518
+ CUTLASS_HOST_DEVICE
519
+ Params(Layout const& layout)
520
+ : PredicatedTileIteratorParams(
521
+ layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess,
522
+ make_OutputTileThreadMapDesc<ThreadMap>())
523
+ {
524
+ }
525
+
526
+ CUTLASS_HOST_DEVICE
527
+ Params(Base const& base) : Base(base) {}
528
+ };
529
+
530
+ /// Mask object
531
+ struct Mask {
532
+ static int const kCount = ThreadMap::Iterations::kColumn;
533
+
534
+ /// Predicate state
535
+ bool predicates[kCount];
536
+
537
+ //
538
+ // Mask
539
+ //
540
+ CUTLASS_HOST_DEVICE
541
+ Mask() { enable(); }
542
+
543
+ ///< Efficiently disables all accesses guarded by mask
544
+ CUTLASS_HOST_DEVICE void clear()
545
+ {
546
+ CUTLASS_PRAGMA_UNROLL
547
+ for (int i = 0; i < kCount; ++i) { predicates[i] = false; }
548
+ }
549
+
550
+ ///< Efficiently enables all accesses guarded by mask
551
+ CUTLASS_DEVICE void enable()
552
+ {
553
+ CUTLASS_PRAGMA_UNROLL
554
+ for (int i = 0; i < kCount; ++i) { predicates[i] = true; }
555
+ }
556
+ };
557
+
558
+ private:
559
+ //
560
+ // Data members
561
+ //
562
+
563
+ /// Parameters structure containing reference and precomputed state.
564
+ PredicatedTileIteratorParams params_;
565
+
566
+ /// Byte-level pointer. This pointer is usually for both load() and store(),
567
+ /// unless PermuteD is performed. When having PermuteD, byte_pointer_ is only
568
+ /// for load().
569
+ uint8_t* byte_pointer_;
570
+
571
+ /// Byte-level pointer for store(). Due to PermuteD Op, store_byte_pointer_
572
+ /// may be with different address computation compared to byte_pointer_.
573
+ uint8_t* store_byte_pointer_;
574
+
575
+ /// Array of boolean values to contain steady-state predicates
576
+ Mask mask_;
577
+
578
+ /// Extent of the matrix tile in rows
579
+ Index extent_row_;
580
+
581
+ /// Extent of the matrix tile in columns
582
+ Index extent_column_;
583
+
584
+ /// A thread's starting row position (assuming steady-state predicates have
585
+ /// been computed)
586
+ Index thread_start_row_;
587
+
588
+ /// A thread's starting column
589
+ Index thread_start_column_;
590
+
591
+ /// Internal state counter
592
+ int state_[3];
593
+
594
+ /// Scatter indices
595
+ int const* indices_;
596
+
597
+ /// PermuteDLayout
598
+ PermuteDLayout permute_layout_;
599
+
600
+ //
601
+ // Static asserts about internal strides
602
+ //
603
+
604
+ static_assert(sizeof(extent_row_) == 4, "Expected 32b extents");
605
+ static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents");
606
+ static_assert(sizeof(PredicatedTileIteratorParams::stride) == 8, "Expected 64b strides");
607
+
608
+ private:
609
+ //
610
+ // Methods
611
+ //
612
+
613
+ public:
614
+ //
615
+ // Methods
616
+ //
617
+
618
+ /// Constructor
619
+ CUTLASS_DEVICE
620
+ PredicatedTileIteratorAtomic(PredicatedTileIteratorParams const& params,
621
+ Element* pointer,
622
+ TensorCoord extent,
623
+ int thread_idx,
624
+ TensorCoord threadblock_offset = TensorCoord(),
625
+ int const* indices = nullptr)
626
+ : params_(params),
627
+ indices_(indices),
628
+ permute_layout_(PitchLinearCoord(extent.column(), extent.row()),
629
+ params_.stride * kElementsPerAccess / sizeof(AccessType))
630
+ {
631
+ TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset;
632
+
633
+ extent_row_ = extent.row();
634
+ extent_column_ = extent.column();
635
+
636
+ thread_start_row_ = thread_offset.row();
637
+ thread_start_column_ = thread_offset.column();
638
+
639
+ // Initialize predicates
640
+ CUTLASS_PRAGMA_UNROLL
641
+ for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) {
642
+ mask_.predicates[c] =
643
+ ((thread_offset.column() + ThreadMap::Delta::kColumn * c) < extent.column());
644
+ }
645
+
646
+ // Null pointer performs no accesses
647
+ if (!pointer) { mask_.clear(); }
648
+
649
+ if (ScatterD && !indices) { mask_.clear(); }
650
+
651
+ // Initialize byte_pointer_
652
+ byte_pointer_ = reinterpret_cast<uint8_t*>(pointer) +
653
+ LongIndex(thread_offset.row()) * LongIndex(params_.stride) +
654
+ LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess;
655
+
656
+ if (ScatterD) {
657
+ byte_pointer_ =
658
+ reinterpret_cast<uint8_t*>(pointer) +
659
+ LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess;
660
+ }
661
+
662
+ // store_byte_pointer_ is set to be the same with byte_pointer_ unless
663
+ // PermuteD is used.
664
+ store_byte_pointer_ = PermuteD ? reinterpret_cast<uint8_t*>(pointer) : byte_pointer_;
665
+
666
+ // Initialize internal state counter
667
+ state_[0] = state_[1] = state_[2] = 0;
668
+ }
669
+
670
+ /// Adds a pointer offset in units of Element
671
+ CUTLASS_HOST_DEVICE
672
+ void add_pointer_offset(LongIndex pointer_offset)
673
+ {
674
+ store_byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
675
+ byte_pointer_ += pointer_offset * sizeof_bits<Element>::value / 8;
676
+ }
677
+
678
+ /// Stores a fragment to memory
679
+ CUTLASS_DEVICE
680
+ void store_with_byte_offset(Fragment const& frag, int64_t byte_offset) const
681
+ {
682
+ uint8_t* byte_pointer = store_byte_pointer_;
683
+ AccessType const* frag_ptr = reinterpret_cast<AccessType const*>(&frag);
684
+
685
+ CUTLASS_PRAGMA_UNROLL
686
+ for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) {
687
+ CUTLASS_PRAGMA_UNROLL
688
+ for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) {
689
+ CUTLASS_PRAGMA_UNROLL
690
+ for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) {
691
+ int frag_row_idx =
692
+ (row + ThreadMap::Iterations::kRow *
693
+ (group + ThreadMap::Iterations::kGroup * cluster));
694
+
695
+ int row_offset = row * ThreadMap::Delta::kRow +
696
+ group * ThreadMap::Delta::kGroup +
697
+ cluster * ThreadMap::Delta::kCluster;
698
+
699
+ bool row_guard = ((row_offset + thread_start_row_) < extent_row_);
700
+
701
+ AccessType* memory_pointer =
702
+ reinterpret_cast<AccessType*>(byte_pointer + byte_offset);
703
+
704
+ if (ScatterD && row_guard) {
705
+ assert(indices_);
706
+
707
+ memory_pointer = reinterpret_cast<AccessType*>(
708
+ byte_pointer + byte_offset +
709
+ LongIndex(indices_[row_offset + thread_start_row_]) *
710
+ LongIndex(params_.stride));
711
+ }
712
+
713
+ CUTLASS_PRAGMA_UNROLL
714
+ for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) {
715
+ bool guard = row_guard && mask_.predicates[column];
716
+
717
+ if (PermuteD) {
718
+ int col_offset = column * ThreadMap::Delta::kColumn;
719
+
720
+ int col = col_offset + thread_start_column_;
721
+ int row = row_offset + thread_start_row_;
722
+
723
+ // Locate memory_pointer
724
+ memory_pointer = reinterpret_cast<AccessType*>(
725
+ byte_pointer + byte_offset +
726
+ permute_layout_(PitchLinearCoord(col, row)) * sizeof(AccessType) /
727
+ kElementsPerAccess);
728
+ }
729
+ atomic_store<AccessType>(
730
+ frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column],
731
+ (void*)&memory_pointer[0],
732
+ guard);
733
+
734
+ if (!PermuteD) {
735
+ memory_pointer += (ThreadMap::Delta::kColumn / kElementsPerAccess);
736
+ }
737
+ }
738
+
739
+ if (row + 1 < ThreadMap::Iterations::kRow) {
740
+ if (!ScatterD && !PermuteD) { byte_pointer += params_.increment_row; }
741
+ }
742
+ }
743
+
744
+ if (group + 1 < ThreadMap::Iterations::kGroup) {
745
+ byte_pointer += params_.increment_group;
746
+ }
747
+ }
748
+
749
+ if (cluster + 1 < ThreadMap::Iterations::kCluster) {
750
+ byte_pointer += params_.increment_cluster;
751
+ }
752
+ }
753
+ }
754
+
755
+ /// Stores a fragment to memory
756
+ CUTLASS_DEVICE
757
+ void store(Fragment const& frag) const { store_with_byte_offset(frag, 0); }
758
+
759
+ CUTLASS_DEVICE
760
+ void load(Fragment& frag) {}
761
+
762
+ CUTLASS_DEVICE
763
+ MatrixCoord thread_start() const
764
+ {
765
+ return MatrixCoord(thread_start_row_, thread_start_column_);
766
+ }
767
+
768
+ /// Need to get the thread start row from the tile iterator
769
+ CUTLASS_DEVICE
770
+ int32_t thread_start_row() const { return thread_start_row_; }
771
+
772
+ /// Need to get the thread start row from the tile iterator
773
+ CUTLASS_DEVICE
774
+ int32_t thread_start_column() const { return thread_start_column_; }
775
+
776
+ /// Extent of the matrix in rows
777
+ CUTLASS_DEVICE
778
+ Index extent_row() const { return extent_row_; }
779
+
780
+ /// Extent of the matrix in columns
781
+ CUTLASS_DEVICE
782
+ Index extent_column() const { return extent_column_; }
783
+
784
+ /// Advances to the next position to load or store
785
+ CUTLASS_HOST_DEVICE
786
+ PredicatedTileIteratorAtomic& operator++()
787
+ {
788
+ ++state_[0];
789
+
790
+ if (!ScatterD && !PermuteD) { store_byte_pointer_ += params_.advance_row; }
791
+
792
+ if (!ScatterD) { byte_pointer_ += params_.advance_row; }
793
+
794
+ thread_start_row_ += ThreadMap::Shape::kRow;
795
+
796
+ if (state_[0] == ThreadMap::Count::kRow) {
797
+ state_[0] = 0;
798
+ ++state_[1];
799
+ byte_pointer_ += params_.advance_group;
800
+ store_byte_pointer_ += params_.advance_group;
801
+
802
+ thread_start_row_ +=
803
+ (ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * ThreadMap::Count::kRow;
804
+
805
+ if (state_[1] == ThreadMap::Count::kGroup) {
806
+ state_[1] = 0;
807
+ ++state_[2];
808
+ byte_pointer_ += params_.advance_cluster;
809
+ store_byte_pointer_ += params_.advance_cluster;
810
+
811
+ thread_start_row_ += ThreadMap::Count::kGroup * ThreadMap::Shape::kGroup *
812
+ ThreadMap::Count::kRow * ThreadMap::Shape::kRow;
813
+
814
+ if (state_[2] == ThreadMap::Count::kCluster) {
815
+ state_[2] = 0;
816
+ byte_pointer_ += params_.advance_tile;
817
+ store_byte_pointer_ += params_.advance_tile;
818
+
819
+ thread_start_row_ += ThreadMap::Shape::kGroup * ThreadMap::Shape::kRow *
820
+ ThreadMap::Shape::kCluster * ThreadMap::Shape::kTile;
821
+ }
822
+ }
823
+ }
824
+
825
+ return *this;
826
+ }
827
+
828
+ /// Advances a number of positions to load or store
829
+ CUTLASS_HOST_DEVICE
830
+ PredicatedTileIteratorAtomic& operator+=(int increment)
831
+ {
832
+ // Row
833
+ state_[0] += increment;
834
+ int increment_row = state_[0] / ThreadMap::Count::kRow;
835
+ state_[0] = state_[0] % ThreadMap::Count::kRow;
836
+
837
+ byte_pointer_ += (params_.advance_row * increment);
838
+ store_byte_pointer_ += (params_.advance_row * increment);
839
+ thread_start_row_ += (ThreadMap::Shape::kRow * increment);
840
+
841
+ // Group
842
+ state_[1] += increment_row;
843
+ int increment_group = state_[1] / ThreadMap::Count::kGroup;
844
+ state_[1] = state_[1] % ThreadMap::Count::kGroup;
845
+
846
+ byte_pointer_ += (params_.advance_group * increment_row);
847
+ store_byte_pointer_ += (params_.advance_group * increment_row);
848
+ thread_start_row_ += (ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow *
849
+ ThreadMap::Count::kRow * increment_row;
850
+
851
+ // Cluster
852
+ state_[2] += increment_group;
853
+ int increment_cluster = state_[2] / ThreadMap::Count::kCluster;
854
+ state_[2] = state_[2] % ThreadMap::Count::kCluster;
855
+
856
+ byte_pointer_ += (params_.advance_cluster * increment_group);
857
+ store_byte_pointer_ += (params_.advance_cluster * increment_group);
858
+ thread_start_row_ += ThreadMap::Count::kGroup * ThreadMap::Shape::kGroup *
859
+ ThreadMap::Count::kRow * ThreadMap::Shape::kRow * increment_group;
860
+
861
+ // Tile
862
+ byte_pointer_ += (params_.advance_tile * increment_cluster);
863
+ store_byte_pointer_ += (params_.advance_tile * increment_cluster);
864
+ thread_start_row_ += ThreadMap::Shape::kGroup * ThreadMap::Shape::kRow *
865
+ ThreadMap::Shape::kCluster * ThreadMap::Shape::kTile *
866
+ increment_cluster;
867
+
868
+ return *this;
869
+ }
870
+
871
+ ///< Efficiently disables all accesses guarded by mask
872
+ CUTLASS_DEVICE void clear_mask() { mask_.clear(); }
873
+
874
+ ///< Efficiently enables all accesses guarded by mask
875
+ CUTLASS_DEVICE void enable_mask() { mask_.enable(); }
876
+
877
+ ///< Gets the mask
878
+ CUTLASS_DEVICE void get_mask(Mask& mask) const { mask = mask_; }
879
+
880
+ ///< Sets the mask
881
+ CUTLASS_DEVICE void set_mask(Mask const& mask) { mask_ = mask; }
882
+ };
883
+
884
+ } // namespace threadblock
885
+ } // namespace epilogue
886
+ } // namespace cutlass
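Compared with the regular cutlass::epilogue::threadblock::PredicatedTileIterator, the two iterators in this header replace the guarded vector store with atomic_store, so several threadblocks can accumulate partial results into the same output tile without a separate reduction pass. A schematic contrast, with illustrative names only:

// regular epilogue iterator:    if (guard) { memory_pointer[0] = frag_ptr[idx]; }          // overwrite
// atomic iterators (this file): atomic_store<AccessType>(frag_ptr[idx], memory_pointer, guard);
//                               // predicated red/atomicAdd, i.e. accumulate instead of overwrite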
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/predicated_tile_iterator_residual_last.h ADDED
@@ -0,0 +1,1938 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
3
+ *reserved. SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice,
9
+ *this list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
+ *ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23
+ *LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24
+ *CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25
+ *SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26
+ *INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27
+ *CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28
+ *ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29
+ *POSSIBILITY OF SUCH DAMAGE.
30
+ *
31
+ **************************************************************************************************/
32
+
33
+ // Copyright (c) Microsoft Corporation.
34
+ // SPDX-License-Identifier: Apache-2.0
35
+
36
+ // DeepSpeed Team
37
+
38
+ /*! \file
39
+ \brief Templates implementing loading of tiles from pitch-linear rank=2
40
+ tensors.
41
+
42
+ This iterator uses masks to guard out-of-bounds accesses. The first tile
43
+ this iterator visits may be partial, then the remaining tiles are complete.
44
+ So, we only need to compute the predicates twice, once before the first tile
45
+ and once for the remaining full tiles which can share the same predicates.
46
+
47
+ A precomputed "Params" object minimizes the amount of state that must be
48
+ stored in registers, and integer addition is used to advance the pointer
49
+ through memory.
50
+ */
51
+
52
+ #pragma once
53
+
54
+ #include "cutlass/arch/memory.h"
55
+ #include "cutlass/transform/threadblock/predicated_tile_access_iterator.h"
56
+
57
+ ////////////////////////////////////////////////////////////////////////////////
58
+
59
+ namespace cutlass {
60
+ namespace transform {
61
+ namespace threadblock {
62
+
63
+ ////////////////////////////////////////////////////////////////////////////////
64
+
65
+ /// PredicatedTileIteratorResidualLast
66
+ ///
67
+ /// Satisfies: ForwardTileIteratorConcept |
68
+ /// ReadableContiguousTileIteratorConcept |
69
+ /// WriteableContiguousTileIteratorConcept |
70
+ /// MaskedTileIteratorConcept
71
+ ///
72
+ /// Regular tile iterator using a precomputed control structure to minimize
73
+ /// register liveness and integer arithmetic.
74
+ ///
75
+ /// Layout is assumed to be invariant at the time the precomputed "Params"
76
+ /// object is constructed.
77
+ ///
78
+ /// Base pointer and tensor extents may be specified at the time the iterator is
79
+ /// constructed. Subsequently, they are assumed to be immutable.
80
+ ///
81
+ /// Adding a logical coordinate offset may be performed at the time the iterator
82
+ /// is constructed. Subsequent additions to logical coordinate offset may be
83
+ /// performed but are relatively expensive.
84
+ ///
85
+ /// Visitation order is intended to first visit a "residual" tile that may be
86
+ /// partially full in both the advance dimension and the steady-state dimension.
87
+ /// This is assumed to be the last tile in the iteration sequence. Advancing an
88
+ /// iterator that has just been constructed moves to the first tile that is full
89
+ /// in the advance dimension and recomputes predicates. Subsequent accesses may
90
+ /// be performed without updating internal predicates and are efficient in terms
91
+ /// of live register state and pointer arithmetic instructions.
92
+ ///
93
+ /// To be efficient, this assumes the iterator will be dereferenced and advanced
94
+ /// at least once outside any looping structure to minimize integer arithmetic.
95
+ ///
96
+ /// Accesses out of bounds are safe so long as `clear_mask()` is called prior to
97
+ /// dereferencing the iterator.
98
+ ///
99
+ ///
100
+ /// Example:
101
+ ///
102
+ /// An efficient pipeline structure may be constructed as follows:
103
+ ///
104
+ // template <typename Iterator>
105
+ // __global__ void kernel(
106
+ // typename Iterator::Params params,
107
+ // typename Iterator::Element *ptr,
108
+ // TensorCoord extent) {
109
+ //
110
+ // typename Iterator::Fragment fragment;
111
+ //
112
+ // TensorCoord threadblock_offset(0, 0);
113
+ //
114
+ // Iterator iter(params, ptr, extent, threadIdx.x, threadblock_offset);
115
+ //
116
+ //
117
+ // fragment = *iter; // load "residue" tile first
118
+ // ++iter; // advance to first "steady state" tile and update
119
+ // internal masks
120
+ //
121
+ //
122
+ // #pragma unroll
123
+ // for (int i = Remaining - 1; i >= 0; --i) {
124
+ //
125
+ // f(fragment);
126
+ //
127
+ // if (!i) {
128
+ // iter.clear_mask(); // light-weight operation to clear masks -
129
+ // subsequent loads become NO-OPs.
130
+ // }
131
+ //
132
+ // fragment = *iter; // load tile during "steady state" phase
133
+ // ++iter; // advance to next tile - lightweight due to
134
+ // steady-state masks
135
+ // }
136
+ // }
137
+ //
138
+ // void host(TensorView<Element, 2, layout::PitchLinear> view) {
139
+ //
140
+ // using Iterator =
141
+ // transform::threadblock::PredicatedTileIteratorResidualLast;
142
+ //
143
+ // typename Iterator::Params params(view.layout());
144
+ //
145
+ // kernel<Iterator>(params, view.data());
146
+ // }
147
+ ///
148
+ ///
149
+ template <typename Shape,
150
+ typename Element,
151
+ typename Layout,
152
+ int AdvanceRank,
153
+ typename ThreadMap,
154
+ int AccessSize = ThreadMap::kElementsPerAccess,
155
+ bool Gather = false>
156
+ class PredicatedTileIteratorResidualLast;
157
+
158
+ ////////////////////////////////////////////////////////////////////////////////
159
+
160
+ /// Specialization of PredicatedTileIteratorResidualLast for pitch-linear data.
161
+ ///
162
+ /// Satisfies: ForwardTileIteratorConcept |
163
+ /// ReadableContiguousTileIteratorConcept |
164
+ /// WriteableContiguousTileIteratorConcept |
165
+ /// MaskedTileIteratorConcept
166
+ ///
167
+ template <typename Shape_,
168
+ typename Element_,
169
+ int AdvanceRank,
170
+ typename ThreadMap_,
171
+ int AccessSize,
172
+ bool Gather>
173
+ class PredicatedTileIteratorResidualLast<Shape_,
174
+ Element_,
175
+ layout::PitchLinear,
176
+ AdvanceRank,
177
+ ThreadMap_,
178
+ AccessSize,
179
+ Gather> {
180
+ public:
181
+ static_assert(AdvanceRank == 0 || AdvanceRank == 1,
182
+ "Specialization for pitch-linear iterator may advance along the "
183
+ "contiguous(rank=0) or strided(rank=1) dimension.");
184
+
185
+ using Shape = Shape_;
186
+ using Element = Element_;
187
+ using Layout = layout::PitchLinear;
188
+ static int const kAdvanceRank = AdvanceRank;
189
+ using ThreadMap = ThreadMap_;
190
+
191
+ using Index = typename Layout::Index;
192
+ using LongIndex = typename Layout::LongIndex;
193
+
194
+ using TensorRef = TensorRef<Element, Layout>;
195
+ using TensorView = TensorView<Element, Layout>;
196
+ using TensorCoord = typename Layout::TensorCoord;
197
+
198
+ using Pointer = Element*;
199
+ using NonConstPointer = typename platform::remove_const<Element>::type*;
200
+
201
+ /// Type used for internal memory accesses
202
+ using AccessType =
203
+ AlignedArray<Element, AccessSize, (AccessSize * sizeof_bits<Element>::value / 8)>;
204
+
205
+ /// Underlying iterator to compute the addresses
206
+ using TileAccessIterator = PredicatedTileAccessIteratorResidualLast<Shape,
207
+ Element,
208
+ Layout,
209
+ kAdvanceRank,
210
+ ThreadMap,
211
+ AccessType,
212
+ Gather>;
213
+
214
+ static int const kAccessesPerVector = TileAccessIterator::kAccessesPerVector;
215
+
216
+ /// Fragment object to be loaded or stored
217
+ using Fragment =
218
+ cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
219
+
220
+ /// Predicate vector stores mask to guard accesses
221
+ using Mask = typename TileAccessIterator::Mask;
222
+
223
+ /// Parameters object is precomputed state and is host-constructible
224
+ class Params {
225
+ public:
226
+ using Base = typename TileAccessIterator::Params::Base;
227
+
228
+ friend PredicatedTileIteratorResidualLast;
229
+
230
+ private:
231
+ /// Parameters object
232
+ typename TileAccessIterator::Params params_;
233
+
234
+ public:
235
+ /// Construct the Params object given a pitch-linear tensor's layout
236
+ CUTLASS_HOST_DEVICE
237
+ Params(Layout const& layout) : params_(layout) {}
238
+
239
+ CUTLASS_HOST_DEVICE
240
+ Params() {}
241
+
242
+ CUTLASS_HOST_DEVICE
243
+ Params(Base const& base) : params_(base) {}
244
+ };
245
+
246
+ private:
247
+ /// Internal pointer type permits fast address arithmetic
248
+ using BytePointer = char*;
249
+
250
+ private:
251
+ //
252
+ // Data members
253
+ //
254
+
255
+ /// Data member to the tile access iterator
256
+ TileAccessIterator address_iterator_;
257
+
258
+ public:
259
+ /// Constructs a TileIterator from its precomputed state, threadblock offset,
260
+ /// and thread ID
261
+ CUTLASS_HOST_DEVICE
262
+ PredicatedTileIteratorResidualLast(
263
+ /// Precomputed parameters object
264
+ Params const& params,
265
+ /// Pointer to start of tensor
266
+ Pointer pointer,
267
+ /// Extent of tensor
268
+ TensorCoord extent,
269
+ /// ID of each participating thread
270
+ int thread_id,
271
+ /// Initial offset of threadblock
272
+ TensorCoord const& threadblock_offset,
273
+ /// Gather indices
274
+ int const* indices = nullptr)
275
+ : address_iterator_(params.params_, pointer, extent, thread_id, threadblock_offset, indices)
276
+ {
277
+ }
278
+
279
+ /// Construct a PredicatedTileIteratorResidualLast with zero threadblock
280
+ /// offset
281
+ CUTLASS_HOST_DEVICE
282
+ PredicatedTileIteratorResidualLast(Params const& params, ///< Precomputed parameters object
283
+ Pointer pointer, ///< Pointer to start of tensor
284
+ TensorCoord extent, ///< Extent of tensor
285
+ int thread_id ///< ID of each participating thread
286
+ )
287
+ : PredicatedTileIteratorResidualLast(params, pointer, extent, thread_id, make_Coord(0, 0))
288
+ {
289
+ }
290
+
291
+ /// Adds a pointer offset in units of Element
292
+ CUTLASS_HOST_DEVICE
293
+ void add_pointer_offset(LongIndex pointer_offset)
294
+ {
295
+ address_iterator_.add_pointer_offset(pointer_offset);
296
+ }
297
+
298
+ /// Advances to the next tile in memory.
299
+ ///
300
+ /// The first time this method is called, predicates are updated, and the
301
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
302
+ /// Subsequent calls are lightweight and must only update the internal
303
+ /// pointer.
304
+ CUTLASS_HOST_DEVICE
305
+ PredicatedTileIteratorResidualLast& operator++()
306
+ {
307
+ if (kAdvanceRank)
308
+ address_iterator_.add_tile_offset({0, 1});
309
+ else
310
+ address_iterator_.add_tile_offset({1, 0});
311
+
312
+ return *this;
313
+ }
314
+
315
+ /// Advances to the next tile in memory.
316
+ ///
317
+ /// The first time this method is called, predicates are updated, and the
318
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
319
+ /// Subsequent calls are lightweight and must only update the internal
320
+ /// pointer.
321
+ CUTLASS_HOST_DEVICE
322
+ PredicatedTileIteratorResidualLast operator++(int)
323
+ {
324
+ PredicatedTileIteratorResidualLast self(*this);
325
+ operator++();
326
+ return self;
327
+ }
328
+
329
+ /// Clears the predicate set efficiently
330
+ CUTLASS_HOST_DEVICE
331
+ void clear_mask(bool enable = true) { address_iterator_.clear_mask(enable); }
332
+
333
+ CUTLASS_HOST_DEVICE
334
+ void set_residual_tile(bool enable) { address_iterator_.set_residual_tile(enable); }
335
+
336
+ /// Clears the predicate set efficiently
337
+ CUTLASS_HOST_DEVICE
338
+ void enable_mask() { address_iterator_.enable_mask(); }
339
+
340
+ /// Sets the predicate mask, overriding value stored in predicate iterator
341
+ CUTLASS_HOST_DEVICE
342
+ void set_mask(Mask const& mask) { address_iterator_.set_mask(mask); }
343
+
344
+ /// Gets the mask
345
+ CUTLASS_HOST_DEVICE
346
+ void get_mask(Mask& mask) { address_iterator_.get_mask(mask); }
347
+
348
+ CUTLASS_DEVICE
349
+ void load_with_pointer_offset(Fragment& frag, Index pointer_offset)
350
+ {
351
+ load_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
352
+ }
353
+
354
+ CUTLASS_DEVICE
355
+ void load_with_byte_offset(Fragment& frag, LongIndex byte_offset)
356
+ {
357
+ AccessType* frag_ptr = reinterpret_cast<AccessType*>(&frag);
358
+
359
+ CUTLASS_PRAGMA_UNROLL
360
+ for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
361
+ CUTLASS_PRAGMA_UNROLL
362
+ for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
363
+ CUTLASS_PRAGMA_UNROLL
364
+ for (int v = 0; v < kAccessesPerVector; ++v) {
365
+ int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
366
+
367
+ address_iterator_.set_iteration_index(idx);
368
+ char const* byte_ptr =
369
+ reinterpret_cast<char const*>(address_iterator_.get()) + byte_offset;
370
+
371
+ AccessType const* access_ptr = reinterpret_cast<AccessType const*>(byte_ptr);
372
+
373
+ cutlass::arch::global_load<AccessType, sizeof(AccessType)>(
374
+ frag_ptr[idx], access_ptr, address_iterator_.valid());
375
+
376
+ ++address_iterator_;
377
+ }
378
+ }
379
+ }
380
+ }
381
+
382
+ /// Loads a fragment from memory
383
+ CUTLASS_DEVICE
384
+ void load(Fragment& frag) { load_with_byte_offset(frag, 0); }
385
+
386
+ /// Store a fragment to memory
387
+ CUTLASS_DEVICE
388
+ void store_with_pointer_offset(Fragment const& frag, Index pointer_offset)
389
+ {
390
+ store_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
391
+ }
392
+
393
+ /// Store a fragment to memory
394
+ CUTLASS_DEVICE
395
+ void store_with_byte_offset(Fragment const& frag, LongIndex byte_offset)
396
+ {
397
+ address_iterator_.set_iteration_index(0);
398
+ AccessType const* frag_ptr = reinterpret_cast<AccessType const*>(&frag);
399
+
400
+ CUTLASS_PRAGMA_UNROLL
401
+ for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
402
+ CUTLASS_PRAGMA_UNROLL
403
+ for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
404
+ CUTLASS_PRAGMA_UNROLL
405
+ for (int v = 0; v < kAccessesPerVector; ++v) {
406
+ int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
407
+
408
+ char* byte_ptr = reinterpret_cast<char*>(address_iterator_.get()) + byte_offset;
409
+ AccessType* access_ptr = reinterpret_cast<AccessType*>(byte_ptr);
410
+
411
+ if (address_iterator_.valid()) { *access_ptr = frag_ptr[idx]; }
412
+ ++address_iterator_;
413
+ }
414
+ }
415
+ }
416
+ }
417
+
418
+ /// Store a fragment to memory
419
+ CUTLASS_DEVICE
420
+ void store(Fragment const& frag) { store_with_byte_offset(frag, 0); }
421
+ };
422
+
423
+ ////////////////////////////////////////////////////////////////////////////////
424
+
425
+ /// Specialization of PredicatedTileIteratorResidualLast for pitch-linear data.
426
+ ///
427
+ /// Satisfies: ForwardTileIteratorConcept |
428
+ /// ReadableContiguousTileIteratorConcept |
429
+ /// WriteableContiguousTileIteratorConcept |
430
+ /// MaskedTileIteratorConcept
431
+ ///
432
+ template <typename Shape_,
433
+ typename Element_,
434
+ int AdvanceRank,
435
+ typename ThreadMap_,
436
+ int AccessSize,
437
+ bool Gather>
438
+ class PredicatedTileIteratorResidualLast<Shape_,
439
+ Element_,
440
+ layout::ColumnMajor,
441
+ AdvanceRank,
442
+ ThreadMap_,
443
+ AccessSize,
444
+ Gather> {
445
+ public:
446
+ static_assert(AdvanceRank == 0 || AdvanceRank == 1,
447
+ "Specialization for pitch-linear iterator may along advance along the "
448
+ "contiguous(rank=0) or strided(rank=1) dimension.");
449
+
450
+ using Shape = Shape_;
451
+ using Element = Element_;
452
+ using Layout = layout::ColumnMajor;
453
+ static int const kAdvanceRank = AdvanceRank;
454
+ using ThreadMap = ThreadMap_;
455
+
456
+ using Index = typename Layout::Index;
457
+ using LongIndex = typename Layout::LongIndex;
458
+
459
+ using TensorRef = TensorRef<Element, Layout>;
460
+ using TensorView = TensorView<Element, Layout>;
461
+ using TensorCoord = typename Layout::TensorCoord;
462
+
463
+ using Pointer = Element*;
464
+ using NonConstPointer = typename platform::remove_const<Element>::type*;
465
+
466
+ using UnderlyingIterator =
467
+ PredicatedTileIteratorResidualLast<layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
468
+ Element,
469
+ layout::PitchLinear,
470
+ (kAdvanceRank == 0 ? 0 : 1),
471
+ ThreadMap,
472
+ AccessSize,
473
+ Gather>;
474
+
475
+ using AccessType = typename UnderlyingIterator::AccessType;
476
+
477
+ /// Fragment object to be loaded or stored
478
+ using Fragment =
479
+ cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
480
+
481
+ /// Predicate vector stores mask to guard accesses
482
+ using Mask = typename UnderlyingIterator::Mask;
483
+
484
+ /// Parameters object is precomputed state and is host-constructible
485
+ class Params {
486
+ private:
487
+ friend PredicatedTileIteratorResidualLast;
488
+
489
+ /// Parameters object
490
+ typename UnderlyingIterator::Params params_;
491
+
492
+ public:
493
+ CUTLASS_HOST_DEVICE
494
+ Params() {}
495
+
496
+ /// Construct the Params object given a pitch-linear tensor's layout
497
+ CUTLASS_HOST_DEVICE
498
+ Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))) {}
499
+
500
+ CUTLASS_HOST_DEVICE
501
+ Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {}
502
+ };
503
+
504
+ private:
505
+ //
506
+ // Data members
507
+ //
508
+
509
+ /// Underlying pitch-linear tile iterator
510
+ UnderlyingIterator iterator_;
511
+
512
+ public:
513
+ /// Constructs a TileIterator from its precomputed state, threadblock offset,
514
+ /// and thread ID
515
+ CUTLASS_HOST_DEVICE
516
+ PredicatedTileIteratorResidualLast(
517
+ Params const& params, ///< Precomputed parameters object
518
+ Pointer pointer, ///< Pointer to start of tensor
519
+ TensorCoord extent, ///< Extent of tensor
520
+ int thread_id, ///< ID of each participating thread
521
+ TensorCoord const& threadblock_offset, ///< Initial offset of threadblock
522
+ int const* indices = nullptr ///< gather/scatter indices, forwarded to the
523
+ ///< underlying pitch-linear iterator
524
+ )
525
+ : iterator_(params.params_,
526
+ pointer,
527
+ layout::PitchLinearCoord(extent.row(), extent.column()),
528
+ thread_id,
529
+ layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column()),
530
+ indices)
531
+ {
532
+ }
533
+
534
+ /// Construct a PredicatedTileIteratorResidualLast with zero threadblock
535
+ /// offset
536
+ CUTLASS_HOST_DEVICE
537
+ PredicatedTileIteratorResidualLast(Params const& params, ///< Precomputed parameters object
538
+ Pointer pointer, ///< Pointer to start of tensor
539
+ TensorCoord extent, ///< Extent of tensor
540
+ int thread_id ///< ID of each participating thread
541
+ )
542
+ : PredicatedTileIteratorResidualLast(params, pointer, extent, thread_id, make_Coord(0, 0))
543
+ {
544
+ }
545
+
546
+ /// Adds a pointer offset in units of Element
547
+ CUTLASS_HOST_DEVICE
548
+ void add_pointer_offset(LongIndex pointer_offset)
549
+ {
550
+ iterator_.add_pointer_offset(pointer_offset);
551
+ }
552
+
553
+ /// Advances to the next tile in memory.
554
+ ///
555
+ /// The first time this method is called, predicates are updated, and the
556
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
557
+ /// Subsequent calls are lightweight and must only update the internal
558
+ /// pointer.
559
+ CUTLASS_HOST_DEVICE
560
+ PredicatedTileIteratorResidualLast& operator++()
561
+ {
562
+ ++iterator_;
563
+ return *this;
564
+ }
565
+
566
+ /// Advances to the next tile in memory.
567
+ ///
568
+ /// The first time this method is called, predicates are updated, and the
569
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
570
+ /// Subsequent calls are lightweight and must only update the internal
571
+ /// pointer.
572
+ CUTLASS_HOST_DEVICE
573
+ PredicatedTileIteratorResidualLast operator++(int)
574
+ {
575
+ PredicatedTileIteratorResidualLast self(*this);
576
+ operator++();
577
+ return self;
578
+ }
579
+
580
+ /// Clears the predicate set efficiently
581
+ CUTLASS_HOST_DEVICE
582
+ void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
583
+
584
+ CUTLASS_HOST_DEVICE
585
+ void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); }
586
+
587
+ /// Clears the predicate set efficiently
588
+ CUTLASS_HOST_DEVICE
589
+ void enable_mask() { iterator_.enable_mask(); }
590
+
591
+ /// Sets the predicate mask, overriding value stored in predicate iterator
592
+ CUTLASS_HOST_DEVICE
593
+ void set_mask(Mask const& mask) { iterator_.set_mask(mask); }
594
+
595
+ /// Gets the mask
596
+ CUTLASS_HOST_DEVICE
597
+ void get_mask(Mask& mask) { iterator_.get_mask(mask); }
598
+
599
+ /// Loads a fragment from memory
600
+ CUTLASS_DEVICE
601
+ void load_with_pointer_offset(Fragment& frag, Index pointer_offset)
602
+ {
603
+ iterator_.load_with_pointer_offset(frag, pointer_offset);
604
+ }
605
+
606
+ /// Loads a fragment from memory
607
+ CUTLASS_DEVICE
608
+ void load_with_byte_offset(Fragment& frag, LongIndex byte_offset)
609
+ {
610
+ iterator_.load_with_byte_offset(frag, byte_offset);
611
+ }
612
+
613
+ /// Loads a fragment from memory
614
+ CUTLASS_DEVICE
615
+ void load(Fragment& frag) { load_with_pointer_offset(frag, 0); }
616
+
617
+ /// Store a fragment to memory
618
+ CUTLASS_DEVICE
619
+ void store_with_pointer_offset(Fragment const& frag, Index pointer_offset)
620
+ {
621
+ iterator_.store_with_pointer_offset(frag, pointer_offset);
622
+ }
623
+
624
+ /// Store a fragment to memory
625
+ CUTLASS_DEVICE
626
+ void store_with_byte_offset(Fragment const& frag, LongIndex byte_offset)
627
+ {
628
+ iterator_.store_with_byte_offset(frag, byte_offset);
629
+ }
630
+
631
+ /// Store a fragment to memory
632
+ CUTLASS_DEVICE
633
+ void store(Fragment const& frag) { store_with_pointer_offset(frag, 0); }
634
+ };
635
+
636
+ ////////////////////////////////////////////////////////////////////////////////
637
+
638
+ /// Specialization of PredicatedTileIteratorResidualLast for pitch-linear data.
639
+ ///
640
+ /// Satisfies: ForwardTileIteratorConcept |
641
+ /// ReadableContiguousTileIteratorConcept |
642
+ /// WriteableContiguousTileIteratorConcept |
643
+ /// MaskedTileIteratorConcept
644
+ ///
645
+ template <typename Shape_,
646
+ typename Element_,
647
+ int AdvanceRank,
648
+ typename ThreadMap_,
649
+ int AccessSize,
650
+ bool Gather>
651
+ class PredicatedTileIteratorResidualLast<Shape_,
652
+ Element_,
653
+ layout::RowMajor,
654
+ AdvanceRank,
655
+ ThreadMap_,
656
+ AccessSize,
657
+ Gather> {
658
+ public:
659
+ static_assert(AdvanceRank == 0 || AdvanceRank == 1,
660
+ "Specialization for pitch-linear iterator may along advance along the "
661
+ "contiguous(rank=0) or strided(rank=1) dimension.");
662
+
663
+ using Shape = Shape_;
664
+ using Element = Element_;
665
+ using Layout = layout::RowMajor;
666
+ static int const kAdvanceRank = AdvanceRank;
667
+ using ThreadMap = ThreadMap_;
668
+
669
+ using Index = typename Layout::Index;
670
+ using LongIndex = typename Layout::LongIndex;
671
+
672
+ using TensorRef = TensorRef<Element, Layout>;
673
+ using TensorView = TensorView<Element, Layout>;
674
+ using TensorCoord = typename Layout::TensorCoord;
675
+
676
+ using Pointer = Element*;
677
+ using NonConstPointer = typename platform::remove_const<Element>::type*;
678
+
679
+ using UnderlyingIterator =
680
+ PredicatedTileIteratorResidualLast<layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
681
+ Element,
682
+ layout::PitchLinear,
683
+ (kAdvanceRank == 0 ? 1 : 0),
684
+ ThreadMap,
685
+ AccessSize,
686
+ Gather>;
687
+
688
+ using AccessType = typename UnderlyingIterator::AccessType;
689
+
690
+ /// Fragment object to be loaded or stored
691
+ using Fragment =
692
+ cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
693
+
694
+ /// Predicate vector stores mask to guard accesses
695
+ using Mask = typename UnderlyingIterator::Mask;
696
+
697
+ /// Parameters object is precomputed state and is host-constructible
698
+ class Params {
699
+ private:
700
+ friend PredicatedTileIteratorResidualLast;
701
+
702
+ /// Parameters object
703
+ typename UnderlyingIterator::Params params_;
704
+
705
+ public:
706
+ CUTLASS_HOST_DEVICE
707
+ Params() {}
708
+
709
+ /// Construct the Params object given a pitch-linear tensor's layout
710
+ CUTLASS_HOST_DEVICE
711
+ Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))) {}
712
+
713
+ CUTLASS_HOST_DEVICE
714
+ Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {}
715
+ };
716
+
717
+ private:
718
+ //
719
+ // Data members
720
+ //
721
+
722
+ /// Underlying pitch-linear tile iterator
723
+ UnderlyingIterator iterator_;
724
+
725
+ public:
726
+ /// Constructs a TileIterator from its precomputed state, threadblock offset,
727
+ /// and thread ID
728
+ CUTLASS_HOST_DEVICE
729
+ PredicatedTileIteratorResidualLast(
730
+ Params const& params, ///< Precomputed parameters object
731
+ Pointer pointer, ///< Pointer to start of tensor
732
+ TensorCoord extent, ///< Extent of tensor
733
+ int thread_id, ///< ID of each participating thread
734
+ TensorCoord const& threadblock_offset, ///< Initial offset of threadblock
735
+ int const* indices = nullptr ///< Gather indices
736
+ )
737
+ : iterator_(params.params_,
738
+ pointer,
739
+ layout::PitchLinearCoord(extent.column(), extent.row()),
740
+ thread_id,
741
+ layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row()),
742
+ indices)
743
+ {
744
+ }
745
+
746
+ /// Construct a PredicatedTileIteratorResidualLast with zero threadblock
747
+ /// offset
748
+ CUTLASS_HOST_DEVICE
749
+ PredicatedTileIteratorResidualLast(Params const& params, ///< Precomputed parameters object
750
+ Pointer pointer, ///< Pointer to start of tensor
751
+ TensorCoord extent, ///< Extent of tensor
752
+ int thread_id ///< ID of each participating thread
753
+ )
754
+ : PredicatedTileIteratorResidualLast(params, pointer, extent, thread_id, make_Coord(0, 0))
755
+ {
756
+ }
757
+
758
+ /// Adds a pointer offset in units of Element
759
+ CUTLASS_HOST_DEVICE
760
+ void add_pointer_offset(LongIndex pointer_offset)
761
+ {
762
+ iterator_.add_pointer_offset(pointer_offset);
763
+ }
764
+
765
+ /// Advances to the next tile in memory.
766
+ ///
767
+ /// The first time this method is called, predicates are updated, and the
768
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
769
+ /// Subsequent calls are lightweight and must only update the internal
770
+ /// pointer.
771
+ CUTLASS_HOST_DEVICE
772
+ PredicatedTileIteratorResidualLast& operator++()
773
+ {
774
+ ++iterator_;
775
+ return *this;
776
+ }
777
+
778
+ /// Advances to the next tile in memory.
779
+ ///
780
+ /// The first time this method is called, predicates are updated, and the
781
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
782
+ /// Subsequent calls are lightweight and must only update the internal
783
+ /// pointer.
784
+ CUTLASS_HOST_DEVICE
785
+ PredicatedTileIteratorResidualLast operator++(int)
786
+ {
787
+ PredicatedTileIteratorResidualLast self(*this);
788
+ operator++();
789
+ return self;
790
+ }
791
+
792
+ /// Clears the predicate set efficiently
793
+ CUTLASS_HOST_DEVICE
794
+ void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
795
+
796
+ CUTLASS_HOST_DEVICE
797
+ void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); }
798
+
799
+ /// Clears the predicate set efficiently
800
+ CUTLASS_HOST_DEVICE
801
+ void enable_mask() { iterator_.enable_mask(); }
802
+
803
+ /// Sets the predicate mask, overriding value stored in predicate iterator
804
+ CUTLASS_HOST_DEVICE
805
+ void set_mask(Mask const& mask) { iterator_.set_mask(mask); }
806
+
807
+ /// Gets the mask
808
+ CUTLASS_HOST_DEVICE
809
+ void get_mask(Mask& mask) { iterator_.get_mask(mask); }
810
+
811
+ /// Loads a fragment from memory
812
+ CUTLASS_DEVICE
813
+ void load_with_pointer_offset(Fragment& frag, Index pointer_offset)
814
+ {
815
+ iterator_.load_with_pointer_offset(frag, pointer_offset);
816
+ }
817
+
818
+ /// Loads a fragment from memory
819
+ CUTLASS_DEVICE
820
+ void load_with_byte_offset(Fragment& frag, LongIndex byte_offset)
821
+ {
822
+ iterator_.load_with_byte_offset(frag, byte_offset);
823
+ }
824
+
825
+ /// Loads a fragment from memory
826
+ CUTLASS_DEVICE
827
+ void load(Fragment& frag) { load_with_pointer_offset(frag, 0); }
828
+
829
+ /// Store a fragment to memory
830
+ CUTLASS_DEVICE
831
+ void store_with_pointer_offset(Fragment const& frag, Index pointer_offset)
832
+ {
833
+ iterator_.store_with_pointer_offset(frag, pointer_offset);
834
+ }
835
+
836
+ /// Store a fragment to memory
837
+ CUTLASS_DEVICE
838
+ void store_with_byte_offset(Fragment const& frag, LongIndex byte_offset)
839
+ {
840
+ iterator_.store_with_byte_offset(frag, byte_offset);
841
+ }
842
+
843
+ /// Store a fragment to memory
844
+ CUTLASS_DEVICE
845
+ void store(Fragment const& frag) { store_with_pointer_offset(frag, 0); }
846
+ };
847
+
848
+ ////////////////////////////////////////////////////////////////////////////////
849
+
850
+ /// Specialization of PredicatedTileIteratorResidualLast for affine rank-2 data.
851
+ ///
852
+ /// Satisfies: ForwardTileIteratorConcept |
853
+ /// ReadableContiguousTileIteratorConcept |
854
+ /// WriteableContiguousTileIteratorConcept |
855
+ /// MaskedTileIteratorConcept
856
+ ///
857
+ template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int AccessSize>
858
+ class PredicatedTileIteratorResidualLast<Shape_,
859
+ Element_,
860
+ layout::AffineRankN<2>,
861
+ AdvanceRank,
862
+ ThreadMap_,
863
+ AccessSize,
864
+ false> {
865
+ public:
866
+ static_assert(AdvanceRank == 0 || AdvanceRank == 1,
867
+ "Specialization for pitch-linear iterator may advance along the "
868
+ "contiguous(rank=0) or strided(rank=1) dimension.");
869
+
870
+ using Shape = Shape_;
871
+ using Element = Element_;
872
+ using Layout = layout::AffineRankN<2>;
873
+ static int const kAdvanceRank = AdvanceRank;
874
+ using ThreadMap = ThreadMap_;
875
+
876
+ using Index = typename Layout::Index;
877
+ using LongIndex = typename Layout::LongIndex;
878
+
879
+ using TensorRef = TensorRef<Element, Layout>;
880
+ using TensorView = TensorView<Element, Layout>;
881
+ using TensorCoord = typename Layout::TensorCoord;
882
+
883
+ using Pointer = Element*;
884
+ using NonConstPointer = typename platform::remove_const<Element>::type*;
885
+
886
+ /// Type used for internal memory accesses
887
+ using AccessType =
888
+ AlignedArray<Element, AccessSize, (AccessSize * sizeof_bits<Element>::value / 8)>;
889
+
890
+ /// Underlying iterator to compute the addresses
891
+ using TileAccessIterator = PredicatedTileAccessIteratorResidualLast<Shape,
892
+ Element,
893
+ Layout,
894
+ kAdvanceRank,
895
+ ThreadMap,
896
+ AccessType>;
897
+
898
+ static int const kAccessesPerVector = TileAccessIterator::kAccessesPerVector;
899
+
900
+ /// Fragment object to be loaded or stored
901
+ using Fragment =
902
+ cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
903
+
904
+ /// Predicate vector stores mask to guard accesses
905
+ using Mask = typename TileAccessIterator::Mask;
906
+
907
+ /// Parameters object is precomputed state and is host-constructible
908
+ class Params {
909
+ public:
910
+ friend PredicatedTileIteratorResidualLast;
911
+
912
+ private:
913
+ /// Parameters object
914
+ typename TileAccessIterator::Params params_;
915
+
916
+ public:
917
+ /// Construct the Params object given a pitch-linear tensor's layout
918
+ CUTLASS_HOST_DEVICE
919
+ Params(Layout const& layout) : params_(layout) {}
920
+
921
+ CUTLASS_HOST_DEVICE
922
+ Params() {}
923
+ };
924
+
925
+ private:
926
+ /// Internal pointer type permits fast address arithmetic
927
+ using BytePointer = char*;
928
+
929
+ private:
930
+ //
931
+ // Data members
932
+ //
933
+
934
+ /// Data member to the tile access iterator
935
+ TileAccessIterator address_iterator_;
936
+
937
+ public:
938
+ /// Constructs a TileIterator from its precomputed state, threadblock offset,
939
+ /// and thread ID
940
+ CUTLASS_HOST_DEVICE
941
+ PredicatedTileIteratorResidualLast(
942
+ /// Precomputed parameters object
943
+ Params const& params,
944
+ /// Pointer to start of tensor
945
+ Pointer pointer,
946
+ /// Extent of tensor
947
+ TensorCoord extent,
948
+ /// ID of each participating thread
949
+ int thread_id,
950
+ /// Initial offset of threadblock
951
+ TensorCoord const& threadblock_offset,
952
+ int const* indices = nullptr ///< gather/scatter indices, note no support for
953
+ ///< gather/scatter at this specialization
954
+ )
955
+ : address_iterator_(params.params_, pointer, extent, thread_id, threadblock_offset)
956
+ {
957
+ }
958
+
959
+ /// Construct a PredicatedTileIteratorResidualLast with zero threadblock
960
+ /// offset
961
+ CUTLASS_HOST_DEVICE
962
+ PredicatedTileIteratorResidualLast(Params const& params, ///< Precomputed parameters object
963
+ Pointer pointer, ///< Pointer to start of tensor
964
+ TensorCoord extent, ///< Extent of tensor
965
+ int thread_id ///< ID of each participating thread
966
+ )
967
+ : PredicatedTileIteratorResidualLast(params, pointer, extent, thread_id, make_Coord(0, 0))
968
+ {
969
+ }
970
+
971
+ /// Adds a pointer offset in units of Element
972
+ CUTLASS_HOST_DEVICE
973
+ void add_pointer_offset(LongIndex pointer_offset)
974
+ {
975
+ address_iterator_.add_pointer_offset(pointer_offset);
976
+ }
977
+
978
+ /// Advances to the next tile in memory.
979
+ ///
980
+ /// The first time this method is called, predicates are updated, and the
981
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
982
+ /// Subsequent calls are lightweight and must only update the internal
983
+ /// pointer.
984
+ CUTLASS_HOST_DEVICE
985
+ PredicatedTileIteratorResidualLast& operator++()
986
+ {
987
+ if (kAdvanceRank)
988
+ address_iterator_.add_tile_offset(make_Coord(0, 1));
989
+ else
990
+ address_iterator_.add_tile_offset(make_Coord(1, 0));
991
+
992
+ return *this;
993
+ }
994
+
995
+ /// Advances to the next tile in memory.
996
+ ///
997
+ /// The first time this method is called, predicates are updated, and the
998
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
999
+ /// Subsequent calls are lightweight and must only update the internal
1000
+ /// pointer.
1001
+ CUTLASS_HOST_DEVICE
1002
+ PredicatedTileIteratorResidualLast operator++(int)
1003
+ {
1004
+ PredicatedTileIteratorResidualLast self(*this);
1005
+ operator++();
1006
+ return self;
1007
+ }
1008
+
1009
+ /// Clears the predicate set efficiently
1010
+ CUTLASS_HOST_DEVICE
1011
+ void clear_mask(bool enable = true) { address_iterator_.clear_mask(enable); }
1012
+
1013
+ CUTLASS_HOST_DEVICE
1014
+ void set_residual_tile(bool enable) { address_iterator_.set_residual_tile(enable); }
1015
+
1016
+ /// Clears the predicate set efficiently
1017
+ CUTLASS_HOST_DEVICE
1018
+ void enable_mask() { address_iterator_.enable_mask(); }
1019
+
1020
+ /// Sets the predicate mask, overriding value stored in predicate iterator
1021
+ CUTLASS_HOST_DEVICE
1022
+ void set_mask(Mask const& mask) { address_iterator_.set_mask(mask); }
1023
+
1024
+ /// Gets the mask
1025
+ CUTLASS_HOST_DEVICE
1026
+ void get_mask(Mask& mask) { address_iterator_.get_mask(mask); }
1027
+
1028
+ CUTLASS_DEVICE
1029
+ void load_with_pointer_offset(Fragment& frag, Index pointer_offset)
1030
+ {
1031
+ load_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
1032
+ }
1033
+
1034
+ CUTLASS_DEVICE
1035
+ void load_with_byte_offset(Fragment& frag, LongIndex byte_offset)
1036
+ {
1037
+ AccessType* frag_ptr = reinterpret_cast<AccessType*>(&frag);
1038
+
1039
+ CUTLASS_PRAGMA_UNROLL
1040
+ for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
1041
+ CUTLASS_PRAGMA_UNROLL
1042
+ for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
1043
+ CUTLASS_PRAGMA_UNROLL
1044
+ for (int v = 0; v < kAccessesPerVector; ++v) {
1045
+ int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
1046
+
1047
+ address_iterator_.set_iteration_index(idx);
1048
+ char const* byte_ptr =
1049
+ reinterpret_cast<char const*>(address_iterator_.get()) + byte_offset;
1050
+
1051
+ AccessType const* access_ptr = reinterpret_cast<AccessType const*>(byte_ptr);
1052
+
1053
+ cutlass::arch::global_load<AccessType, sizeof(AccessType)>(
1054
+ frag_ptr[idx], access_ptr, address_iterator_.valid());
1055
+
1056
+ ++address_iterator_;
1057
+ }
1058
+ }
1059
+ }
1060
+ }
1061
+
1062
+ /// Loads a fragment from memory
1063
+ CUTLASS_DEVICE
1064
+ void load(Fragment& frag) { load_with_byte_offset(frag, 0); }
1065
+
1066
+ /// Store a fragment to memory
1067
+ CUTLASS_DEVICE
1068
+ void store_with_pointer_offset(Fragment const& frag, Index pointer_offset)
1069
+ {
1070
+ store_with_byte_offset(frag, pointer_offset * sizeof_bits<Element>::value / 8);
1071
+ }
1072
+
1073
+ /// Store a fragment to memory
1074
+ CUTLASS_DEVICE
1075
+ void store_with_byte_offset(Fragment const& frag, LongIndex byte_offset)
1076
+ {
1077
+ address_iterator_.set_iteration_index(0);
1078
+ AccessType const* frag_ptr = reinterpret_cast<AccessType const*>(&frag);
1079
+
1080
+ CUTLASS_PRAGMA_UNROLL
1081
+ for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) {
1082
+ CUTLASS_PRAGMA_UNROLL
1083
+ for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) {
1084
+ CUTLASS_PRAGMA_UNROLL
1085
+ for (int v = 0; v < kAccessesPerVector; ++v) {
1086
+ int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous);
1087
+
1088
+ char* byte_ptr = reinterpret_cast<char*>(address_iterator_.get()) + byte_offset;
1089
+ AccessType* access_ptr = reinterpret_cast<AccessType*>(byte_ptr);
1090
+
1091
+ if (address_iterator_.valid()) { *access_ptr = frag_ptr[idx]; }
1092
+ ++address_iterator_;
1093
+ }
1094
+ }
1095
+ }
1096
+ }
1097
+
1098
+ /// Store a fragment to memory
1099
+ CUTLASS_DEVICE
1100
+ void store(Fragment const& frag) { store_with_byte_offset(frag, 0); }
1101
+ };
1102
+
1103
+ ////////////////////////////////////////////////////////////////////////////////
1104
+
1105
+ /// Specialization of PredicatedTileIteratorResidualLast for affine rank 2
1106
+ /// column-major data.
1107
+ ///
1108
+ /// Satisfies: ForwardTileIteratorConcept |
1109
+ /// ReadableContiguousTileIteratorConcept |
1110
+ /// WriteableContiguousTileIteratorConcept |
1111
+ /// MaskedTileIteratorConcept
1112
+ ///
1113
+ template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int AccessSize>
1114
+ class PredicatedTileIteratorResidualLast<Shape_,
1115
+ Element_,
1116
+ layout::AffineRank2ColumnMajor,
1117
+ AdvanceRank,
1118
+ ThreadMap_,
1119
+ AccessSize,
1120
+ false> {
1121
+ public:
1122
+ static_assert(AdvanceRank == 0 || AdvanceRank == 1,
1123
+ "Specialization for pitch-linear iterator may along advance along the "
1124
+ "contiguous(rank=0) or strided(rank=1) dimension.");
1125
+
1126
+ using Shape = Shape_;
1127
+ using Element = Element_;
1128
+ using Layout = layout::AffineRank2ColumnMajor;
1129
+ static int const kAdvanceRank = AdvanceRank;
1130
+ using ThreadMap = ThreadMap_;
1131
+
1132
+ using Index = typename Layout::Index;
1133
+ using LongIndex = typename Layout::LongIndex;
1134
+
1135
+ using TensorRef = TensorRef<Element, Layout>;
1136
+ using TensorView = TensorView<Element, Layout>;
1137
+ using TensorCoord = typename Layout::TensorCoord;
1138
+
1139
+ using Pointer = Element*;
1140
+ using NonConstPointer = typename platform::remove_const<Element>::type*;
1141
+
1142
+ // Map to the underlying AffineRankN<2> layout
1143
+ using UnderlyingIterator =
1144
+ PredicatedTileIteratorResidualLast<layout::PitchLinearShape<Shape::kRow, Shape::kColumn>,
1145
+ Element,
1146
+ layout::AffineRankN<2>,
1147
+ (kAdvanceRank == 0 ? 0 : 1),
1148
+ ThreadMap,
1149
+ AccessSize>;
1150
+
1151
+ using AccessType = typename UnderlyingIterator::AccessType;
1152
+
1153
+ /// Fragment object to be loaded or stored
1154
+ using Fragment =
1155
+ cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
1156
+
1157
+ /// Predicate vector stores mask to guard accesses
1158
+ using Mask = typename UnderlyingIterator::Mask;
1159
+
1160
+ /// Parameters object is precomputed state and is host-constructible
1161
+ class Params {
1162
+ private:
1163
+ friend PredicatedTileIteratorResidualLast;
1164
+
1165
+ /// Parameters object
1166
+ typename UnderlyingIterator::Params params_;
1167
+
1168
+ public:
1169
+ CUTLASS_HOST_DEVICE
1170
+ Params() {}
1171
+
1172
+ /// Construct the Params object given an AffineRankN<2> tensor's layout
1173
+ CUTLASS_HOST_DEVICE
1174
+ Params(Layout const& layout)
1175
+ : params_(layout::AffineRankN<2>(layout.stride(0), layout.stride(1)))
1176
+ {
1177
+ }
1178
+ };
1179
+
1180
+ private:
1181
+ //
1182
+ // Data members
1183
+ //
1184
+
1185
+ /// Underlying AffineRankN<2> tile iterator
1186
+ UnderlyingIterator iterator_;
1187
+
1188
+ public:
1189
+ /// Constructs a TileIterator from its precomputed state, threadblock offset,
1190
+ /// and thread ID
1191
+ CUTLASS_HOST_DEVICE
1192
+ PredicatedTileIteratorResidualLast(
1193
+ Params const& params, ///< Precomputed parameters object
1194
+ Pointer pointer, ///< Pointer to start of tensor
1195
+ TensorCoord extent, ///< Extent of tensor
1196
+ int thread_id, ///< ID of each participating thread
1197
+ TensorCoord const& threadblock_offset, ///< Initial offset of threadblock
1198
+ int const* indices = nullptr ///< gather/scatter indices, note no support for
1199
+ ///< gather/scatter at this specialization
1200
+ )
1201
+ : iterator_(params.params_,
1202
+ pointer,
1203
+ layout::PitchLinearCoord(extent.row(), extent.column()),
1204
+ thread_id,
1205
+ layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column()))
1206
+ {
1207
+ }
1208
+
1209
+ /// Construct a PredicatedTileIteratorResidualLast with zero threadblock
1210
+ /// offset
1211
+ CUTLASS_HOST_DEVICE
1212
+ PredicatedTileIteratorResidualLast(Params const& params, ///< Precomputed parameters object
1213
+ Pointer pointer, ///< Pointer to start of tensor
1214
+ TensorCoord extent, ///< Extent of tensor
1215
+ int thread_id ///< ID of each participating thread
1216
+ )
1217
+ : PredicatedTileIteratorResidualLast(params, pointer, extent, thread_id, make_Coord(0, 0))
1218
+ {
1219
+ }
1220
+
1221
+ /// Adds a pointer offset in units of Element
1222
+ CUTLASS_HOST_DEVICE
1223
+ void add_pointer_offset(LongIndex pointer_offset)
1224
+ {
1225
+ iterator_.add_pointer_offset(pointer_offset);
1226
+ }
1227
+
1228
+ /// Advances to the next tile in memory.
1229
+ ///
1230
+ /// The first time this method is called, predicates are updated, and the
1231
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
1232
+ /// Subsequent calls are lightweight and must only update the internal
1233
+ /// pointer.
1234
+ CUTLASS_HOST_DEVICE
1235
+ PredicatedTileIteratorResidualLast& operator++()
1236
+ {
1237
+ ++iterator_;
1238
+ return *this;
1239
+ }
1240
+
1241
+ /// Advances to the next tile in memory.
1242
+ ///
1243
+ /// The first time this method is called, predicates are updated, and the
1244
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
1245
+ /// Subsequent calls are lightweight and must only update the internal
1246
+ /// pointer.
1247
+ CUTLASS_HOST_DEVICE
1248
+ PredicatedTileIteratorResidualLast operator++(int)
1249
+ {
1250
+ PredicatedTileIteratorResidualLast self(*this);
1251
+ operator++();
1252
+ return self;
1253
+ }
1254
+
1255
+ /// Clears the predicate set efficiently
1256
+ CUTLASS_HOST_DEVICE
1257
+ void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
1258
+
1259
+ CUTLASS_HOST_DEVICE
1260
+ void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); }
1261
+
1262
+ /// Clears the predicate set efficiently
1263
+ CUTLASS_HOST_DEVICE
1264
+ void enable_mask() { iterator_.enable_mask(); }
1265
+
1266
+ /// Sets the predicate mask, overriding value stored in predicate iterator
1267
+ CUTLASS_HOST_DEVICE
1268
+ void set_mask(Mask const& mask) { iterator_.set_mask(mask); }
1269
+
1270
+ /// Gets the mask
1271
+ CUTLASS_HOST_DEVICE
1272
+ void get_mask(Mask& mask) { iterator_.get_mask(mask); }
1273
+
1274
+ /// Loads a fragment from memory
1275
+ CUTLASS_DEVICE
1276
+ void load_with_pointer_offset(Fragment& frag, Index pointer_offset)
1277
+ {
1278
+ iterator_.load_with_pointer_offset(frag, pointer_offset);
1279
+ }
1280
+
1281
+ /// Loads a fragment from memory
1282
+ CUTLASS_DEVICE
1283
+ void load_with_byte_offset(Fragment& frag, LongIndex byte_offset)
1284
+ {
1285
+ iterator_.load_with_byte_offset(frag, byte_offset);
1286
+ }
1287
+
1288
+ /// Loads a fragment from memory
1289
+ CUTLASS_DEVICE
1290
+ void load(Fragment& frag) { load_with_pointer_offset(frag, 0); }
1291
+
1292
+ /// Store a fragment to memory
1293
+ CUTLASS_DEVICE
1294
+ void store_with_pointer_offset(Fragment const& frag, Index pointer_offset)
1295
+ {
1296
+ iterator_.store_with_pointer_offset(frag, pointer_offset);
1297
+ }
1298
+
1299
+ /// Store a fragment to memory
1300
+ CUTLASS_DEVICE
1301
+ void store_with_byte_offset(Fragment const& frag, LongIndex byte_offset)
1302
+ {
1303
+ iterator_.store_with_byte_offset(frag, byte_offset);
1304
+ }
1305
+
1306
+ /// Store a fragment to memory
1307
+ CUTLASS_DEVICE
1308
+ void store(Fragment const& frag) { store_with_pointer_offset(frag, 0); }
1309
+ };
1310
+
1311
+ ////////////////////////////////////////////////////////////////////////////////
1312
+
1313
+ /// Specialization of PredicatedTileIteratorResidualLast for affine rank 2
1314
+ /// row-major data.
1315
+ ///
1316
+ /// Satisfies: ForwardTileIteratorConcept |
1317
+ /// ReadableContiguousTileIteratorConcept |
1318
+ /// WriteableContiguousTileIteratorConcept |
1319
+ /// MaskedTileIteratorConcept
1320
+ ///
1321
+ template <typename Shape_, typename Element_, int AdvanceRank, typename ThreadMap_, int AccessSize>
1322
+ class PredicatedTileIteratorResidualLast<Shape_,
1323
+ Element_,
1324
+ layout::AffineRank2RowMajor,
1325
+ AdvanceRank,
1326
+ ThreadMap_,
1327
+ AccessSize,
1328
+ false> {
1329
+ public:
1330
+ static_assert(AdvanceRank == 0 || AdvanceRank == 1,
1331
+ "Specialization for pitch-linear iterator may along advance along the "
1332
+ "contiguous(rank=0) or strided(rank=1) dimension.");
1333
+
1334
+ using Shape = Shape_;
1335
+ using Element = Element_;
1336
+ using Layout = layout::AffineRank2RowMajor;
1337
+ static int const kAdvanceRank = AdvanceRank;
1338
+ using ThreadMap = ThreadMap_;
1339
+
1340
+ using Index = typename Layout::Index;
1341
+ using LongIndex = typename Layout::LongIndex;
1342
+
1343
+ using TensorRef = TensorRef<Element, Layout>;
1344
+ using TensorView = TensorView<Element, Layout>;
1345
+ using TensorCoord = typename Layout::TensorCoord;
1346
+
1347
+ using Pointer = Element*;
1348
+ using NonConstPointer = typename platform::remove_const<Element>::type*;
1349
+
1350
+ // Map to the underlying AffineRankN<2> layout
1351
+ using UnderlyingIterator =
1352
+ PredicatedTileIteratorResidualLast<layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
1353
+ Element,
1354
+ layout::AffineRankN<2>,
1355
+ (kAdvanceRank == 0 ? 1 : 0),
1356
+ ThreadMap,
1357
+ AccessSize>;
1358
+
1359
+ using AccessType = typename UnderlyingIterator::AccessType;
1360
+
1361
+ /// Fragment object to be loaded or stored
1362
+ using Fragment =
1363
+ cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
1364
+
1365
+ /// Predicate vector stores mask to guard accesses
1366
+ using Mask = typename UnderlyingIterator::Mask;
1367
+
1368
+ /// Parameters object is precomputed state and is host-constructible
1369
+ class Params {
1370
+ private:
1371
+ friend PredicatedTileIteratorResidualLast;
1372
+
1373
+ /// Parameters object
1374
+ typename UnderlyingIterator::Params params_;
1375
+
1376
+ public:
1377
+ CUTLASS_HOST_DEVICE
1378
+ Params() {}
1379
+
1380
+ /// Construct the Params object given an AffineRankN<2> tensor's layout
1381
+ CUTLASS_HOST_DEVICE
1382
+ Params(Layout const& layout)
1383
+ : params_(layout::AffineRankN<2>(layout.stride(1), layout.stride(0)))
1384
+ {
1385
+ }
1386
+ };
1387
+
1388
+ private:
1389
+ //
1390
+ // Data members
1391
+ //
1392
+
1393
+ /// Underlying AffineRankN<2> tile iterator
1394
+ UnderlyingIterator iterator_;
1395
+
1396
+ public:
1397
+ /// Constructs a TileIterator from its precomputed state, threadblock offset,
1398
+ /// and thread ID
1399
+ CUTLASS_HOST_DEVICE
1400
+ PredicatedTileIteratorResidualLast(
1401
+ Params const& params, ///< Precomputed parameters object
1402
+ Pointer pointer, ///< Pointer to start of tensor
1403
+ TensorCoord extent, ///< Extent of tensor
1404
+ int thread_id, ///< ID of each participating thread
1405
+ TensorCoord const& threadblock_offset, ///< Initial offset of threadblock
1406
+ int const* indices = nullptr ///< gather/scatter indices, note no support for
1407
+ ///< gather/scatter at this specialization
1408
+ )
1409
+ : iterator_(params.params_,
1410
+ pointer,
1411
+ layout::PitchLinearCoord(extent.column(), extent.row()),
1412
+ thread_id,
1413
+ layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row()))
1414
+ {
1415
+ }
1416
+
1417
+ /// Construct a PredicatedTileIteratorResidualLast with zero threadblock
1418
+ /// offset
1419
+ CUTLASS_HOST_DEVICE
1420
+ PredicatedTileIteratorResidualLast(Params const& params, ///< Precomputed parameters object
1421
+ Pointer pointer, ///< Pointer to start of tensor
1422
+ TensorCoord extent, ///< Extent of tensor
1423
+ int thread_id ///< ID of each participating thread
1424
+ )
1425
+ : PredicatedTileIteratorResidualLast(params, pointer, extent, thread_id, make_Coord(0, 0))
1426
+ {
1427
+ }
1428
+
1429
+ /// Adds a pointer offset in units of Element
1430
+ CUTLASS_HOST_DEVICE
1431
+ void add_pointer_offset(LongIndex pointer_offset)
1432
+ {
1433
+ iterator_.add_pointer_offset(pointer_offset);
1434
+ }
1435
+
1436
+ /// Advances to the next tile in memory.
1437
+ ///
1438
+ /// The first time this method is called, predicates are updated, and the
1439
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
1440
+ /// Subsequent calls are lightweight and must only update the internal
1441
+ /// pointer.
1442
+ CUTLASS_HOST_DEVICE
1443
+ PredicatedTileIteratorResidualLast& operator++()
1444
+ {
1445
+ ++iterator_;
1446
+ return *this;
1447
+ }
1448
+
1449
+ /// Advances to the next tile in memory.
1450
+ ///
1451
+ /// The first time this method is called, predicates are updated, and the
1452
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
1453
+ /// Subsequent calls are lightweight and must only update the internal
1454
+ /// pointer.
1455
+ CUTLASS_HOST_DEVICE
1456
+ PredicatedTileIteratorResidualLast operator++(int)
1457
+ {
1458
+ PredicatedTileIteratorResidualLast self(*this);
1459
+ operator++();
1460
+ return self;
1461
+ }
1462
+
1463
+ /// Clears the predicate set efficiently
1464
+ CUTLASS_HOST_DEVICE
1465
+ void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
1466
+
1467
+ CUTLASS_HOST_DEVICE
1468
+ void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); }
1469
+
1470
+ /// Clears the predicate set efficiently
1471
+ CUTLASS_HOST_DEVICE
1472
+ void enable_mask() { iterator_.enable_mask(); }
1473
+
1474
+ /// Sets the predicate mask, overriding value stored in predicate iterator
1475
+ CUTLASS_HOST_DEVICE
1476
+ void set_mask(Mask const& mask) { iterator_.set_mask(mask); }
1477
+
1478
+ /// Gets the mask
1479
+ CUTLASS_HOST_DEVICE
1480
+ void get_mask(Mask& mask) { iterator_.get_mask(mask); }
1481
+
1482
+ /// Loads a fragment from memory
1483
+ CUTLASS_DEVICE
1484
+ void load_with_pointer_offset(Fragment& frag, Index pointer_offset)
1485
+ {
1486
+ iterator_.load_with_pointer_offset(frag, pointer_offset);
1487
+ }
1488
+
1489
+ /// Loads a fragment from memory
1490
+ CUTLASS_DEVICE
1491
+ void load_with_byte_offset(Fragment& frag, LongIndex byte_offset)
1492
+ {
1493
+ iterator_.load_with_byte_offset(frag, byte_offset);
1494
+ }
1495
+
1496
+ /// Loads a fragment from memory
1497
+ CUTLASS_DEVICE
1498
+ void load(Fragment& frag) { load_with_pointer_offset(frag, 0); }
1499
+
1500
+ /// Store a fragment to memory
1501
+ CUTLASS_DEVICE
1502
+ void store_with_pointer_offset(Fragment const& frag, Index pointer_offset)
1503
+ {
1504
+ iterator_.store_with_pointer_offset(frag, pointer_offset);
1505
+ }
1506
+
1507
+ /// Store a fragment to memory
1508
+ CUTLASS_DEVICE
1509
+ void store_with_byte_offset(Fragment const& frag, LongIndex byte_offset)
1510
+ {
1511
+ iterator_.store_with_byte_offset(frag, byte_offset);
1512
+ }
1513
+
1514
+ /// Store a fragment to memory
1515
+ CUTLASS_DEVICE
1516
+ void store(Fragment const& frag) { store_with_pointer_offset(frag, 0); }
1517
+ };
1518
+
1519
+ ////////////////////////////////////////////////////////////////////////////////
1520
+
1521
+ /// Specialization of PredicatedTileIteratorResidualLast for interleaved data.
1522
+ /// It is mapped to the congruous layout.
1523
+ ///
1524
+ /// Satisfies: ForwardTileIteratorConcept |
1525
+ /// ReadableContiguousTileIteratorConcept |
1526
+ /// WriteableContiguousTileIteratorConcept |
1527
+ /// MaskedTileIteratorConcept
1528
+ ///
1529
+
1530
+ template <typename Shape_,
1531
+ typename Element_,
1532
+ int AdvanceRank,
1533
+ typename ThreadMap_,
1534
+ int AccessSize,
1535
+ int InterleavedK>
1536
+ class PredicatedTileIteratorResidualLast<Shape_,
1537
+ Element_,
1538
+ layout::ColumnMajorInterleaved<InterleavedK>,
1539
+ AdvanceRank,
1540
+ ThreadMap_,
1541
+ AccessSize,
1542
+ false> {
1543
+ public:
1544
+ static_assert(AdvanceRank == 0 || AdvanceRank == 1,
1545
+ "Specialization for pitch-linear iterator may along advance along the "
1546
+ "contiguous(rank=0) or strided(rank=1) dimension.");
1547
+
1548
+ using Shape = Shape_;
1549
+ using Element = Element_;
1550
+ static int const kInterleavedK = InterleavedK;
1551
+ using Layout = layout::ColumnMajorInterleaved<kInterleavedK>;
1552
+ static int const kAdvanceRank = AdvanceRank;
1553
+ using ThreadMap = ThreadMap_;
1554
+
1555
+ using Index = typename Layout::Index;
1556
+ using LongIndex = typename Layout::LongIndex;
1557
+
1558
+ using TensorRef = TensorRef<Element, Layout>;
1559
+ using TensorView = TensorView<Element, Layout>;
1560
+ using TensorCoord = typename Layout::TensorCoord;
1561
+
1562
+ using Pointer = Element*;
1563
+ using NonConstPointer = typename platform::remove_const<Element>::type*;
1564
+
1565
+ using UnderlyingIterator = PredicatedTileIteratorResidualLast<
1566
+ layout::PitchLinearShape<Shape::kRow * kInterleavedK, Shape::kColumn / kInterleavedK>,
1567
+ Element,
1568
+ layout::PitchLinear,
1569
+ (kAdvanceRank == 0 ? 0 : 1),
1570
+ ThreadMap,
1571
+ AccessSize>;
1572
+
1573
+ using AccessType = typename UnderlyingIterator::AccessType;
1574
+
1575
+ /// Fragment object to be loaded or stored
1576
+ using Fragment =
1577
+ cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
1578
+
1579
+ /// Predicate vector stores mask to guard accesses
1580
+ using Mask = typename UnderlyingIterator::Mask;
1581
+
1582
+ /// Parameters object is precomputed state and is host-constructible
1583
+ class Params {
1584
+ private:
1585
+ friend PredicatedTileIteratorResidualLast;
1586
+
1587
+ /// Parameters object
1588
+ typename UnderlyingIterator::Params params_;
1589
+
1590
+ public:
1591
+ CUTLASS_HOST_DEVICE
1592
+ Params() {}
1593
+
1594
+ /// Construct the Params object given a pitch-linear tensor's layout
1595
+ CUTLASS_HOST_DEVICE
1596
+ Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))) {}
1597
+
1598
+ CUTLASS_HOST_DEVICE
1599
+ Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {}
1600
+ };
1601
+
1602
+ private:
1603
+ //
1604
+ // Data members
1605
+ //
1606
+
1607
+ /// Underlying pitch-linear tile iterator
1608
+ UnderlyingIterator iterator_;
1609
+
1610
+ public:
1611
+ /// Constructs a TileIterator from its precomputed state, threadblock offset,
1612
+ /// and thread ID
1613
+ CUTLASS_HOST_DEVICE
1614
+ PredicatedTileIteratorResidualLast(
1615
+ /// Precomputed parameters object
1616
+ Params const& params,
1617
+ /// Pointer to start of tensor
1618
+ Pointer pointer,
1619
+ /// Extent of tensor
1620
+ TensorCoord extent,
1621
+ /// ID of each participating thread
1622
+ int thread_id,
1623
+ /// Initial offset of threadblock
1624
+ TensorCoord const& threadblock_offset,
1625
+ int const* indices = nullptr ///< gather/scatter indices, note no support for
1626
+ ///< gather/scatter at this specialization
1627
+ )
1628
+ : iterator_(params.params_,
1629
+ pointer,
1630
+ layout::PitchLinearCoord(extent.row() * kInterleavedK,
1631
+ extent.column() / kInterleavedK),
1632
+ thread_id,
1633
+ layout::PitchLinearCoord(threadblock_offset.row() * kInterleavedK,
1634
+ threadblock_offset.column() / kInterleavedK))
1635
+ {
1636
+ }
1637
+
1638
+ /// Construct a PredicatedTileIteratorResidualLast with zero threadblock
1639
+ /// offset
1640
+ CUTLASS_HOST_DEVICE
1641
+ PredicatedTileIteratorResidualLast(Params const& params, ///< Precomputed parameters object
1642
+ Pointer pointer, ///< Pointer to start of tensor
1643
+ TensorCoord extent, ///< Extent of tensor
1644
+ int thread_id ///< ID of each participating thread
1645
+ )
1646
+ : PredicatedTileIteratorResidualLast(params, pointer, extent, thread_id, make_Coord(0, 0))
1647
+ {
1648
+ }
1649
+
1650
+ /// Adds a pointer offset in units of Element
1651
+ CUTLASS_HOST_DEVICE
1652
+ void add_pointer_offset(LongIndex pointer_offset)
1653
+ {
1654
+ iterator_.add_pointer_offset(pointer_offset);
1655
+ }
1656
+
1657
+ /// Advances to the next tile in memory.
1658
+ ///
1659
+ /// The first time this method is called, predicates are updated, and the
1660
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
1661
+ /// Subsequent calls are lightweight and must only update the internal
1662
+ /// pointer.
1663
+ CUTLASS_HOST_DEVICE
1664
+ PredicatedTileIteratorResidualLast& operator++()
1665
+ {
1666
+ ++iterator_;
1667
+ return *this;
1668
+ }
1669
+
1670
+ /// Advances to the next tile in memory.
1671
+ ///
1672
+ /// The first time this method is called, predicates are updated, and the
1673
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
1674
+ /// Subsequent calls are lightweight and must only update the internal
1675
+ /// pointer.
1676
+ CUTLASS_HOST_DEVICE
1677
+ PredicatedTileIteratorResidualLast operator++(int)
1678
+ {
1679
+ PredicatedTileIteratorResidualLast self(*this);
1680
+ operator++();
1681
+ return self;
1682
+ }
1683
+
1684
+ /// Clears the predicate set efficiently
1685
+ CUTLASS_HOST_DEVICE
1686
+ void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
1687
+
1688
+ CUTLASS_HOST_DEVICE
1689
+ void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); }
1690
+
1691
+ /// Clears the predicate set efficiently
1692
+ CUTLASS_HOST_DEVICE
1693
+ void enable_mask() { iterator_.enable_mask(); }
1694
+
1695
+ /// Sets the predicate mask, overriding value stored in predicate iterator
1696
+ CUTLASS_HOST_DEVICE
1697
+ void set_mask(Mask const& mask) { iterator_.set_mask(mask); }
1698
+
1699
+ /// Gets the mask
1700
+ CUTLASS_HOST_DEVICE
1701
+ void get_mask(Mask& mask) { iterator_.get_mask(mask); }
1702
+
1703
+ /// Loads a fragment from memory
1704
+ CUTLASS_DEVICE
1705
+ void load_with_pointer_offset(Fragment& frag, Index pointer_offset)
1706
+ {
1707
+ iterator_.load_with_pointer_offset(frag, pointer_offset);
1708
+ }
1709
+
1710
+ /// Loads a fragment from memory
1711
+ CUTLASS_DEVICE
1712
+ void load(Fragment& frag) { load_with_pointer_offset(frag, 0); }
1713
+
1714
+ /// Store a fragment to memory
1715
+ CUTLASS_DEVICE
1716
+ void store_with_pointer_offset(Fragment const& frag, Index pointer_offset)
1717
+ {
1718
+ iterator_.store_with_pointer_offset(frag, pointer_offset);
1719
+ }
1720
+
1721
+ /// Store a fragment to memory
1722
+ CUTLASS_DEVICE
1723
+ void store(Fragment const& frag) { store_with_pointer_offset(frag, 0); }
1724
+ };
1725
+
1726
+ ////////////////////////////////////////////////////////////////////////////////
1727
+
1728
+ /// Specialization of PredicatedTileIteratorResidualLast for interleaved-32
1729
+ /// data. It is mapped to the congruous layout.
1730
+ ///
1731
+ /// Satisfies: ForwardTileIteratorConcept |
1732
+ /// ReadableContiguousTileIteratorConcept |
1733
+ /// WriteableContiguousTileIteratorConcept |
1734
+ /// MaskedTileIteratorConcept
1735
+ ///
1736
+ template <typename Shape_,
1737
+ typename Element_,
1738
+ int AdvanceRank,
1739
+ typename ThreadMap_,
1740
+ int AccessSize,
1741
+ int InterleavedK>
1742
+ class PredicatedTileIteratorResidualLast<Shape_,
1743
+ Element_,
1744
+ layout::RowMajorInterleaved<InterleavedK>,
1745
+ AdvanceRank,
1746
+ ThreadMap_,
1747
+ AccessSize,
1748
+ false> {
1749
+ public:
1750
+ static_assert(AdvanceRank == 0 || AdvanceRank == 1,
1751
+ "Specialization for pitch-linear iterator may along advance along the "
1752
+ "contiguous(rank=0) or strided(rank=1) dimension.");
1753
+
1754
+ using Shape = Shape_;
1755
+ using Element = Element_;
1756
+ static int const kInterleavedK = InterleavedK;
1757
+ using Layout = layout::RowMajorInterleaved<kInterleavedK>;
1758
+ static int const kAdvanceRank = AdvanceRank;
1759
+ using ThreadMap = ThreadMap_;
1760
+
1761
+ using Index = typename Layout::Index;
1762
+ using LongIndex = typename Layout::LongIndex;
1763
+
1764
+ using TensorRef = TensorRef<Element, Layout>;
1765
+ using TensorView = TensorView<Element, Layout>;
1766
+ using TensorCoord = typename Layout::TensorCoord;
1767
+
1768
+ using Pointer = Element*;
1769
+ using NonConstPointer = typename platform::remove_const<Element>::type*;
1770
+
1771
+ using UnderlyingIterator = PredicatedTileIteratorResidualLast<
1772
+ layout::PitchLinearShape<Shape::kColumn * kInterleavedK, Shape::kRow / kInterleavedK>,
1773
+ Element,
1774
+ layout::PitchLinear,
1775
+ (kAdvanceRank == 0 ? 1 : 0),
1776
+ ThreadMap,
1777
+ AccessSize>;
1778
+
1779
+ using AccessType = typename UnderlyingIterator::AccessType;
1780
+
1781
+ /// Fragment object to be loaded or stored
1782
+ using Fragment =
1783
+ cutlass::Array<Element, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
1784
+
1785
+ /// Predicate vector stores mask to guard accesses
1786
+ using Mask = typename UnderlyingIterator::Mask;
1787
+
1788
+ /// Parameters object is precomputed state and is host-constructible
1789
+ class Params {
1790
+ private:
1791
+ friend PredicatedTileIteratorResidualLast;
1792
+
1793
+ /// Parameters object
1794
+ typename UnderlyingIterator::Params params_;
1795
+
1796
+ public:
1797
+ CUTLASS_HOST_DEVICE
1798
+ Params() {}
1799
+
1800
+ /// Construct the Params object given a pitch-linear tensor's layout
1801
+ CUTLASS_HOST_DEVICE
1802
+ Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))) {}
1803
+
1804
+ CUTLASS_HOST_DEVICE
1805
+ Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {}
1806
+ };
1807
+
1808
+ private:
1809
+ //
1810
+ // Data members
1811
+ //
1812
+
1813
+ /// Underlying pitch-linear tile iterator
1814
+ UnderlyingIterator iterator_;
1815
+
1816
+ public:
1817
+ /// Constructs a TileIterator from its precomputed state, threadblock offset,
1818
+ /// and thread ID
1819
+ CUTLASS_HOST_DEVICE
1820
+ PredicatedTileIteratorResidualLast(
1821
+ /// Precomputed parameters object
1822
+ Params const& params,
1823
+ /// Pointer to start of tensor
1824
+ Pointer pointer,
1825
+ /// Extent of tensor
1826
+ TensorCoord extent,
1827
+ /// ID of each participating thread
1828
+ int thread_id,
1829
+ /// Initial offset of threadblock
1830
+ TensorCoord const& threadblock_offset,
1831
+ int const* indices = nullptr ///< gather/scatter indices, note no support for
1832
+ ///< gather/scatter at this specialization
1833
+ )
1834
+ : iterator_(params.params_,
1835
+ pointer,
1836
+ layout::PitchLinearCoord(extent.column() * kInterleavedK,
1837
+ extent.row() / kInterleavedK),
1838
+ thread_id,
1839
+ layout::PitchLinearCoord(threadblock_offset.column() * kInterleavedK,
1840
+ threadblock_offset.row() / kInterleavedK))
1841
+ {
1842
+ }
1843
+
1844
+ /// Construct a PredicatedTileIteratorResidualLast with zero threadblock
1845
+ /// offset
1846
+ CUTLASS_HOST_DEVICE
1847
+ PredicatedTileIteratorResidualLast(Params const& params, ///< Precomputed parameters object
1848
+ Pointer pointer, ///< Pointer to start of tensor
1849
+ TensorCoord extent, ///< Extent of tensor
1850
+ int thread_id ///< ID of each participating thread
1851
+ )
1852
+ : PredicatedTileIteratorResidualLast(params, pointer, extent, thread_id, make_Coord(0, 0))
1853
+ {
1854
+ }
1855
+
1856
+ /// Adds a pointer offset in units of Element
1857
+ CUTLASS_HOST_DEVICE
1858
+ void add_pointer_offset(LongIndex pointer_offset)
1859
+ {
1860
+ iterator_.add_pointer_offset(pointer_offset);
1861
+ }
1862
+
1863
+ /// Advances to the next tile in memory.
1864
+ ///
1865
+ /// The first time this method is called, predicates are updated, and the
1866
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
1867
+ /// Subsequent calls are lightweight and must only update the internal
1868
+ /// pointer.
1869
+ CUTLASS_HOST_DEVICE
1870
+ PredicatedTileIteratorResidualLast& operator++()
1871
+ {
1872
+ ++iterator_;
1873
+ return *this;
1874
+ }
1875
+
1876
+ /// Advances to the next tile in memory.
1877
+ ///
1878
+ /// The first time this method is called, predicates are updated, and the
1879
+ /// iterator's internal pointer is reverted to the first "steady state" tile.
1880
+ /// Subsequent calls are lightweight and must only update the internal
1881
+ /// pointer.
1882
+ CUTLASS_HOST_DEVICE
1883
+ PredicatedTileIteratorResidualLast operator++(int)
1884
+ {
1885
+ PredicatedTileIteratorResidualLast self(*this);
1886
+ operator++();
1887
+ return self;
1888
+ }
1889
+
1890
+ /// Clears the predicate set efficiently
1891
+ CUTLASS_HOST_DEVICE
1892
+ void clear_mask(bool enable = true) { iterator_.clear_mask(enable); }
1893
+
1894
+ CUTLASS_HOST_DEVICE
1895
+ void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); }
1896
+
1897
+ /// Clears the predicate set efficiently
1898
+ CUTLASS_HOST_DEVICE
1899
+ void enable_mask() { iterator_.enable_mask(); }
1900
+
1901
+ /// Sets the predicate mask, overriding value stored in predicate iterator
1902
+ CUTLASS_HOST_DEVICE
1903
+ void set_mask(Mask const& mask) { iterator_.set_mask(mask); }
1904
+
1905
+ /// Gets the mask
1906
+ CUTLASS_HOST_DEVICE
1907
+ void get_mask(Mask& mask) { iterator_.get_mask(mask); }
1908
+
1909
+ /// Loads a fragment from memory
1910
+ CUTLASS_DEVICE
1911
+ void load_with_pointer_offset(Fragment& frag, Index pointer_offset)
1912
+ {
1913
+ iterator_.load_with_pointer_offset(frag, pointer_offset);
1914
+ }
1915
+
1916
+ /// Loads a fragment from memory
1917
+ CUTLASS_DEVICE
1918
+ void load(Fragment& frag) { load_with_pointer_offset(frag, 0); }
1919
+
1920
+ /// Store a fragment to memory
1921
+ CUTLASS_DEVICE
1922
+ void store_with_pointer_offset(Fragment const& frag, Index pointer_offset)
1923
+ {
1924
+ iterator_.store_with_pointer_offset(frag, pointer_offset);
1925
+ }
1926
+
1927
+ /// Store a fragment to memory
1928
+ CUTLASS_DEVICE
1929
+ void store(Fragment const& frag) { store_with_pointer_offset(frag, 0); }
1930
+ };
1931
+
1932
+ ////////////////////////////////////////////////////////////////////////////////
1933
+
1934
+ } // namespace threadblock
1935
+ } // namespace transform
1936
+ } // namespace cutlass
1937
+
1938
+ ////////////////////////////////////////////////////////////////////////////////
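The specializations above all follow the same shape: adapt the layout's coordinates into pitch-linear (contiguous, strided) order and delegate to an underlying pitch-linear iterator. As a hedged orientation aid (not part of the diffed header), the sketch below exercises only the public calls visible above; the `Iterator` template parameter, the two-tile walk, and the point at which set_residual_tile() is toggled are illustrative assumptions, since the real call order is dictated by the consuming mainloop.

template <typename Iterator>
CUTLASS_DEVICE void residual_last_walk_sketch(
    typename Iterator::Params const& params,   // precomputed parameters (assumed provided)
    typename Iterator::Pointer gmem_ptr,       // start of the tensor in global memory
    typename Iterator::TensorCoord extent,     // tensor extent
    int thread_id,                             // participating thread id
    typename Iterator::TensorCoord tb_offset)  // initial threadblock offset
{
    Iterator it(params, gmem_ptr, extent, thread_id, tb_offset);
    typename Iterator::Fragment frag;

    it.set_residual_tile(false);  // full-size tile: predicates stay cheap
    it.load(frag);                // predicated load of one tile into registers
    ++it;                         // per the comments above, the first ++ updates predicates;
                                  // subsequent increments only advance the internal pointer

    it.set_residual_tile(true);   // flag the next tile as the residual (possibly partial) tile
    it.load(frag);                // out-of-bounds lanes are masked by the predicate set
}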
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/transpose_warp_iterator.h ADDED
@@ -0,0 +1,57 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
3
+ * SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice, this
9
+ * list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
22
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
23
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
25
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
26
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
27
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
+ *
30
+ **************************************************************************************************/
31
+
32
+ // Copyright (c) Microsoft Corporation.
33
+ // SPDX-License-Identifier: Apache-2.0
34
+
35
+ // DeepSpeed Team
36
+
37
+ #pragma once
38
+
39
+ #include "warp_iterator_from_smem.h"
40
+
41
+ template <typename WarpIterator>
42
+ struct TransposeWarpIterator {
43
+ using Iterator = char;
44
+ static bool constexpr kSupportsTranspose = false;
45
+ };
46
+
47
+ template <
48
+ /// Operand identity
49
+ cutlass::gemm::Operand Operand,
50
+ /// Data type of A elements
51
+ typename Element,
52
+ bool kTranspose>
53
+ struct TransposeWarpIterator<
54
+ cutlass::gemm::warp::WarpIteratorFromSmem<Operand, Element, kTranspose>> {
55
+ using Iterator = cutlass::gemm::warp::WarpIteratorFromSmem<Operand, Element, !kTranspose>;
56
+ static bool constexpr kSupportsTranspose = true;
57
+ };
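The two templates above form a small compile-time trait: the primary template is the fallback for warp iterators that cannot be transposed, and the specialization for WarpIteratorFromSmem flips its kTranspose flag. A minimal usage sketch follows (hedged; `AIterator` and the surrounding includes are assumptions, not part of this diff):

#include <cutlass/half.h>
// assuming this header (transpose_warp_iterator.h) is also included

using AIterator = cutlass::gemm::warp::WarpIteratorFromSmem<cutlass::gemm::Operand::kA,
                                                            cutlass::half_t,
                                                            /*kTranspose=*/false>;

// The specialization reports support and yields the transposed iterator type ...
static_assert(TransposeWarpIterator<AIterator>::kSupportsTranspose,
              "WarpIteratorFromSmem can be transposed");
using ATransposed = TransposeWarpIterator<AIterator>::Iterator;  // same iterator, kTranspose == true

// ... while any other warp iterator falls back to the primary template, with
// kSupportsTranspose == false and a dummy Iterator type (char).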
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/warp_iterator_from_smem.h ADDED
@@ -0,0 +1,269 @@
1
+ /***************************************************************************************************
2
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
3
+ *reserved. SPDX-License-Identifier: BSD-3-Clause
4
+ *
5
+ * Redistribution and use in source and binary forms, with or without
6
+ * modification, are permitted provided that the following conditions are met:
7
+ *
8
+ * 1. Redistributions of source code must retain the above copyright notice,
9
+ *this list of conditions and the following disclaimer.
10
+ *
11
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
12
+ * this list of conditions and the following disclaimer in the documentation
13
+ * and/or other materials provided with the distribution.
14
+ *
15
+ * 3. Neither the name of the copyright holder nor the names of its
16
+ * contributors may be used to endorse or promote products derived from
17
+ * this software without specific prior written permission.
18
+ *
19
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22
+ *ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23
+ *LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24
+ *CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25
+ *SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26
+ *INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27
+ *CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28
+ *ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29
+ *POSSIBILITY OF SUCH DAMAGE.
30
+ *
31
+ **************************************************************************************************/
32
+
33
+ // Copyright (c) Microsoft Corporation.
34
+ // SPDX-License-Identifier: Apache-2.0
35
+
36
+ // DeepSpeed Team
37
+
38
+ /*! \file
39
+ \brief Inspired by
40
+ "cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h" Loads tiles of GEMM
41
+ operands from a RowMajor shared-memory layout into registers to use by A100
42
+ TensorCores.
43
+
44
+ The difference with "mma_tensor_op_tile_access_iterator.h" is that:
45
+ (1) We use "ldmatrix" to load tiles, rather than manual loads (slightly
46
+ faster) (2) We support to transpose the operand (eg read `A.transpose()` when
47
+ the shared memory holds `A`)
48
+
49
+ This is only implemented for the specific shapes.
50
+ */
51
+ #pragma once
52
+
53
+ #include <cutlass/gemm/gemm.h>
54
+
55
+ ////////////////////////////////////////////////////////////////////////////////
56
+ namespace cutlass {
57
+ namespace gemm {
58
+ namespace warp {
59
+
60
+ template <
61
+ /// Operand identity
62
+ Operand Operand_,
63
+ /// Data type of A elements
64
+ typename Element_,
65
+ bool kTranspose = false>
66
+ class WarpIteratorFromSmem {
67
+ public:
68
+ /// Shape of tile to load (concept: MatrixShape)
69
+ using Shape = cutlass::MatrixShape<32, 32>;
70
+
71
+ /// Operand tag
72
+ static Operand const kOperand = Operand_;
73
+
74
+ /// Basic check
75
+ static_assert(
76
+ kOperand == Operand::kA || kOperand == Operand::kB,
77
+ "WarpIteratorFromSmem may only be instantiated for A or B operands to warp-level Mma.");
78
+
79
+ /// Element type
80
+ using Element = Element_;
81
+ static_assert(sizeof_bits<Element>::value == 16, "Only supported for half");
82
+
83
+ /// Layout of source tile
84
+ using Layout = cutlass::layout::RowMajor;
85
+
86
+ /// Shape of one matrix product operation (concept: MatrixShape)
87
+ using InstructionShape = cutlass::MatrixShape<16, 8>;
88
+
89
+ /// Delta between *MMA operations (in units of *MMA operations, concept:
90
+ /// MatrixShape)
91
+ static int const kOpDelta = 1;
92
+
93
+ /// Number of participating threads
94
+ static int const kThreads = 32;
95
+
96
+ /// TensorRef type for loading element from a tensor
97
+ using TensorRef = TensorRef<Element, Layout>;
98
+
99
+ /// Index type
100
+ using Index = typename TensorRef::Index;
101
+
102
+ /// Long Index type
103
+ using LongIndex = typename TensorRef::LongIndex;
104
+
105
+ /// Coordinate for an element in the tensor
106
+ using TensorCoord = typename TensorRef::TensorCoord;
107
+
108
+ /// Number of elements accessed per Shared Memory load
109
+ static int const kElementsPerAccess =
110
+ (sizeof_bits<Element>::value >= 32 ? 1 : 32 / sizeof_bits<Element>::value);
111
+
112
+ using InstructionCount = MatrixShape<Shape::kRow / InstructionShape::kRow,
113
+ Shape::kColumn / InstructionShape::kColumn>;
114
+
115
+ static int const kIterations = (kOperand == Operand::kA) ? InstructionCount::kColumn
116
+ : InstructionCount::kRow;
117
+
118
+ public:
119
+ //
120
+ // Derived quantities
121
+ //
122
+
123
+ /// Fragment object holding a thread's part of a tile
124
+ using Fragment =
125
+ Array<Element,
126
+ (kOperand == Operand::kA) ? (Shape::kRow* InstructionShape::kColumn / kThreads)
127
+ : (Shape::kColumn* InstructionShape::kRow / kThreads)>;
128
+
129
+ /// Memory access type
130
+ // using AccessType = AlignedArray<Element, kElementsPerAccess>;
131
+ using AccessType = Array<unsigned, 4>;
132
+
133
+ static int constexpr kWarpShapeDivisibleInner =
134
+ (kOperand == Operand::kA ? InstructionShape::kColumn : InstructionShape::kRow);
135
+ static int constexpr kAccessesInner = (kWarpShapeDivisibleInner / kElementsPerAccess) / 4;
136
+ static int const kTilesPerInstruction = InstructionShape::kRow / 8;
137
+
138
+ private:
139
+ /// Underlying tensor reference
140
+ TensorRef ref_;
141
+
142
+ /// Origin
143
+ MatrixCoord origin_;
144
+
145
+ /// Iterations in a tile
146
+ int iterations_;
147
+
148
+ public:
149
+ /// Constructor from TensorRef
150
+ CUTLASS_HOST_DEVICE
151
+ WarpIteratorFromSmem(TensorRef const& ref, int lane_id)
152
+ : WarpIteratorFromSmem(ref, {Shape::kRow, Shape::kColumn}, lane_id)
153
+ {
154
+ }
155
+ CUTLASS_HOST_DEVICE
156
+ WarpIteratorFromSmem(TensorRef const& ref, TensorCoord extent, int lane_id)
157
+ : ref_(ref), iterations_(0)
158
+ {
159
+ int ldsm_vec_num = (lane_id >> 3);
160
+ if (kOperand == Operand::kA) {
161
+ origin_ = MatrixCoord(lane_id % 8, 0);
162
+ static_assert(InstructionCount::kRow * kAccessesInner * kTilesPerInstruction == 4, "");
163
+ CUTLASS_PRAGMA_UNROLL
164
+ for (int inst_m_idx = 0; inst_m_idx < InstructionCount::kRow; ++inst_m_idx) {
165
+ CUTLASS_PRAGMA_UNROLL
166
+ for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) {
167
+ CUTLASS_PRAGMA_UNROLL
168
+ for (int access_m_idx = 0; access_m_idx < kTilesPerInstruction;
169
+ ++access_m_idx) {
170
+ int access_idx =
171
+ access_m_idx +
172
+ kTilesPerInstruction * (inner_idx + kAccessesInner * inst_m_idx);
173
+
174
+ MatrixCoord offset(access_m_idx * 8 + inst_m_idx * InstructionShape::kRow,
175
+ inner_idx * 4 * kElementsPerAccess);
176
+
177
+ if (access_idx == ldsm_vec_num) {
178
+ if (kTranspose) { offset = MatrixCoord(offset.column(), offset.row()); }
179
+ origin_ += offset;
180
+ }
181
+ }
182
+ }
183
+ }
184
+ } else {
185
+ origin_ = MatrixCoord(0, lane_id % 8);
186
+ static_assert(InstructionCount::kColumn * kAccessesInner == 4, "");
187
+ CUTLASS_PRAGMA_UNROLL
188
+ for (int inst_n_idx = 0; inst_n_idx < InstructionCount::kColumn; ++inst_n_idx) {
189
+ CUTLASS_PRAGMA_UNROLL
190
+ for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) {
191
+ int access_idx = inner_idx + kAccessesInner * inst_n_idx;
192
+
193
+ MatrixCoord offset(inner_idx * 4 * kElementsPerAccess, inst_n_idx * 8);
194
+
195
+ if (access_idx == ldsm_vec_num) {
196
+ if (kTranspose) { offset = MatrixCoord(offset.column(), offset.row()); }
197
+ origin_ += offset;
198
+ }
199
+ }
200
+ }
201
+ }
202
+
203
+ ref_.add_coord_offset(origin_);
204
+ }
205
+
206
+ /// Advances an iterator along logical dimensions of matrix in units of whole
207
+ /// tiles
208
+ CUTLASS_HOST_DEVICE
209
+ WarpIteratorFromSmem& add_tile_offset(TensorCoord const& tile_offset)
210
+ {
211
+ TensorCoord coord_offset(tile_offset.row() * Shape::kRow,
212
+ tile_offset.column() * Shape::kColumn);
213
+ if (kTranspose) { coord_offset = TensorCoord{coord_offset.column(), coord_offset.row()}; }
214
+ origin_ += coord_offset;
215
+
216
+ ref_.add_coord_offset(coord_offset);
217
+
218
+ return *this;
219
+ }
220
+
221
+ /// Advances the iterator along the advance dimension
222
+ CUTLASS_DEVICE
223
+ void advance()
224
+ {
225
+ if (kOperand == Operand::kA) {
226
+ add_tile_offset({0, 1});
227
+ } else {
228
+ add_tile_offset({1, 0});
229
+ }
230
+
231
+ iterations_ = 0;
232
+ }
233
+
234
+ /// increase iterations in a tile
235
+ CUTLASS_HOST_DEVICE
236
+ WarpIteratorFromSmem& operator++()
237
+ {
238
+ iterations_++;
239
+
240
+ if (iterations_ >= kIterations) advance();
241
+
242
+ return *this;
243
+ }
244
+
245
+ /// Loads a fragment from memory at the location pointed to by the iterator.
246
+ CUTLASS_DEVICE
247
+ void load(Fragment& frag) const
248
+ {
249
+ AccessType* access_ptr = reinterpret_cast<AccessType*>(&frag);
250
+ using LoadLayout =
251
+ typename platform::conditional<kTranspose, layout::ColumnMajor, layout::RowMajor>::type;
252
+
253
+ MatrixCoord offset;
254
+ if (kOperand == Operand::kA) {
255
+ offset = MatrixCoord(0, iterations_ * InstructionShape::kColumn);
256
+ } else {
257
+ offset = MatrixCoord(iterations_ * InstructionShape::kRow, 0);
258
+ }
259
+ if (kTranspose) { offset = MatrixCoord(offset.column(), offset.row()); }
260
+ cutlass::arch::ldsm<LoadLayout, 4>(access_ptr[0], ref_.data() + ref_.offset(offset));
261
+ }
262
+ };
263
+
264
+ ////////////////////////////////////////////////////////////////////////////////
265
+
266
+ } // namespace warp
267
+ } // namespace gemm
268
+ } // namespace cutlass
269
+ ////////////////////////////////////////////////////////////////////////////////
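The constructor above spreads a warp's 32 lanes over ldmatrix source addresses. As a sanity aid, here is a small host-side sketch (not from the commit) that replays that lane-to-origin computation for Operand::kA with 16-bit elements; every constant in it is derived from the class above, and the printout shows that lane i starts at row i, column 0 when kTranspose is false.

// warp_iterator_lane_origin.cpp -- host-side illustration only (not part of the
// diff). It mirrors the Operand::kA, 16-bit branch of the constructor above with
// the constants that branch implies (Shape 32x32, InstructionShape 16x8) and
// prints the shared-memory coordinate each lane starts its ldmatrix access from.
#include <cstdio>

int main()
{
    const int kElementsPerAccess   = 2;  // 32 bits per access / 16-bit element
    const int kAccessesInner       = 1;  // (InstructionShape::kColumn / 2) / 4
    const int kTilesPerInstruction = 2;  // InstructionShape::kRow / 8
    const int kInstructionRows     = 2;  // Shape::kRow / InstructionShape::kRow
    const bool kTranspose = false;       // flip to see the transposed mapping

    for (int lane = 0; lane < 32; ++lane) {
        int row = lane % 8, col = 0;     // origin_ for Operand::kA
        const int ldsm_vec_num = lane >> 3;
        for (int inst_m = 0; inst_m < kInstructionRows; ++inst_m)
            for (int inner = 0; inner < kAccessesInner; ++inner)
                for (int acc_m = 0; acc_m < kTilesPerInstruction; ++acc_m) {
                    const int access_idx =
                        acc_m + kTilesPerInstruction * (inner + kAccessesInner * inst_m);
                    int off_r = acc_m * 8 + inst_m * 16;
                    int off_c = inner * 4 * kElementsPerAccess;
                    if (access_idx == ldsm_vec_num) {
                        if (kTranspose) { const int t = off_r; off_r = off_c; off_c = t; }
                        row += off_r;
                        col += off_c;
                    }
                }
        std::printf("lane %2d -> smem origin (%2d, %2d)\n", lane, row, col);
    }
    return 0;
}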
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/transform/bias_broadcast.h ADDED
@@ -0,0 +1,148 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ #pragma once
+
+ // This does nothing.
+ template <typename ThreadMap, typename Shape, typename scalar_t>
+ struct BroadcastNoLoad {
+ using Fragment =
+ cutlass::Array<scalar_t, ThreadMap::Iterations::kCount * ThreadMap::kElementsPerAccess>;
+ static const bool kEnable = false;
+ CUTLASS_DEVICE static void load(Fragment& frag,
+ scalar_t* ptr,
+ int thread_id,
+ const cutlass::MatrixCoord& extent,
+ int stride)
+ {
+ }
+ CUTLASS_DEVICE static scalar_t*
+ advance(scalar_t* ptr, int B_id, int N_id, int H_id, int strideB, int strideN, int strideH)
+ {
+ return ptr;
+ }
+ };
+
+ // This is to load the bias matrix from global memory with on-the-fly
+ // broadcast. The shape in global memory is [B, N, 1, 1, L]. Each time we load
+ // the last dimension as a length-L row vector, and we further broadcast that
+ // vector to a tile of size [L, L] by repeating it L times.
+ template <typename ThreadMap, typename Shape, typename scalar_t>
+ struct BroadcastA : public BroadcastNoLoad<ThreadMap, Shape, scalar_t> {
+ using Base = BroadcastNoLoad<ThreadMap, Shape, scalar_t>;
+ static const bool kEnable = true;
+ using layout = cutlass::layout::AffineRank2RowMajor;
+
+ using GmemTileIterator = cutlass::transform::threadblock::
+ PredicatedTileIterator<Shape, scalar_t, layout, 0, ThreadMap>;
+ using Fragment = typename GmemTileIterator::Fragment;
+
+ CUTLASS_DEVICE static void load(Fragment& frag,
+ scalar_t* ptr,
+ int thread_id,
+ const cutlass::MatrixCoord& extent,
+ int stride)
+ {
+ GmemTileIterator iter({layout(0, 1)}, ptr, extent, thread_id);
+ iter.load(frag);
+ }
+
+ CUTLASS_DEVICE static scalar_t*
+ advance(scalar_t* ptr, int B_id, int N_id, int H_id, int strideB, int strideN, int strideH)
+ {
+ return ptr + B_id * strideB + N_id * strideN;
+ }
+ };
+
+ // This is to load the bias matrix from global memory with on-the-fly
+ // broadcast. The shape in global memory is [B, 1, H, L, L]. Each time we load
+ // a [L, L] matrix. Different values of N share the same bias matrix when B
+ // and H are the same.
+ template <typename ThreadMap, typename Shape, typename scalar_t>
+ struct BroadcastB : public BroadcastNoLoad<ThreadMap, Shape, scalar_t> {
+ using Base = BroadcastNoLoad<ThreadMap, Shape, scalar_t>;
+ static const bool kEnable = true;
+ using layout = cutlass::layout::RowMajor;
+
+ using GmemTileIterator = cutlass::transform::threadblock::
+ PredicatedTileIterator<Shape, scalar_t, layout, 0, ThreadMap>;
+ using Fragment = typename GmemTileIterator::Fragment;
+
+ CUTLASS_DEVICE static void load(Fragment& frag,
+ scalar_t* ptr,
+ int thread_id,
+ const cutlass::MatrixCoord& extent,
+ int stride)
+ {
+ GmemTileIterator iter({layout(stride)}, ptr, extent, thread_id);
+ iter.load(frag);
+ }
+
+ CUTLASS_DEVICE static scalar_t*
+ advance(scalar_t* ptr, int B_id, int N_id, int H_id, int strideB, int strideN, int strideH)
+ {
+ return ptr + B_id * strideB + H_id * strideH;
+ }
+ };
+
+ template <typename Shape,
+ typename scalar_t,
+ int kThreads,
+ template <typename, typename, typename>
+ class Broadcast1_,
+ template <typename, typename, typename>
+ class Broadcast2_>
+ struct AttentionBiasEpilogue {
+ using ThreadMap = cutlass::transform::PitchLinearStripminedThreadMap<
+ cutlass::layout::PitchLinearShape<Shape::kColumn, Shape::kRow>,
+ kThreads,
+ 1>;
+
+ using Broadcast1 = Broadcast1_<ThreadMap, Shape, scalar_t>;
+ using Broadcast2 = Broadcast2_<ThreadMap, Shape, scalar_t>;
+
+ Broadcast1 broadcast1;
+ Broadcast2 broadcast2;
+
+ using Ref = cutlass::TensorRef<float, cutlass::layout::RowMajor>;
+ using SmemTileIterator = cutlass::transform::threadblock::
+ RegularTileIterator<Shape, float, cutlass::layout::RowMajor, 0, ThreadMap>;
+
+ CUTLASS_DEVICE void operator()(const Ref& ref,
+ scalar_t* ptr1,
+ scalar_t* ptr2,
+ int thread_id,
+ const cutlass::MatrixCoord& extent,
+ int stride)
+ {
+ static_assert(Broadcast1::Fragment::kElements == Broadcast2::Fragment::kElements,
+ "The two broadcast fragments must have the same number of "
+ "elements");
+ typename SmemTileIterator::Fragment frag;
+ frag.clear();
+ float* frag_ptr = reinterpret_cast<float*>(&frag);
+ if (Broadcast1::kEnable) {
+ typename Broadcast1::Fragment frag1;
+ frag1.clear();
+ broadcast1.load(frag1, ptr1, thread_id, extent, stride);
+ scalar_t* frag1_ptr = reinterpret_cast<scalar_t*>(&frag1);
+ for (int i = 0; i < Broadcast1::Fragment::kElements; ++i) {
+ frag_ptr[i] += static_cast<float>(frag1_ptr[i]);
+ }
+ }
+ if (Broadcast2::kEnable) {
+ typename Broadcast2::Fragment frag2;
+ frag2.clear();
+ broadcast2.load(frag2, ptr2, thread_id, extent, stride);
+ scalar_t* frag2_ptr = reinterpret_cast<scalar_t*>(&frag2);
+ for (int i = 0; i < Broadcast2::Fragment::kElements; ++i) {
+ frag_ptr[i] += static_cast<float>(frag2_ptr[i]);
+ }
+ }
+ SmemTileIterator iter(ref, thread_id);
+ iter.store(frag);
+ __syncthreads();
+ }
+ };
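The two comments above pin down the broadcasting rules: BroadcastA re-reads one length-L vector for every row of the [L, L] tile (the affine layout constructed with (0, 1) in load() effectively gives the row a stride of 0), while BroadcastB selects a whole [L, L] slab by (B, H) and ignores N. Below is a small host-side sketch of that index arithmetic, illustrative only and not code from the commit; the helper names and example strides are made up.

// bias_broadcast_indexing.cpp -- host-side illustration of the two broadcast
// patterns described in the comments above.
#include <cstdio>

// Bias shaped [B, N, 1, 1, L]: every row i of the logical LxL tile reads the
// same L-vector, so the row index is dropped (cf. BroadcastA::advance).
long bias_a_offset(long b, long n, long i, long j, long strideB, long strideN)
{
    (void)i;  // broadcast over rows
    return b * strideB + n * strideN + j;
}

// Bias shaped [B, 1, H, L, L]: a full LxL slab is selected by (b, h) and the
// index n is ignored (cf. BroadcastB::advance with a RowMajor stride of L).
long bias_b_offset(long b, long h, long i, long j, long strideB, long strideH, long L)
{
    return b * strideB + h * strideH + i * L + j;
}

int main()
{
    const long L = 4, N = 2, H = 3;
    // example strides for [B, N, 1, 1, L] and [B, 1, H, L, L] respectively
    const long strideN_a = L, strideB_a = N * L;
    const long strideH_b = L * L, strideB_b = H * L * L;

    std::printf("BroadcastA: rows 0 and 3 of tile (b=1, n=1) read the same elements:\n");
    for (long j = 0; j < L; ++j)
        std::printf("  (0,%ld)->%ld  (3,%ld)->%ld\n",
                    j, bias_a_offset(1, 1, 0, j, strideB_a, strideN_a),
                    j, bias_a_offset(1, 1, 3, j, strideB_a, strideN_a));

    std::printf("BroadcastB: element (2,1) of slab (b=1, h=2) -> offset %ld\n",
                bias_b_offset(1, 2, 2, 1, strideB_b, strideH_b, L));
    return 0;
}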
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/transform/tile_smem_loader.h ADDED
@@ -0,0 +1,93 @@
+ /***************************************************************************************************
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights
+ *reserved. SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ *this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ *ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ *LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ *INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ *ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ *POSSIBILITY OF SUCH DAMAGE.
+ *
+ **************************************************************************************************/
+
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ #pragma once
+ #include <cutlass/cutlass.h>
+ #include "cutlass/aligned_buffer.h"
+ #include "cutlass/array.h"
+ #include "cutlass/coord.h"
+ #include "cutlass/layout/matrix.h"
+ #include "cutlass/layout/pitch_linear.h"
+ #include "cutlass/numeric_types.h"
+ #include "cutlass/platform/platform.h"
+ #include "cutlass/transform/pitch_linear_thread_map.h"
+ #include "cutlass/transform/threadblock/predicated_tile_iterator.h"
+ #include "cutlass/transform/threadblock/regular_tile_iterator.h"
+
+ template <typename scalar_t, // scalar type
+ typename ThreadblockTileShape, // size of tile to load
+ int Threads, // number of participating threads
+ int ElementsPerAccess> // thread access width in elements
+ class TileSmemLoader {
+ public:
+ using Shape = ThreadblockTileShape;
+ using SmemTile = cutlass::AlignedBuffer<scalar_t, ThreadblockTileShape::kCount>;
+
+ using ThreadMap = cutlass::transform::PitchLinearStripminedThreadMap<
+ cutlass::layout::PitchLinearShape<ThreadblockTileShape::kColumn, // contiguous
+ ThreadblockTileShape::kRow>, // strided
+ Threads, // Threads
+ ElementsPerAccess>; // ElementsPerAccess
+
+ using GmemTileIterator = cutlass::transform::threadblock::PredicatedTileIterator<
+ ThreadblockTileShape, // Shape
+ scalar_t, // Element
+ cutlass::layout::RowMajor, // Layout
+ 0, // AdvanceRank
+ ThreadMap>; // ThreadMap
+
+ using SmemTileIterator =
+ cutlass::transform::threadblock::RegularTileIterator<ThreadblockTileShape, // Shape
+ scalar_t, // Element
+ cutlass::layout::RowMajor, // Layout
+ 0, // AdvanceRank
+ ThreadMap>; // ThreadMap
+
+ using Fragment = typename GmemTileIterator::Fragment;
+
+ /// load a tile from global memory into shared memory
+ CUTLASS_DEVICE
+ static void load(GmemTileIterator tile_load_iter, SmemTileIterator tile_store_iter)
+ {
+ Fragment tb_frag;
+ tb_frag.clear();
+ tile_load_iter.load(tb_frag);
+ tile_store_iter.store(tb_frag);
+
+ __syncthreads();
+ }
+ };
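TileSmemLoader above is a thin bundle of a gmem iterator, an smem iterator, and a staging load. A hypothetical instantiation, just to show how the template parameters line up; the element type, tile shape, thread count, and access width below are example values, not taken from the commit.

// tile_smem_loader_sketch -- illustrative instantiation only.
// Assumes the header above (which defines TileSmemLoader) is included.
#include "cutlass/matrix_shape.h"
#include "cutlass/numeric_types.h"

using ExampleLoader = TileSmemLoader<cutlass::half_t,               // scalar_t
                                     cutlass::MatrixShape<32, 128>, // tile to stage
                                     /*Threads=*/128,
                                     /*ElementsPerAccess=*/8>;      // 128-bit accesses for half

// Intended flow, per the class above: in a kernel, build an
// ExampleLoader::GmemTileIterator over the source tensor and an
// ExampleLoader::SmemTileIterator over a __shared__ ExampleLoader::SmemTile
// buffer, then call ExampleLoader::load(gmem_iter, smem_iter), which stages the
// tile into shared memory and synchronizes the threadblock.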