diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adagrad/cpu_adagrad.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adagrad/cpu_adagrad.cpp new file mode 100644 index 0000000000000000000000000000000000000000..563255176500ace077fc90e9bf98b6a7ec29056a --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adagrad/cpu_adagrad.cpp @@ -0,0 +1,256 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "cpu_adagrad.h" +#include +#include +#include +#include +#include +#if defined(__ENABLE_CUDA__) +#include +#include "cublas_v2.h" +#include "cuda.h" +#include "curand.h" +#include "custom_cuda_layers.h" +#endif + +static std::unordered_map> s_optimizers; + +// C++ interface + +void Adagrad_Optimizer::Step_1(float* _params, + float* grads, + float* _exp_avg_sq, + size_t _param_size, + ds_half_precision_t* dev_params, + bool half_precision) +{ + size_t rounded_size = 0; +#if defined(__AVX512__) or defined(__AVX256__) + Step_AVX<1>( + &rounded_size, _params, grads, _exp_avg_sq, _param_size, dev_params, half_precision); +#endif + if (_param_size > rounded_size) { + float step_size = -1 * _alpha; + ds_half_precision_t* grads_cast_h; + ds_half_precision_t* params_cast_h; + if (half_precision) { + grads_cast_h = reinterpret_cast(grads); + params_cast_h = reinterpret_cast(_params); + } + for (size_t t = rounded_size; t < _param_size; t += TILE) { + size_t copy_size = TILE; + if ((t + TILE) > _param_size) copy_size = _param_size - t; + size_t offset = copy_size + t; +#if defined(__ENABLE_CUDA__) + if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); } +#elif defined(__ENABLE_CANN__) + if ((t / TILE) >= 2) { aclrtSynchronizeStream(_streams[_buf_index].stream()); } +#endif +#pragma omp parallel for + for (size_t k = t; k < offset; k++) { + float grad = half_precision ? (float)grads_cast_h[k] : grads[k]; + float param = half_precision ? 
(float)params_cast_h[k] : _params[k]; + float momentum = grads[k]; + float variance = _exp_avg_sq[k]; + if (_weight_decay > 0) { grad = param * _weight_decay + grad; } + + variance += grad * grad; + + grad = sqrt(variance); + grad += _eps; + grad = momentum / grad; + param = grad * step_size + param; +#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) + if (dev_params) _doubled_buffer[_buf_index][k - t] = param; +#endif + if (half_precision) + params_cast_h[k] = (ds_half_precision_t)param; + else + _params[k] = param; + // STORE UPDATE TERM TO GRAD'S MEMORY + grads[k] = grad * step_size; + _exp_avg_sq[k] = variance; + } +#if defined(__ENABLE_CUDA__) + if (dev_params) { + launch_param_update( + _doubled_buffer[_buf_index], dev_params + t, (copy_size), _streams[_buf_index]); + _buf_index = !_buf_index; + } +#elif defined(__ENABLE_CANN__) + if (dev_params) { + size_t memcpy_size = copy_size * sizeof(_doubled_buffer[_buf_index][0]); + aclrtMemcpy(dev_params + t, + memcpy_size, + _doubled_buffer[_buf_index], + memcpy_size, + aclrtMemcpyKind::ACL_MEMCPY_HOST_TO_DEVICE); + + _buf_index = !_buf_index; + } +#endif + } + } +} + +void Adagrad_Optimizer::Step_4(float* _params, + float* grads, + float* _exp_avg_sq, + size_t _param_size, + ds_half_precision_t* dev_params, + bool half_precision) +{ + size_t rounded_size = 0; +#if defined(__AVX512__) or defined(__AVX256__) + Step_AVX<4>( + &rounded_size, _params, grads, _exp_avg_sq, _param_size, dev_params, half_precision); +#endif + if (_param_size > rounded_size) + Step_1((_params + rounded_size), + (grads + rounded_size), + (_exp_avg_sq + rounded_size), + (_param_size - rounded_size), + (dev_params != nullptr ? (dev_params + rounded_size) : dev_params), + half_precision); +} + +int create_adagrad_optimizer(int optimizer_id, + float alpha = 1e-2, + float eps = 1e-8, + float weight_decay = 0, + bool should_log = false) +{ + auto opt = std::make_shared(alpha, eps, weight_decay); + + s_optimizers[optimizer_id] = opt; + + if (should_log) { + std::string avx_type = ""; +#if defined(__AVX512__) + avx_type = "AVX512"; +#else +#if defined(__AVX256__) + avx_type = "AVX2"; +#else + avx_type = "scalar"; +#endif +#endif + + printf("Adagrad Optimizer #%d is created with %s arithmetic capability.\n", + optimizer_id, + avx_type.c_str()); + printf("Config: alpha=%f, weight_decay=%f\n", alpha, weight_decay); + } + + return 0; +} + +void Adagrad_Optimizer::Step_8(float* _params, + float* grads, + float* _exp_avg_sq, + size_t _param_size, + ds_half_precision_t* dev_params, + bool half_precision) +{ + size_t rounded_size = 0; +#if defined(__AVX512__) or defined(__AVX256__) + Step_AVX<8>( + &rounded_size, _params, grads, _exp_avg_sq, _param_size, dev_params, half_precision); +#endif + if (_param_size > rounded_size) + Step_4((_params + rounded_size), + (grads + rounded_size), + (_exp_avg_sq + rounded_size), + (_param_size - rounded_size), + (dev_params != nullptr ? 
(dev_params + rounded_size) : dev_params), + half_precision); +} + +int ds_adagrad_step(int optimizer_id, + size_t step, + float lr, + float epsilon, + float weight_decay, + torch::Tensor& params, + torch::Tensor& grads, + torch::Tensor& exp_avg_sq) +{ + auto params_c = params.contiguous(); + auto grads_c = grads.contiguous(); + auto exp_avg_sq_c = exp_avg_sq.contiguous(); + + float* params_ptr = (float*)params_c.data_ptr(); + float* grads_ptr = (float*)grads_c.data_ptr(); + float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr(); + + std::shared_ptr opt = + std::static_pointer_cast(s_optimizers[optimizer_id]); + opt->IncrementStep(step); + opt->update_state(lr, epsilon, weight_decay); + opt->Step_8(params_ptr, grads_ptr, exp_avg_sq_ptr, params_c.numel()); + +#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) + opt->SynchronizeStreams(); +#endif + return 0; +} + +int ds_adagrad_step_plus_copy(int optimizer_id, + size_t step, + float lr, + float epsilon, + float weight_decay, + torch::Tensor& params, + torch::Tensor& grads, + torch::Tensor& exp_avg_sq, + torch::Tensor& gpu_params) +{ +#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) + auto params_c = params.contiguous(); + auto gpu_params_c = gpu_params.contiguous(); + auto exp_avg_sq_c = exp_avg_sq.contiguous(); + auto grads_c = grads.contiguous(); + + float* params_ptr = (float*)params_c.data_ptr(); + float* grads_ptr = (float*)grads_c.data_ptr(); + ds_half_precision_t* gpu_params_ptr = (ds_half_precision_t*)gpu_params_c.data_ptr(); + float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr(); + + std::shared_ptr opt = + std::static_pointer_cast(s_optimizers[optimizer_id]); + opt->IncrementStep(step); + opt->update_state(lr, epsilon, weight_decay); + opt->Step_8(params_ptr, + grads_ptr, + exp_avg_sq_ptr, + params_c.numel(), + gpu_params_ptr, + (params.options().dtype() == at::kHalf)); + + opt->SynchronizeStreams(); +#else + assert(false); +#endif + return 0; +} + +int destroy_adagrad_optimizer(int optimizer_id) +{ + s_optimizers.erase(optimizer_id); + + return 0; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("adagrad_update", &ds_adagrad_step, "DeepSpeed CPU Adagrad update (C++)"); + m.def("adagrad_update_copy", + &ds_adagrad_step_plus_copy, + "DeepSpeed CPU Adagrad update and param copy (C++)"); + m.def("create_adagrad", &create_adagrad_optimizer, "DeepSpeed CPU Adagrad (C++)"); + m.def("destroy_adagrad", &destroy_adagrad_optimizer, "DeepSpeed CPU Adagrad destroy (C++)"); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/cpu_adam.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/cpu_adam.cpp new file mode 100644 index 0000000000000000000000000000000000000000..96809827f3e13fc02e0755906d694c7e721e0384 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/cpu_adam.cpp @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. 
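For reference, the per-element arithmetic that Adagrad_Optimizer::Step_1 in cpu_adagrad.cpp above applies on its non-vectorized tail (the AVX, half-precision, and device-copy paths are omitted) reduces to the sketch below; the function name and signature are illustrative, not part of the extension.

#include <cmath>
#include <cstddef>

// Minimal scalar Adagrad step over fp32 buffers, mirroring the tail loop of Step_1.
void adagrad_step_sketch(float* params,
                         float* grads,
                         float* exp_avg_sq,
                         size_t param_size,
                         float lr,
                         float eps,
                         float weight_decay)
{
    for (size_t k = 0; k < param_size; ++k) {
        const float momentum = grads[k];  // raw gradient is kept as the numerator
        float grad = (weight_decay > 0) ? params[k] * weight_decay + grads[k] : grads[k];
        const float variance = exp_avg_sq[k] + grad * grad;  // accumulated squared gradient
        const float update = momentum / (std::sqrt(variance) + eps);
        params[k] -= lr * update;
        grads[k] = -lr * update;  // Step_1 also stores the scaled update term back into grads
        exp_avg_sq[k] = variance;
    }
}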
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "cpu_adam.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("adam_update", &ds_adam_step, "DeepSpeed CPU Adam update (C++)"); + m.def("adam_update_copy", + &ds_adam_step_plus_copy, + "DeepSpeed CPU Adam update and param copy (C++)"); + m.def("create_adam", &create_adam_optimizer, "DeepSpeed CPU Adam (C++)"); + m.def("destroy_adam", &destroy_adam_optimizer, "DeepSpeed CPU Adam destroy (C++)"); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/cpu_adam_impl.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/cpu_adam_impl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9a4a8d9565198a1e05a13d43afcc07dc4313eca2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/cpu_adam_impl.cpp @@ -0,0 +1,312 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include +#include +#include +#include +#include +#include "cpu_adam.h" + +#if defined(__ENABLE_CUDA__) +#include +#include "cublas_v2.h" +#include "cuda.h" +#include "curand.h" +#include "custom_cuda_layers.h" +#endif + +static std::unordered_map> s_optimizers; + +// C++ interface + +void Adam_Optimizer::Step_1(float* _params, + float* grads, + float* _exp_avg, + float* _exp_avg_sq, + size_t _param_size, + ds_half_precision_t* dev_params, + bool half_precision) +{ + size_t rounded_size = 0; +#if defined(__AVX512__) or defined(__AVX256__) + Step_AVX<1>(&rounded_size, + _params, + grads, + _exp_avg, + _exp_avg_sq, + _param_size, + dev_params, + half_precision); +#endif + if (_param_size > rounded_size) { + float betta1_minus1 = 1 - _betta1; + float betta2_minus1 = 1 - _betta2; + + float step_size = -1 * _alpha / _bias_correction1; + float w_decay = -1 * _alpha * _weight_decay; + ds_half_precision_t* grads_cast_h; + ds_half_precision_t* params_cast_h; + if (half_precision) { + grads_cast_h = reinterpret_cast(grads); + params_cast_h = reinterpret_cast(_params); + } + + for (size_t t = rounded_size; t < _param_size; t += TILE) { + size_t copy_size = TILE; + if ((t + TILE) > _param_size) copy_size = _param_size - t; + size_t offset = copy_size + t; +#if defined(__ENABLE_CUDA__) + if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); } +#elif defined(__ENABLE_CANN__) + if ((t / TILE) >= 2) { aclrtSynchronizeStream(_streams[_buf_index].stream()); } +#endif +#pragma omp parallel for + for (size_t k = t; k < offset; k++) { + float grad = half_precision ? (float)grads_cast_h[k] : grads[k]; + float param = half_precision ? 
(float)params_cast_h[k] : _params[k]; + float momentum = _exp_avg[k]; + float variance = _exp_avg_sq[k]; + if (_weight_decay > 0 && !_adamw_mode) { grad = param * _weight_decay + grad; } + momentum = momentum * _betta1; + momentum = grad * betta1_minus1 + momentum; + + variance = variance * _betta2; + grad = grad * grad; + variance = grad * betta2_minus1 + variance; + + grad = sqrt(variance); + grad = grad * _bias_correction2 + _eps; + grad = momentum / grad; + if (_weight_decay > 0 && _adamw_mode) { param += w_decay * param; } + param = grad * step_size + param; +#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) + if (dev_params) _doubled_buffer[_buf_index][k - t] = param; +#endif + if (half_precision) + params_cast_h[k] = (ds_half_precision_t)param; + else + _params[k] = param; + _exp_avg[k] = momentum; + _exp_avg_sq[k] = variance; + } +#if defined(__ENABLE_CUDA__) + if (dev_params) { + launch_param_update( + _doubled_buffer[_buf_index], dev_params + t, (copy_size), _streams[_buf_index]); + + _buf_index = !_buf_index; + } +#elif defined(__ENABLE_CANN__) + if (dev_params) { + size_t memcpy_size = copy_size * sizeof(_doubled_buffer[_buf_index][0]); + aclrtMemcpy(dev_params + t, + memcpy_size, + _doubled_buffer[_buf_index], + memcpy_size, + aclrtMemcpyKind::ACL_MEMCPY_HOST_TO_DEVICE); + + _buf_index = !_buf_index; + } +#endif + } + } +} + +void Adam_Optimizer::Step_4(float* _params, + float* grads, + float* _exp_avg, + float* _exp_avg_sq, + size_t _param_size, + ds_half_precision_t* dev_params, + bool half_precision) +{ + size_t rounded_size = 0; +#if defined(__AVX512__) or defined(__AVX256__) + Step_AVX<4>(&rounded_size, + _params, + grads, + _exp_avg, + _exp_avg_sq, + _param_size, + dev_params, + half_precision); +#endif + if (_param_size > rounded_size) + Step_1((_params + rounded_size), + (grads + rounded_size), + (_exp_avg + rounded_size), + (_exp_avg_sq + rounded_size), + (_param_size - rounded_size), + (dev_params != nullptr ? (dev_params + rounded_size) : dev_params), + half_precision); +} + +int create_adam_optimizer(int optimizer_id, + float alpha, + float betta1, + float betta2, + float eps, + float weight_decay, + bool adamw_mode, + bool should_log) +{ + auto opt = + std::make_shared(alpha, betta1, betta2, eps, weight_decay, adamw_mode); + + s_optimizers[optimizer_id] = opt; + + if (should_log) { + std::string avx_type = ""; +#if defined(__AVX512__) + avx_type = "AVX512"; +#else +#if defined(__AVX256__) + avx_type = "AVX2"; +#else + avx_type = "scalar"; +#endif +#endif + + printf("Adam Optimizer #%d is created with %s arithmetic capability.\n", + optimizer_id, + avx_type.c_str()); + printf("Config: alpha=%f, betas=(%f, %f), weight_decay=%f, adam_w=%d\n", + alpha, + betta1, + betta2, + weight_decay, + (int)adamw_mode); + } + + return 0; +} + +void Adam_Optimizer::Step_8(float* _params, + float* grads, + float* _exp_avg, + float* _exp_avg_sq, + size_t _param_size, + ds_half_precision_t* dev_params, + bool half_precision) +{ + size_t rounded_size = 0; +#if defined(__AVX512__) or defined(__AVX256__) + Step_AVX<8>(&rounded_size, + _params, + grads, + _exp_avg, + _exp_avg_sq, + _param_size, + dev_params, + half_precision); +#endif + if (_param_size > rounded_size) + Step_4((_params + rounded_size), + (grads + rounded_size), + (_exp_avg + rounded_size), + (_exp_avg_sq + rounded_size), + (_param_size - rounded_size), + (dev_params != nullptr ? 
(dev_params + rounded_size) : dev_params), + half_precision); +} + +int ds_adam_step(int optimizer_id, + size_t step, + float lr, + float beta1, + float beta2, + float epsilon, + float weight_decay, + bool bias_correction, + torch::Tensor& params, + torch::Tensor& grads, + torch::Tensor& exp_avg, + torch::Tensor& exp_avg_sq) +{ + auto params_c = params.contiguous(); + auto grads_c = grads.contiguous(); + auto exp_avg_c = exp_avg.contiguous(); + auto exp_avg_sq_c = exp_avg_sq.contiguous(); + + // assert(params.options().dtype() == grads.options().dtype()); + + float* params_ptr = (float*)params_c.data_ptr(); + float* grads_ptr = (float*)grads_c.data_ptr(); + float* exp_avg_ptr = (float*)exp_avg_c.data_ptr(); + float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr(); + + std::shared_ptr opt = + std::static_pointer_cast(s_optimizers[optimizer_id]); + opt->IncrementStep(step, beta1, beta2); + opt->update_state(lr, epsilon, weight_decay, bias_correction); + + opt->Step_8(params_ptr, + grads_ptr, + exp_avg_ptr, + exp_avg_sq_ptr, + params_c.numel(), + nullptr, + (params.options().dtype() == at::kHalf)); + +#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) + opt->SynchronizeStreams(); +#endif + return 0; +} + +int ds_adam_step_plus_copy(int optimizer_id, + size_t step, + float lr, + float beta1, + float beta2, + float epsilon, + float weight_decay, + bool bias_correction, + torch::Tensor& params, + torch::Tensor& grads, + torch::Tensor& exp_avg, + torch::Tensor& exp_avg_sq, + torch::Tensor& device_params) +{ +#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) + auto params_c = params.contiguous(); + auto device_params_c = device_params.contiguous(); + auto exp_avg_c = exp_avg.contiguous(); + auto exp_avg_sq_c = exp_avg_sq.contiguous(); + auto grads_c = grads.contiguous(); + + float* params_ptr = (float*)params_c.data_ptr(); + float* grads_ptr = (float*)grads_c.data_ptr(); + ds_half_precision_t* device_params_ptr = (ds_half_precision_t*)device_params_c.data_ptr(); + float* exp_avg_ptr = (float*)exp_avg_c.data_ptr(); + float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr(); + + std::shared_ptr opt = + std::static_pointer_cast(s_optimizers[optimizer_id]); + opt->IncrementStep(step, beta1, beta2); + opt->update_state(lr, epsilon, weight_decay, bias_correction); + opt->Step_8(params_ptr, + grads_ptr, + exp_avg_ptr, + exp_avg_sq_ptr, + params_c.numel(), + device_params_ptr, + (params.options().dtype() == at::kHalf)); + + opt->SynchronizeStreams(); +#else + assert(false); +#endif + return 0; +} + +int destroy_adam_optimizer(int optimizer_id) +{ + s_optimizers.erase(optimizer_id); + + return 0; +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/fused_adam_frontend.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/fused_adam_frontend.cpp new file mode 100644 index 0000000000000000000000000000000000000000..13b390248608b046dab443f85346b5446a47d722 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/fused_adam_frontend.cpp @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft Corporation. 
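Before the fused CUDA sources, a reference for the arithmetic in cpu_adam_impl.cpp: the per-element update of Adam_Optimizer::Step_1 corresponds to the textbook Adam/AdamW step sketched below. Step_1 precomputes the bias-correction factors in update_state; the helper here recomputes them per call and is illustrative only.

#include <cmath>
#include <cstddef>

// Minimal scalar Adam/AdamW step over fp32 buffers; adamw_mode selects decoupled weight decay.
void adam_step_sketch(float* params, float* grads, float* exp_avg, float* exp_avg_sq,
                      size_t param_size, size_t step, float lr, float beta1, float beta2,
                      float eps, float weight_decay, bool adamw_mode)
{
    const float bias_correction1 = 1.0f - std::pow(beta1, (float)step);
    const float bias_correction2 = 1.0f - std::pow(beta2, (float)step);
    for (size_t k = 0; k < param_size; ++k) {
        float grad = grads[k];
        if (weight_decay > 0 && !adamw_mode) grad += weight_decay * params[k];  // L2 mode
        exp_avg[k] = beta1 * exp_avg[k] + (1.0f - beta1) * grad;
        exp_avg_sq[k] = beta2 * exp_avg_sq[k] + (1.0f - beta2) * grad * grad;
        const float denom = std::sqrt(exp_avg_sq[k] / bias_correction2) + eps;
        if (weight_decay > 0 && adamw_mode) params[k] -= lr * weight_decay * params[k];
        params[k] -= lr * (exp_avg[k] / bias_correction1) / denom;
    }
}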
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include + +void multi_tensor_adam_cuda(int chunk_size, + at::Tensor noop_flag, + std::vector> tensor_lists, + const float lr, + const float beta1, + const float beta2, + const float epsilon, + const int step, + const int mode, + const int bias_correction, + const float weight_decay); + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("multi_tensor_adam", + &multi_tensor_adam_cuda, + "Compute and apply gradient update to parameters for Adam optimizer"); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/multi_tensor_adam.cu b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/multi_tensor_adam.cu new file mode 100644 index 0000000000000000000000000000000000000000..a1fc7d15aec9ad16ddb242fd62c99cbfdca55d0c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/multi_tensor_adam.cu @@ -0,0 +1,203 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Copyright NVIDIA/apex +This file is adapted from fused adam in NVIDIA/apex, commit a109f85 +*/ + +#include +#include +#include +#include +// Another possibility: +// #include + +#include + +#include "multi_tensor_apply.cuh" +#include "type_shim.h" + +#define BLOCK_SIZE 512 +#define ILP 4 + +typedef enum : int { + ADAM_MODE_0 = 0, // L2 regularization mode + ADAM_MODE_1 = 1 // Decoupled weight decay mode(AdamW) +} adamMode_t; + +using MATH_T = float; + +template +struct AdamFunctor { + __device__ __forceinline__ void operator()(int chunk_size, + volatile int* noop_gmem, + TensorListMetadata<4>& tl, + const float beta1, + const float beta2, + const float beta1_correction, + const float beta2_correction, + const float epsilon, + const float lr, + adamMode_t mode, + const float decay) + { + // I'd like this kernel to propagate infs/nans. 
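        // Each CUDA block processes one fixed-size chunk of one tensor: multi_tensor_apply
        // fills tl.block_to_tensor[] and tl.block_to_chunk[] so that blockIdx.x resolves to
        // a (tensor slot, chunk index) pair, and tl.addresses[0..3] hold the gradient,
        // parameter, exp_avg, and exp_avg_sq pointers for that tensor.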
+ // if(*noop_gmem == 1) + // return; + + index_t tensor_loc = tl.block_to_tensor[blockIdx.x]; + + // potentially use to pass in list of scalar + // int tensor_num = tl.start_tensor_this_launch + tensor_loc; + + index_t chunk_idx = tl.block_to_chunk[blockIdx.x]; + index_t n = tl.sizes[tensor_loc]; + + T* g = (T*)tl.addresses[0][tensor_loc]; + g += chunk_idx * chunk_size; + + T* p = (T*)tl.addresses[1][tensor_loc]; + p += chunk_idx * chunk_size; + + T* m = (T*)tl.addresses[2][tensor_loc]; + m += chunk_idx * chunk_size; + + T* v = (T*)tl.addresses[3][tensor_loc]; + v += chunk_idx * chunk_size; + + n -= chunk_idx * chunk_size; + + // see note in multi_tensor_scale_kernel.cu + for (index_t i_start = 0; i_start < n && i_start < chunk_size; + i_start += blockDim.x * ILP) { + MATH_T r_g[ILP]; + MATH_T r_p[ILP]; + MATH_T r_m[ILP]; + MATH_T r_v[ILP]; +#pragma unroll + for (int ii = 0; ii < ILP; ii++) { + int i = i_start + threadIdx.x + ii * blockDim.x; + if (i < n && i < chunk_size) { + r_g[ii] = g[i]; + r_p[ii] = p[i]; + r_m[ii] = m[i]; + r_v[ii] = v[i]; + } else { + r_g[ii] = MATH_T(0); + r_p[ii] = MATH_T(0); + r_m[ii] = MATH_T(0); + r_v[ii] = MATH_T(0); + } + } +#pragma unroll + for (int ii = 0; ii < ILP; ii++) { + if (mode == ADAM_MODE_0) { // L2 + r_g[ii] = r_g[ii] + (decay * r_p[ii]); + r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii]; + r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii]; + MATH_T next_m_unbiased = r_m[ii] / beta1_correction; + MATH_T next_v_unbiased = r_v[ii] / beta2_correction; + MATH_T denom = sqrtf(next_v_unbiased) + epsilon; + MATH_T update = next_m_unbiased / denom; + r_p[ii] = r_p[ii] - (lr * update); + } else { // weight decay + r_m[ii] = beta1 * r_m[ii] + (1 - beta1) * r_g[ii]; + r_v[ii] = beta2 * r_v[ii] + (1 - beta2) * r_g[ii] * r_g[ii]; + MATH_T next_m_unbiased = r_m[ii] / beta1_correction; + MATH_T next_v_unbiased = r_v[ii] / beta2_correction; + MATH_T denom = sqrtf(next_v_unbiased) + epsilon; + MATH_T update = (next_m_unbiased / denom) + (decay * r_p[ii]); + r_p[ii] = r_p[ii] - (lr * update); + } + } +#pragma unroll + for (int ii = 0; ii < ILP; ii++) { + int i = i_start + threadIdx.x + ii * blockDim.x; + if (i < n && i < chunk_size) { + p[i] = r_p[ii]; + m[i] = r_m[ii]; + v[i] = r_v[ii]; + } + } + } + } +}; + +void multi_tensor_adam_cuda(int chunk_size, + at::Tensor noop_flag, + std::vector> tensor_lists, + const float lr, + const float beta1, + const float beta2, + const float epsilon, + const int step, + const int mode, + const int bias_correction, + const float weight_decay) +{ + using namespace at; + + // Handle bias correction mode + float bias_correction1 = 1.0f, bias_correction2 = 1.0f; + if (bias_correction == 1) { + bias_correction1 = 1 - std::pow(beta1, step); + bias_correction2 = 1 - std::pow(beta2, step); + } + + size_t max_size = 0; + bool requires_64bit_indexing = false; + for (auto it = tensor_lists.begin(); it != tensor_lists.end(); it++) { + for (auto it2 = it->begin(); it2 != it->end(); it2++) { + if (it2->numel() > max_size) { + max_size = it2->numel(); + if (max_size >= INT_MAX) { + requires_64bit_indexing = true; + break; + } + } + } + if (requires_64bit_indexing) { break; } + } + + // Assume single type across p,g,m1,m2 now + if (requires_64bit_indexing) { + DISPATCH_DOUBLE_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), + 0, + "adam", + multi_tensor_apply<4>((int64_t)BLOCK_SIZE, + (int64_t)chunk_size, + noop_flag, + tensor_lists, + AdamFunctor(), + beta1, + beta2, + bias_correction1, + bias_correction2, + epsilon, + lr, + 
(adamMode_t)mode, + weight_decay);) + } else { + DISPATCH_DOUBLE_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), + 0, + "adam", + multi_tensor_apply<4>(BLOCK_SIZE, + chunk_size, + noop_flag, + tensor_lists, + AdamFunctor(), + beta1, + beta2, + bias_correction1, + bias_correction2, + epsilon, + lr, + (adamMode_t)mode, + weight_decay);) + } + + AT_CUDA_CHECK(cudaGetLastError()); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/multi_tensor_apply.cuh b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/multi_tensor_apply.cuh new file mode 100644 index 0000000000000000000000000000000000000000..342376c141be7578553910df43b003d29e0e4fc4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/adam/multi_tensor_apply.cuh @@ -0,0 +1,132 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Copyright NVIDIA/apex +This file is adapted from fused adam in NVIDIA/apex, commit a109f85 +*/ + +#include +#include +#include +#include +#include +#include "compat.h" + +#include + +// #include + +// This header is the one-stop shop for all your multi-tensor apply needs. + +// TODO: Kernel arg size limit may be <4KB for some other cards (ie Jetson) +constexpr int depth_to_max_tensors[5] = {110, 64, 48, 36, 30}; +constexpr int depth_to_max_blocks[5] = {320, 320, 320, 320, 320}; + +template +struct TensorListMetadata { + void* addresses[n][depth_to_max_tensors[n - 1]]; + int sizes[depth_to_max_tensors[n - 1]]; + unsigned char block_to_tensor[depth_to_max_blocks[n - 1]]; + int block_to_chunk[depth_to_max_blocks[n - 1]]; // I fear this needs to be a full int. + int start_tensor_this_launch; +}; + +template +__global__ void multi_tensor_apply_kernel(int64_t chunk_size, + volatile int* noop_flag, + T tl, + U callable, + ArgTypes... args) +{ + // Hand the chunk information to the user-supplied functor to process however it likes. + callable(chunk_size, noop_flag, tl, args...); +} + +template +void multi_tensor_apply(int64_t block_size, + int64_t chunk_size, + const at::Tensor& noop_flag, + const std::vector>& tensor_lists, + T callable, + ArgTypes... args) +{ + TORCH_CHECK(tensor_lists.size() == depth, "tensor_lists.size() != depth"); + int len0 = tensor_lists[0].size(); + TORCH_CHECK(len0 > 0, "tensor_lists[0].size() is not > 0"); + auto ref_device = tensor_lists[0][0].device(); + TORCH_CHECK(ref_device.type() == at::kCUDA, "expected input to be on cuda"); + for (int l = 0; l < tensor_lists.size(); l++) // No range-based for because I need indices + { + TORCH_CHECK(tensor_lists[l].size() == len0, "Size mismatch among tensor lists"); + for (int t = 0; t < tensor_lists[l].size(); t++) { + // TODO: Print which tensor fails. 
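            // The checks below require each tensor to be contiguous (channels-last layouts
            // are also accepted when built against PyTorch >= 1.5, see VERSION_GE_1_5), to
            // reside on the same CUDA device as the first tensor, and to match the element
            // count of the corresponding tensor in list 0, so a single chunk index is valid
            // across all depth lists.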
+ bool contiguous_memory = tensor_lists[l][t].is_contiguous(); +#ifdef VERSION_GE_1_5 + contiguous_memory = (contiguous_memory || + tensor_lists[l][t].is_contiguous(at::MemoryFormat::ChannelsLast)); +#endif + TORCH_CHECK(contiguous_memory, "A tensor was not contiguous."); + TORCH_CHECK(tensor_lists[l][t].device() == ref_device, + "A tensor was not on the same device as the first tensor"); + TORCH_CHECK(tensor_lists[l][t].numel() == tensor_lists[0][t].numel(), "Size mismatch"); + } + } + + int ntensors = tensor_lists[0].size(); + + TensorListMetadata tl; + + const at::cuda::OptionalCUDAGuard device_guard(device_of(tensor_lists[0][0])); + auto stream = at::cuda::getCurrentCUDAStream(); + + tl.start_tensor_this_launch = 0; + int loc_block_info = 0; + int loc_tensor_info = 0; + for (int t = 0; t < ntensors; t++) { + tl.sizes[loc_tensor_info] = tensor_lists[0][t].numel(); + for (int d = 0; d < depth; d++) + tl.addresses[d][loc_tensor_info] = tensor_lists[d][t].data_ptr(); + loc_tensor_info++; + + auto chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1) / chunk_size; + + for (auto chunk = 0; chunk < chunks_this_tensor; chunk++) { + // std::cout << chunks_this_tensor << std::endl; + tl.block_to_tensor[loc_block_info] = loc_tensor_info - 1; + tl.block_to_chunk[loc_block_info] = chunk; + loc_block_info++; + + bool tensors_full = (loc_tensor_info == depth_to_max_tensors[depth - 1] && + chunk == chunks_this_tensor - 1); + bool blocks_full = (loc_block_info == depth_to_max_blocks[depth - 1]); + bool last_chunk = (t == ntensors - 1 && chunk == chunks_this_tensor - 1); + if (tensors_full || blocks_full || last_chunk) { + // using accscalar_t = acc_type; + multi_tensor_apply_kernel<<>>( + chunk_size, noop_flag.DATA_PTR(), tl, callable, args...); + + AT_CUDA_CHECK(cudaGetLastError()); + + // Reset. The control flow possibilities here make my brain hurt. + loc_block_info = 0; + if (chunk == chunks_this_tensor - 1) { + // std::cout << "Hit case 1 " << cond1 << " " << cond2 << " " << cond3 << + // std::endl; + loc_tensor_info = 0; + tl.start_tensor_this_launch = t + 1; + } else { + // std::cout << "Hit case 2 " << cond1 << " " << cond2 << " " << cond3 << + // std::endl; + tl.sizes[0] = tl.sizes[loc_tensor_info - 1]; + for (int d = 0; d < depth; d++) + tl.addresses[d][0] = tl.addresses[d][loc_tensor_info - 1]; + loc_tensor_info = 1; + tl.start_tensor_this_launch = t; + } + } + } + } +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_common.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_common.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0f2895dfa328b1a15ac8b29075722584e8c5ec5e --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_common.cpp @@ -0,0 +1,342 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Functionality for swapping optimizer tensors to/from (NVMe) storage devices. 
+*/ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "deepspeed_aio_common.h" + +using namespace std; +using namespace std::chrono; + +#define DEBUG_DS_AIO_PERF 0 +#define DEBUG_DS_AIO_SUBMIT_PERF 0 + +static const std::string c_library_name = "deepspeed_aio"; + +static void _report_aio_statistics(const char* tag, + const std::vector>& latencies) + __attribute__((unused)); + +static void _report_aio_statistics(const char* tag, + const std::vector>& latencies) +{ + std::vector lat_usec; + for (auto& lat : latencies) { lat_usec.push_back(lat.count() * 1e6); } + const auto min_lat = *(std::min_element(lat_usec.begin(), lat_usec.end())); + const auto max_lat = *(std::max_element(lat_usec.begin(), lat_usec.end())); + const auto avg_lat = std::accumulate(lat_usec.begin(), lat_usec.end(), 0) / lat_usec.size(); + + std::cout << c_library_name << ": latency statistics(usec) " << tag + << " min/max/avg = " << min_lat << " " << max_lat << " " << avg_lat << std::endl; +} + +static void _get_aio_latencies(std::vector>& raw_latencies, + struct deepspeed_aio_latency_t& summary_latencies) +{ + std::vector lat_usec; + for (auto& lat : raw_latencies) { lat_usec.push_back(lat.count() * 1e6); } + summary_latencies._min_usec = *(std::min_element(lat_usec.begin(), lat_usec.end())); + summary_latencies._max_usec = *(std::max_element(lat_usec.begin(), lat_usec.end())); + summary_latencies._avg_usec = + std::accumulate(lat_usec.begin(), lat_usec.end(), 0) / lat_usec.size(); +} + +static void _do_io_submit_singles(const long long int n_iocbs, + const long long int iocb_index, + std::unique_ptr& aio_ctxt, + std::vector>& submit_times) +{ + for (auto i = 0; i < n_iocbs; ++i) { + const auto st = std::chrono::high_resolution_clock::now(); + const auto submit_ret = io_submit(aio_ctxt->_io_ctxt, 1, aio_ctxt->_iocbs.data() + i); + submit_times.push_back(std::chrono::high_resolution_clock::now() - st); +#if DEBUG_DS_AIO_SUBMIT_PERF + printf("submit(usec) %f io_index=%lld buf=%p len=%lu off=%llu \n", + submit_times.back().count() * 1e6, + iocb_index, + aio_ctxt->_iocbs[i]->u.c.buf, + aio_ctxt->_iocbs[i]->u.c.nbytes, + aio_ctxt->_iocbs[i]->u.c.offset); +#endif + assert(submit_ret > 0); + } +} + +static void _do_io_submit_block(const long long int n_iocbs, + const long long int iocb_index, + std::unique_ptr& aio_ctxt, + std::vector>& submit_times) +{ + const auto st = std::chrono::high_resolution_clock::now(); + const auto submit_ret = io_submit(aio_ctxt->_io_ctxt, n_iocbs, aio_ctxt->_iocbs.data()); + submit_times.push_back(std::chrono::high_resolution_clock::now() - st); +#if DEBUG_DS_AIO_SUBMIT_PERF + printf("submit(usec) %f io_index=%lld nr=%lld buf=%p len=%lu off=%llu \n", + submit_times.back().count() * 1e6, + iocb_index, + n_iocbs, + aio_ctxt->_iocbs[0]->u.c.buf, + aio_ctxt->_iocbs[0]->u.c.nbytes, + aio_ctxt->_iocbs[0]->u.c.offset); +#endif + assert(submit_ret > 0); +} + +static int _do_io_complete(const long long int min_completes, + const long long int max_completes, + std::unique_ptr& aio_ctxt, + std::vector>& reap_times) +{ + const auto start_time = std::chrono::high_resolution_clock::now(); + long long int n_completes = io_pgetevents(aio_ctxt->_io_ctxt, + min_completes, + max_completes, + aio_ctxt->_io_events.data(), + nullptr, + nullptr); + reap_times.push_back(std::chrono::high_resolution_clock::now() - start_time); + assert(n_completes >= 
min_completes); + return n_completes; +} + +void do_aio_operation_sequential(const bool read_op, + std::unique_ptr& aio_ctxt, + std::unique_ptr& xfer_ctxt, + deepspeed_aio_config_t* config, + deepspeed_aio_perf_t* perf) +{ + struct io_prep_context prep_ctxt(read_op, xfer_ctxt, aio_ctxt->_block_size, &aio_ctxt->_iocbs); + + const auto num_io_blocks = static_cast( + ceil(static_cast(xfer_ctxt->_num_bytes) / aio_ctxt->_block_size)); +#if DEBUG_DS_AIO_PERF + const auto io_op_name = std::string(read_op ? "read" : "write"); + std::cout << c_library_name << ": start " << io_op_name << " " << xfer_ctxt->_num_bytes + << " bytes with " << num_io_blocks << " io blocks" << std::endl; +#endif + + std::vector> submit_times; + std::vector> reap_times; + const auto max_queue_bytes = + static_cast(aio_ctxt->_queue_depth * aio_ctxt->_block_size); + + auto start = std::chrono::high_resolution_clock::now(); + for (long long iocb_index = 0; iocb_index < num_io_blocks; + iocb_index += aio_ctxt->_queue_depth) { + const auto start_offset = iocb_index * aio_ctxt->_block_size; + const auto start_buffer = (char*)xfer_ctxt->_mem_buffer + start_offset; + const auto n_iocbs = + min(static_cast(aio_ctxt->_queue_depth), (num_io_blocks - iocb_index)); + const auto num_bytes = min(max_queue_bytes, (xfer_ctxt->_num_bytes - start_offset)); + prep_ctxt.prep_iocbs(n_iocbs, num_bytes, start_buffer, start_offset); + + if (config->_single_submit) { + _do_io_submit_singles(n_iocbs, iocb_index, aio_ctxt, submit_times); + } else { + _do_io_submit_block(n_iocbs, iocb_index, aio_ctxt, submit_times); + } + + _do_io_complete(n_iocbs, n_iocbs, aio_ctxt, reap_times); + } + const std::chrono::duration elapsed = std::chrono::high_resolution_clock::now() - start; + + if (perf) { + _get_aio_latencies(submit_times, perf->_submit); + _get_aio_latencies(reap_times, perf->_complete); + perf->_e2e_usec = elapsed.count() * 1e6; + perf->_e2e_rate_GB = (xfer_ctxt->_num_bytes / elapsed.count() / 1e9); + } + +#if DEBUG_DS_AIO_PERF + _report_aio_statistics("submit", submit_times); + _report_aio_statistics("complete", reap_times); +#endif + +#if DEBUG_DS_AIO_PERF + std::cout << c_library_name << ": runtime(usec) " << elapsed.count() * 1e6 + << " rate(GB/sec) = " << (xfer_ctxt->_num_bytes / elapsed.count() / 1e9) << std::endl; +#endif + +#if DEBUG_DS_AIO_PERF + std::cout << c_library_name << ": finish " << io_op_name << " " << xfer_ctxt->_num_bytes + << " bytes " << std::endl; +#endif +} + +void do_aio_operation_overlap(const bool read_op, + std::unique_ptr& aio_ctxt, + std::unique_ptr& xfer_ctxt, + deepspeed_aio_config_t* config, + deepspeed_aio_perf_t* perf) +{ + struct io_prep_generator io_gen(read_op, xfer_ctxt, aio_ctxt->_block_size); + +#if DEBUG_DS_AIO_PERF + const auto io_op_name = std::string(read_op ? 
"read" : "write"); + std::cout << c_library_name << ": start " << io_op_name << " " << xfer_ctxt->_num_bytes + << " bytes with " << io_gen._num_io_blocks << " io blocks" << std::endl; +#endif + + std::vector> submit_times; + std::vector> reap_times; + + auto request_iocbs = aio_ctxt->_queue_depth; + auto n_pending_iocbs = 0; + const auto min_completes = 1; + auto start = std::chrono::high_resolution_clock::now(); + while (true) { + const auto n_iocbs = io_gen.prep_iocbs(request_iocbs - n_pending_iocbs, &aio_ctxt->_iocbs); + if (n_iocbs > 0) { + if (config->_single_submit) { + _do_io_submit_singles( + n_iocbs, (io_gen._next_iocb_index - n_iocbs), aio_ctxt, submit_times); + } else { + _do_io_submit_block( + n_iocbs, (io_gen._next_iocb_index - n_iocbs), aio_ctxt, submit_times); + } + } + + n_pending_iocbs += n_iocbs; + assert(n_pending_iocbs <= aio_ctxt->_queue_depth); + + if (n_pending_iocbs == 0) { break; } + + const auto n_complete = + _do_io_complete(min_completes, n_pending_iocbs, aio_ctxt, reap_times); + n_pending_iocbs -= n_complete; + } + + const std::chrono::duration elapsed = std::chrono::high_resolution_clock::now() - start; + + if (perf) { + _get_aio_latencies(submit_times, perf->_submit); + _get_aio_latencies(reap_times, perf->_complete); + perf->_e2e_usec = elapsed.count() * 1e6; + perf->_e2e_rate_GB = (xfer_ctxt->_num_bytes / elapsed.count() / 1e9); + } + +#if DEBUG_DS_AIO_PERF + _report_aio_statistics("submit", submit_times); + _report_aio_statistics("complete", reap_times); +#endif + +#if DEBUG_DS_AIO_PERF + std::cout << c_library_name << ": runtime(usec) " << elapsed.count() * 1e6 + << " rate(GB/sec) = " << (xfer_ctxt->_num_bytes / elapsed.count() / 1e9) << std::endl; +#endif + +#if DEBUG_DS_AIO_PERF + std::cout << c_library_name << ": finish " << io_op_name << " " << xfer_ctxt->_num_bytes + << " bytes " << std::endl; +#endif +} + +void report_file_error(const char* filename, const std::string file_op, const int error_code) +{ + std::string err_msg = file_op + std::string(" failed on ") + std::string(filename) + + " error = " + std::to_string(error_code); + std::cerr << c_library_name << ": " << err_msg << std::endl; +} + +int open_file(const char* filename, const bool read_op) +{ + const int flags = read_op ? (O_RDONLY | O_DIRECT) : (O_WRONLY | O_CREAT | O_DIRECT); +#if defined(__ENABLE_CANN__) + int* flags_ptr = (int*)&flags; + *flags_ptr = read_op ? (O_RDONLY) : (O_WRONLY | O_CREAT); +#endif + const int mode = 0600; + const auto fd = open(filename, flags, mode); + if (fd == -1) { + const auto error_code = errno; + const auto error_msg = read_op ? 
" open for read " : " open for write "; + report_file_error(filename, error_msg, error_code); + return -1; + } + return fd; +} + +int regular_read(const char* filename, std::vector& buffer) +{ + long long int num_bytes; + const auto f_size = get_file_size(filename, num_bytes); + assert(f_size != -1); + buffer.resize(num_bytes); + const auto fd = open(filename, O_RDONLY, 0600); + assert(fd != -1); + long long int read_bytes = 0; + auto r = 0; + do { + const auto buffer_ptr = buffer.data() + read_bytes; + const auto bytes_to_read = num_bytes - read_bytes; + r = read(fd, buffer_ptr, bytes_to_read); + read_bytes += r; + } while (r > 0); + + if (read_bytes != num_bytes) { + std::cerr << "read error " + << " read_bytes (read) = " << read_bytes << " num_bytes (fstat) = " << num_bytes + << std::endl; + } + assert(read_bytes == num_bytes); + close(fd); + return 0; +} + +static bool _validate_buffer(const char* filename, void* aio_buffer, const long long int num_bytes) +{ + std::vector regular_buffer; + const auto reg_ret = regular_read(filename, regular_buffer); + assert(0 == reg_ret); + std::cout << "regular read of " << filename << " returned " << regular_buffer.size() << " bytes" + << std::endl; + + if (static_cast(regular_buffer.size()) != num_bytes) { return false; } + + return (0 == memcmp(aio_buffer, regular_buffer.data(), regular_buffer.size())); +} + +bool validate_aio_operation(const bool read_op, + const char* filename, + void* aio_buffer, + const long long int num_bytes) +{ + const auto msg_suffix = std::string("deepspeed_aio_") + + std::string(read_op ? "read()" : "write()") + + std::string("using read()"); + + if (false == _validate_buffer(filename, aio_buffer, num_bytes)) { + std::cout << "Fail: correctness of " << msg_suffix << std::endl; + return false; + } + + std::cout << "Pass: correctness of " << msg_suffix << std::endl; + return true; +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_common.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_common.h new file mode 100644 index 0000000000000000000000000000000000000000..2940de945ee8f2347cb3e2b25f6057cc5fdc9dfd --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_common.h @@ -0,0 +1,38 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Functionality for swapping optimizer tensors to/from (NVMe) storage devices. 
+*/ + +#include +#include +#include +#include + +using namespace std; + +void do_aio_operation_sequential(const bool read_op, + std::unique_ptr& aio_ctxt, + std::unique_ptr& xfer_ctxt, + deepspeed_aio_config_t* config, + deepspeed_aio_perf_t* perf); + +void do_aio_operation_overlap(const bool read_op, + std::unique_ptr& aio_ctxt, + std::unique_ptr& xfer_ctxt, + deepspeed_aio_config_t* config, + deepspeed_aio_perf_t* perf); + +int open_file(const char* filename, const bool read_op); + +void report_file_error(const char* filename, const std::string file_op, const int error_code); + +int regular_read(const char* filename, std::vector& buffer); + +bool validate_aio_operation(const bool read_op, + const char* filename, + void* aio_buffer, + const long long int num_bytes); diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_types.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_types.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5e34a61065d4b0f16ba904d0e397eeccc149e621 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_types.cpp @@ -0,0 +1,76 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Functionality for swapping optimizer tensors to/from (NVMe) storage devices. +*/ + +#include + +#include "deepspeed_aio_utils.h" + +using namespace std; + +const int c_block_size = 128 * 1024; +const int c_io_queue_depth = 8; + +deepspeed_aio_config_t::deepspeed_aio_config_t() + : _block_size(c_block_size), + _queue_depth(c_io_queue_depth), + _single_submit(false), + _overlap_events(false), + _lock_memory(false) +{ +} + +deepspeed_aio_config_t::deepspeed_aio_config_t(const int block_size, + const int queue_depth, + const bool single_submit, + const bool overlap_events, + const bool lock_memory) + : _block_size(block_size), + _queue_depth(queue_depth), + _single_submit(single_submit), + _overlap_events(overlap_events), + _lock_memory(lock_memory) +{ +} + +void deepspeed_aio_latency_t::dump(const std::string tag) +{ + std::cout << tag << _min_usec << " " << _max_usec << " " << _avg_usec << " " << std::endl; +} + +void deepspeed_aio_latency_t::accumulate(const struct deepspeed_aio_latency_t& other) +{ + _min_usec += other._min_usec; + _max_usec += other._max_usec; + _avg_usec += other._avg_usec; +} + +void deepspeed_aio_latency_t::scale(const float scaler) +{ + _min_usec *= scaler; + _max_usec *= scaler; + _avg_usec *= scaler; +} + +aio_context::aio_context(const int block_size, const int queue_depth) +{ + _block_size = block_size; + _queue_depth = queue_depth; + for (auto i = 0; i < queue_depth; ++i) { + _iocbs.push_back((struct iocb*)calloc(1, sizeof(struct iocb))); + } + _io_events.resize(queue_depth); + io_queue_init(queue_depth, &_io_ctxt); +} + +aio_context::~aio_context() +{ + for (auto& iocb : _iocbs) { free(iocb); } + _io_events.resize(0); + io_queue_release(_io_ctxt); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_types.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_types.h new file mode 100644 index 0000000000000000000000000000000000000000..ce6a4e5cdfa75120c24dad599a8bc717765c220d --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_types.h @@ -0,0 +1,59 @@ +// Copyright (c) Microsoft Corporation. 
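The aio_context constructor above (io_queue_init plus a pool of calloc'ed iocbs) wraps the libaio primitives that _do_io_submit_block and _do_io_complete drive. A minimal self-contained read using the same primitives might look like the sketch below; the file path, sizes, and bare-bones error handling are placeholders (compile with -laio).

#include <fcntl.h>
#include <libaio.h>
#include <unistd.h>
#include <cstdlib>

int main()
{
    const size_t block_size = 128 * 1024;          // same default as c_block_size above
    void* buffer = nullptr;
    if (posix_memalign(&buffer, 4096, block_size) != 0) return 1;  // O_DIRECT needs alignment

    const int fd = open("/tmp/example.bin", O_RDONLY | O_DIRECT);
    if (fd == -1) { free(buffer); return 1; }

    io_context_t ctx = 0;
    if (io_queue_init(8, &ctx) != 0) return 1;     // queue depth 8, as in c_io_queue_depth

    struct iocb cb;
    struct iocb* cbs[1] = {&cb};
    io_prep_pread(&cb, fd, buffer, block_size, 0); // read one block at file offset 0
    if (io_submit(ctx, 1, cbs) != 1) return 1;

    struct io_event events[1];
    if (io_getevents(ctx, 1, 1, events, nullptr) != 1) return 1;  // wait for the completion

    io_queue_release(ctx);
    close(fd);
    free(buffer);
    return 0;
}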
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Functionality for swapping optimizer tensors to/from (NVMe) storage devices. +*/ + +#include +#include + +#include +#include + +using namespace std; + +struct deepspeed_aio_latency_t { + double _min_usec; + double _max_usec; + double _avg_usec; + + void dump(const std::string tag); + void accumulate(const deepspeed_aio_latency_t&); + void scale(const float value); +}; + +struct deepspeed_aio_perf_t { + deepspeed_aio_latency_t _submit; + deepspeed_aio_latency_t _complete; + double _e2e_usec; + double _e2e_rate_GB; +}; + +struct deepspeed_aio_config_t { + const int _block_size; + const int _queue_depth; + const bool _single_submit; + const bool _overlap_events; + const bool _lock_memory; + + deepspeed_aio_config_t(); + deepspeed_aio_config_t(const int block_size, + const int queue_depth, + const bool single_submit, + const bool overlap_events, + const bool lock_memory); +}; + +struct aio_context { + io_context_t _io_ctxt; + std::vector _io_events; + std::vector _iocbs; + int _block_size; + int _queue_depth; + + aio_context(const int block_size, const int queue_depth); + ~aio_context(); +}; diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_utils.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_utils.cpp new file mode 100644 index 0000000000000000000000000000000000000000..763b2c253a34c43501ec522a0570e068cdf11acc --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_utils.cpp @@ -0,0 +1,126 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Functionality for swapping optimizer tensors to/from (NVMe) storage devices. 
+*/ + +#include +#include + +#include "deepspeed_aio_utils.h" + +using namespace std; + +const int c_block_size = 128 * 1024; +const int c_io_queue_depth = 8; + +io_xfer_ctxt::io_xfer_ctxt(const int fd, + const long long int file_offset, + const long long int num_bytes, + const void* buffer) + : _fd(fd), _base_offset(file_offset), _mem_buffer(buffer), _num_bytes(num_bytes) +{ +} + +io_prep_context::io_prep_context(const bool read_op, + const std::unique_ptr& xfer_ctxt, + const size_t block_size, + const std::vector* iocbs) + : _read_op(read_op), _xfer_ctxt(xfer_ctxt), _block_size(block_size), _iocbs(iocbs) +{ +} + +void io_prep_context::prep_iocbs(const int n_iocbs, + const size_t num_bytes, + const void* start_buffer, + const long long int start_offset) +{ + assert(static_cast(n_iocbs) <= _iocbs->size()); + for (auto i = 0; i < n_iocbs; ++i) { + const auto shift = i * _block_size; + const auto xfer_buffer = (char*)start_buffer + _xfer_ctxt->_base_offset + shift; + const auto xfer_offset = _xfer_ctxt->_base_offset + start_offset + shift; + auto byte_count = _block_size; + if ((shift + _block_size) > num_bytes) { byte_count = num_bytes - shift; } + + if (_read_op) { + io_prep_pread(_iocbs->at(i), _xfer_ctxt->_fd, xfer_buffer, byte_count, xfer_offset); + } else { + io_prep_pwrite(_iocbs->at(i), _xfer_ctxt->_fd, xfer_buffer, byte_count, xfer_offset); + } + } +} + +io_prep_generator::io_prep_generator(const bool read_op, + const std::unique_ptr& xfer_ctxt, + const size_t block_size) + : _read_op(read_op), + _xfer_ctxt(xfer_ctxt), + _block_size(block_size), + _remaining_bytes(xfer_ctxt->_num_bytes), + _next_iocb_index(0) +{ + _num_io_blocks = + static_cast(ceil(static_cast(xfer_ctxt->_num_bytes) / block_size)); + _remaining_io_blocks = _num_io_blocks; +} + +int io_prep_generator::prep_iocbs(const int n_iocbs, std::vector* iocbs) +{ + if ((_remaining_bytes) == 0 || (_remaining_io_blocks == 0)) { + assert(static_cast(_remaining_bytes) == _remaining_io_blocks); + return 0; + } + + assert(static_cast(n_iocbs) <= iocbs->size()); + + auto actual_n_iocbs = min(static_cast(n_iocbs), _remaining_io_blocks); + for (auto i = 0; i < actual_n_iocbs; ++i, ++_next_iocb_index) { + const auto xfer_offset = _xfer_ctxt->_base_offset + (_next_iocb_index * _block_size); + const auto xfer_buffer = (char*)_xfer_ctxt->_mem_buffer + xfer_offset; + const auto num_bytes = min(static_cast(_block_size), _remaining_bytes); + + if (_read_op) { + io_prep_pread(iocbs->at(i), _xfer_ctxt->_fd, xfer_buffer, num_bytes, xfer_offset); + } else { + io_prep_pwrite(iocbs->at(i), _xfer_ctxt->_fd, xfer_buffer, num_bytes, xfer_offset); + } + _remaining_bytes -= num_bytes; + } + _remaining_io_blocks -= actual_n_iocbs; + + return actual_n_iocbs; +} + +int get_file_size(const char* filename, long long int& size) +{ + struct stat st; + if (stat(filename, &st) == -1) { return -1; } + size = st.st_size; + return 0; +} + +void* ds_page_aligned_alloc(const size_t size, const bool lock) +{ + void* ptr; + int retval; + + retval = posix_memalign(&ptr, (size_t)sysconf(_SC_PAGESIZE), size); + if (retval) { return nullptr; } + + if (lock == false) { return ptr; } + + auto mlock_ret = mlock(ptr, size); + if (mlock_ret != 0) { + auto mlock_error = errno; + std::cerr << "mlock failed to allocate " << size << " bytes with error no " << mlock_error + << " msg " << strerror(mlock_error) << std::endl; + free(ptr); + return nullptr; + } + + return ptr; +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_utils.h 
b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..9c58c2286610b2b18b2168b42159880232c8dcf6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/common/deepspeed_aio_utils.h @@ -0,0 +1,79 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Functionality for swapping optimizer tensors to/from (NVMe) storage devices. +*/ + +#pragma once + +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +struct io_xfer_ctxt { + const int _fd; + const long long int _base_offset; + const void* _mem_buffer; + const long long int _num_bytes; + + io_xfer_ctxt(const int fd, + const long long int file_offset, + const long long int num_bytes, + const void* buffer); +}; + +struct io_prep_context { + const bool _read_op; + const std::unique_ptr& _xfer_ctxt; + const size_t _block_size; + const std::vector* _iocbs; + + io_prep_context(const bool read_op, + const std::unique_ptr& xfer_ctxt, + const size_t block_size, + const std::vector* iocbs); + + void prep_iocbs(const int n_iocbs, + const size_t num_bytes, + const void* start_buffer, + const long long int start_offset); +}; + +struct io_prep_generator { + const bool _read_op; + const std::unique_ptr& _xfer_ctxt; + const size_t _block_size; + + long long int _remaining_bytes; + long long int _num_io_blocks; + long long int _remaining_io_blocks; + long long int _next_iocb_index; + + io_prep_generator(const bool read_op, + const std::unique_ptr& xfer_ctxt, + const size_t block_size); + + int prep_iocbs(const int n_iocbs, std::vector* iocbs); +}; + +void* ds_page_aligned_alloc(const size_t size, const bool lock = false); + +int get_file_size(const char* filename, long long int& size); diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_aio_thread.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_aio_thread.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c852711a28c035a6c8a40f325e819f0beee68098 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_aio_thread.cpp @@ -0,0 +1,104 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Functionality for swapping optimizer tensors to/from (NVMe) storage devices. +*/ + +#include "deepspeed_aio_thread.h" + +#if defined(__ENABLE_CANN__) +#include "torch_npu/csrc/framework/utils/OpAdapter.h" +#include "torch_npu/csrc/framework/utils/UtilForOpAdapter.h" +#endif + +using namespace std; + +io_op_desc_t::io_op_desc_t(const bool read_op, + const torch::Tensor& buffer, + const int fd, + const char* filename, + const long long int num_bytes, + const bool validate) + : _read_op(read_op), + _buffer(buffer), + _fd(fd), + _filename(filename), + _num_bytes(num_bytes), + _validate(validate) +{ + _cpu_buffer = (_buffer.is_cuda() || _buffer.is_xpu() +#if defined(__ENABLE_CANN__) + || torch_npu::utils::is_npu(_buffer) +#endif + ) + ? 
_buffer.to(torch::kCPU).pin_memory() + : _buffer; + _contiguous_buffer = _cpu_buffer.contiguous(); +} + +char* io_op_desc_t::data_ptr() const { return (char*)_contiguous_buffer.data_ptr(); } + +void io_op_desc_t::fini() +{ + if (_read_op && _buffer.is_cuda()) { _buffer.copy_(_cpu_buffer.to(torch::kCUDA)); } + if (_read_op && _buffer.is_xpu()) { _buffer.copy_(_cpu_buffer.to(torch::kXPU)); } +#if defined(__ENABLE_CANN__) + if (_read_op && torch_npu::utils::is_npu(_buffer)) { + auto device = at::Device("npu:0"); + _buffer.copy_(_cpu_buffer.to(device)); + } +#endif +} + +deepspeed_aio_thread_t::deepspeed_aio_thread_t(const int tid, deepspeed_aio_config_t& aio_config) + : _tid(tid), + _aio_config(aio_config), + _aio_ctxt(new aio_context(aio_config._block_size, aio_config._queue_depth)), + _time_to_exit(false) +{ +} + +deepspeed_aio_thread_t::~deepspeed_aio_thread_t() {} + +void deepspeed_aio_thread_t::run() +{ + while (true) { + std::shared_ptr next_io_op = nullptr; + + { + std::unique_lock lock(_work_sync._mutex); + _work_sync._cond_var.wait(lock, + [this] { return (!_work_queue.empty() || _time_to_exit); }); + if (!_work_queue.empty()) { + next_io_op = _work_queue.front(); + _work_queue.pop(); + } + } + + if (next_io_op) { + const auto base_offset = next_io_op->_num_bytes * _tid; + + std::unique_ptr xfer_ctxt(new io_xfer_ctxt( + next_io_op->_fd, base_offset, next_io_op->_num_bytes, next_io_op->data_ptr())); + + if (_aio_config._overlap_events) { + do_aio_operation_overlap( + next_io_op->_read_op, _aio_ctxt, xfer_ctxt, &_aio_config, nullptr); + } else { + do_aio_operation_sequential( + next_io_op->_read_op, _aio_ctxt, xfer_ctxt, &_aio_config, nullptr); + } + + { + std::lock_guard lock(_complete_sync._mutex); + _complete_queue.push(next_io_op); + } + _complete_sync._cond_var.notify_one(); + } + + if (_time_to_exit) { break; } + } +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_aio_thread.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_aio_thread.h new file mode 100644 index 0000000000000000000000000000000000000000..20799ecbb018eccfd7051b2ebf015695af35934a --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_aio_thread.h @@ -0,0 +1,59 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Functionality for swapping optimizer tensors to/from (NVMe) storage devices. 
+*/ + +#include +#include +#include +#include "deepspeed_py_aio.h" + +struct io_op_desc_t { + const bool _read_op; + torch::Tensor _buffer; + int _fd; + const std::string _filename; + const long long int _num_bytes; + torch::Tensor _cpu_buffer; + torch::Tensor _contiguous_buffer; + const bool _validate; + + io_op_desc_t(const bool read_op, + const torch::Tensor& buffer, + const int fd, + const char* filename, + const long long int num_bytes, + const bool validate); + + char* data_ptr() const; + void fini(); +}; + +struct thread_sync_t { + std::mutex _mutex; + std::condition_variable _cond_var; +}; + +struct deepspeed_aio_thread_t { + const int _tid; + deepspeed_aio_config_t& _aio_config; + + std::unique_ptr _aio_ctxt; + std::queue> _work_queue; + std::queue> _complete_queue; + + bool _time_to_exit; + + struct thread_sync_t _work_sync; + struct thread_sync_t _complete_sync; + + deepspeed_aio_thread_t(const int tid, deepspeed_aio_config_t& aio_config); + + ~deepspeed_aio_thread_t(); + + void run(); +}; diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_pin_tensor.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_pin_tensor.cpp new file mode 100644 index 0000000000000000000000000000000000000000..752823dc7dd2cac7a006088f09d11ef208d18d63 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_pin_tensor.cpp @@ -0,0 +1,45 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Functionality for managing CPU tensors occupying page-locked memory. +*/ + +#include "deepspeed_pin_tensor.h" + +using namespace std; + +deepspeed_pin_tensor_t::~deepspeed_pin_tensor_t() +{ + for (auto iter = _locked_tensors.begin(); iter != _locked_tensors.end(); ++iter) { + munlock(iter->first, iter->second); + } + _locked_tensors.clear(); +} + +torch::Tensor deepspeed_pin_tensor_t::alloc(const size_t num_elem, const at::ScalarType& elem_type) +{ + const auto num_bytes = num_elem * elementSize(elem_type); + auto pinned_buffer = ds_page_aligned_alloc(num_bytes, true); + assert(nullptr != pinned_buffer); + + _locked_tensors[pinned_buffer] = num_bytes; + + auto options = torch::TensorOptions().dtype(elem_type).device(torch::kCPU); + + return at::from_blob(pinned_buffer, static_cast(num_bytes), options); +} + +bool deepspeed_pin_tensor_t::free(torch::Tensor& locked_tensor) +{ + auto addr = locked_tensor.data_ptr(); + if (_locked_tensors.find(addr) != _locked_tensors.end()) { + munlock(addr, _locked_tensors[addr]); + _locked_tensors.erase(addr); + return true; + } + + return false; +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_pin_tensor.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_pin_tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..4350a4ac7df6754b497b2ab863ef02ee6df32426 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_pin_tensor.h @@ -0,0 +1,27 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Functionality for managing CPU tensors occupying page-locked memory. +TODO: Implement a full-featured manager that +1. Avoid page-locked memory leaks +2. Minimize page-locked memory usage by reducing internal fragmentation +Functionality for managing CPU tensors occupying page-locked memory. 
+*/ + +#include +#include "deepspeed_py_aio.h" + +struct deepspeed_pin_tensor_t { + std::map _locked_tensors; + + deepspeed_pin_tensor_t() = default; + + ~deepspeed_pin_tensor_t(); + + torch::Tensor alloc(const size_t num_elem, const at::ScalarType& elem_type); + + bool free(torch::Tensor& locked_tensor); +}; diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio.cpp new file mode 100644 index 0000000000000000000000000000000000000000..387b713f2bfc2ba90fc4b3b677b2834623d166c2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio.cpp @@ -0,0 +1,125 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Copyright 2020 The Microsoft DeepSpeed Team +Licensed under the MIT license. + +Functionality for swapping optimizer tensors to/from (NVMe) storage devices. +*/ + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "deepspeed_py_aio.h" + +using namespace std; +using namespace std::chrono; + +#define DEBUG_DS_AIO_READ 0 +#define DEBUG_DS_AIO_WRITE 0 + +static const std::string c_library_name = "deepspeed_aio"; + +int deepspeed_py_aio_write(const torch::Tensor& buffer, + const char* filename, + const int block_size, + const int queue_depth, + const bool single_submit, + const bool overlap_events, + const bool validate) +{ + const auto start_time = std::chrono::high_resolution_clock::now(); + deepspeed_aio_config_t config(block_size, queue_depth, single_submit, overlap_events, false); + + const auto fd = open_file(filename, false); + if (fd == -1) { return -1; } + + auto write_buffer = (char*)buffer.data_ptr(); + const auto num_write_bytes = static_cast(buffer.nbytes()); + std::unique_ptr xfer_ctxt(new io_xfer_ctxt(fd, 0, num_write_bytes, write_buffer)); + std::unique_ptr aio_ctxt(new aio_context(config._block_size, config._queue_depth)); + + if (config._overlap_events) { + do_aio_operation_overlap(false, aio_ctxt, xfer_ctxt, &config, nullptr); + } else { + do_aio_operation_sequential(false, aio_ctxt, xfer_ctxt, &config, nullptr); + } + const std::chrono::duration aio_time = + std::chrono::high_resolution_clock::now() - start_time; + + close(fd); + + if (validate) { validate_aio_operation(false, filename, write_buffer, num_write_bytes); } + + const std::chrono::duration fn_time = + std::chrono::high_resolution_clock::now() - start_time; + std::cout << "Elapsed time(usec): " + << "aio = " << aio_time.count() * 1e6 << " call = " << fn_time.count() * 1e6 + << std::endl; + return 0; +} + +int deepspeed_py_aio_read(torch::Tensor& buffer, + const char* filename, + const int block_size, + const int queue_depth, + const bool single_submit, + const bool overlap_events, + const bool validate) +{ + const auto start_time = std::chrono::high_resolution_clock::now(); + long long num_file_bytes; + if (-1 == get_file_size(filename, num_file_bytes)) { + const auto error_code = errno; + report_file_error(filename, " fstat for read", error_code); + return -1; + } + + deepspeed_aio_config_t config(block_size, queue_depth, single_submit, overlap_events, false); + const auto fd = open_file(filename, true); + if (fd == -1) { return -1; } + + auto read_buffer = (char*)buffer.data_ptr(); + assert(static_cast(buffer.nbytes()) == num_file_bytes); + + std::unique_ptr 
xfer_ctxt(new io_xfer_ctxt(fd, 0, num_file_bytes, read_buffer)); + std::unique_ptr aio_ctxt(new aio_context(config._block_size, config._queue_depth)); + + if (config._overlap_events) { + do_aio_operation_overlap(true, aio_ctxt, xfer_ctxt, &config, nullptr); + } else { + do_aio_operation_sequential(true, aio_ctxt, xfer_ctxt, &config, nullptr); + } + const std::chrono::duration aio_time = + std::chrono::high_resolution_clock::now() - start_time; + + close(fd); + + if (validate) { validate_aio_operation(true, filename, read_buffer, num_file_bytes); } + + const std::chrono::duration fn_time = + std::chrono::high_resolution_clock::now() - start_time; + std::cout << "Elapsed time(usec): " + << "aio = " << aio_time.count() * 1e6 << " call = " << fn_time.count() * 1e6 + << std::endl; + return 0; +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio.h new file mode 100644 index 0000000000000000000000000000000000000000..11d5225de9f10855878060cd8228a3113534a5ea --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio.h @@ -0,0 +1,31 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Copyright 2020 The Microsoft DeepSpeed Team +Licensed under the MIT license. + +Functionality for swapping optimizer tensors to/from (NVMe) storage devices. +*/ + +#include +#include +#include + +int deepspeed_py_aio_write(const torch::Tensor& buffer, + const char* filename, + const int block_size, + const int queue_depth, + const bool single_submit, + const bool overlap_events, + const bool validate); + +int deepspeed_py_aio_read(torch::Tensor& buffer, + const char* filename, + const int block_size, + const int queue_depth, + const bool single_submit, + const bool overlap_events, + const bool validate); diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio_handle.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio_handle.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c21e92de94496cab5ca41bd9e32bb449c339f3ef --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio_handle.cpp @@ -0,0 +1,298 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Copyright 2020 The Microsoft DeepSpeed Team +Licensed under the MIT license. + +Functionality for swapping optimizer tensors to/from (NVMe) storage devices. 
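For orientation: the pread()/pwrite() methods in this file split one file evenly across the
handle's worker threads. Each scheduled io_op_desc_t carries num_bytes = total_bytes / num_threads,
and every worker derives its own file offset as base_offset = num_bytes * tid (see
deepspeed_aio_thread_t::run). A tiny standalone sketch of that sharding arithmetic, with
illustrative sizes only:

    #include <cstddef>
    #include <cstdio>

    int main()
    {
        const size_t total_bytes = 32u << 20;  // e.g. a 32 MiB file
        const int num_threads = 8;             // the handle's num_threads
        const size_t bytes_per_thread = total_bytes / num_threads;  // must divide evenly

        for (int tid = 0; tid < num_threads; ++tid) {
            const size_t base_offset = bytes_per_thread * tid;
            std::printf("thread %d handles bytes [%zu, %zu)\n",
                        tid, base_offset, base_offset + bytes_per_thread);
        }
        return 0;
    }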
+*/ + +#include "deepspeed_py_aio_handle.h" + +using namespace std; + +static void _start_aio_thread(std::shared_ptr ctxt) { ctxt->run(); } + +deepspeed_aio_handle_t::deepspeed_aio_handle_t(const int block_size, + const int queue_depth, + const bool single_submit, + const bool overlap_events, + const int num_threads) + : _aio_ctxt(new aio_context(block_size, queue_depth)), + _single_submit(single_submit), + _overlap_events(overlap_events), + _num_threads(num_threads), + _aio_config(block_size, queue_depth, single_submit, overlap_events, false), + _num_pending_ops(0), + _pinned_tensor_mgr(new deepspeed_pin_tensor_t()) +{ + for (auto i = 0; i < num_threads; ++i) { + _thread_contexts.push_back(std::make_shared(i, _aio_config)); + } + + for (auto& ctxt : _thread_contexts) { + _threads.push_back(std::thread(_start_aio_thread, ctxt)); + } +} + +deepspeed_aio_handle_t::~deepspeed_aio_handle_t() +{ + _stop_threads(); + for (auto& thr : _threads) { thr.join(); } +} + +const int deepspeed_aio_handle_t::get_block_size() const +{ + return _aio_ctxt ? _aio_ctxt->_block_size : -1; +} + +const int deepspeed_aio_handle_t::get_queue_depth() const +{ + return _aio_ctxt ? _aio_ctxt->_queue_depth : -1; +} + +const bool deepspeed_aio_handle_t::get_single_submit() const { return _single_submit; } + +const bool deepspeed_aio_handle_t::get_overlap_events() const { return _overlap_events; } + +const int deepspeed_aio_handle_t::get_thread_count() const { return _num_threads; } + +int deepspeed_aio_handle_t::read(torch::Tensor& buffer, const char* filename, const bool validate) +{ + const auto start_time = std::chrono::high_resolution_clock::now(); + + assert(_aio_ctxt); + + long long num_file_bytes; + if (-1 == get_file_size(filename, num_file_bytes)) { + const auto error_code = errno; + report_file_error(filename, " fstat for read", error_code); + return -1; + } + assert(static_cast(buffer.nbytes()) == num_file_bytes); + + const auto fd = open_file(filename, true); + if (fd == -1) { return -1; } + + auto read_buffer = (char*)buffer.data_ptr(); + std::unique_ptr xfer_ctxt(new io_xfer_ctxt(fd, 0, num_file_bytes, read_buffer)); + + if (_aio_config._overlap_events) { + do_aio_operation_overlap(true, _aio_ctxt, xfer_ctxt, &_aio_config, nullptr); + } else { + do_aio_operation_sequential(true, _aio_ctxt, xfer_ctxt, &_aio_config, nullptr); + } + + close(fd); + const std::chrono::duration aio_time = + std::chrono::high_resolution_clock::now() - start_time; + + if (validate) { validate_aio_operation(true, filename, read_buffer, num_file_bytes); } + const std::chrono::duration fn_time = + std::chrono::high_resolution_clock::now() - start_time; + std::cout << "Elapsed time(usec): " + << "aio = " << aio_time.count() * 1e6 << " call = " << fn_time.count() * 1e6 + << std::endl; + return 0; +} + +int deepspeed_aio_handle_t::write(const torch::Tensor& buffer, + const char* filename, + const bool validate) +{ + assert(_aio_ctxt); + + const auto start_time = std::chrono::high_resolution_clock::now(); + + const auto fd = open_file(filename, false); + if (fd == -1) { return -1; } + + auto write_buffer = (char*)buffer.data_ptr(); + const auto num_write_bytes = static_cast(buffer.nbytes()); + std::unique_ptr xfer_ctxt(new io_xfer_ctxt(fd, 0, num_write_bytes, write_buffer)); + + if (_aio_config._overlap_events) { + do_aio_operation_overlap(false, _aio_ctxt, xfer_ctxt, &_aio_config, nullptr); + } else { + do_aio_operation_sequential(false, _aio_ctxt, xfer_ctxt, &_aio_config, nullptr); + } + const std::chrono::duration aio_time = + 
std::chrono::high_resolution_clock::now() - start_time; + + close(fd); + + if (validate) { validate_aio_operation(false, filename, write_buffer, num_write_bytes); } + + const std::chrono::duration fn_time = + std::chrono::high_resolution_clock::now() - start_time; + std::cout << "Elapsed time(usec): " + << "aio = " << aio_time.count() * 1e6 << " call = " << fn_time.count() * 1e6 + << std::endl; + return 0; +} + +void deepspeed_aio_handle_t::_schedule_aio_work(std::shared_ptr scheduled_op) +{ + for (auto& ctxt : _thread_contexts) { + { + std::lock_guard lock(ctxt->_work_sync._mutex); + ctxt->_work_queue.push(scheduled_op); + } + ctxt->_work_sync._cond_var.notify_one(); + } + _num_pending_ops++; +} + +std::shared_ptr deepspeed_aio_handle_t::_wait_for_aio_work() +{ + std::shared_ptr completed_op = nullptr; + for (auto& ctxt : _thread_contexts) { + std::unique_lock lock(ctxt->_complete_sync._mutex); + ctxt->_complete_sync._cond_var.wait(lock, + [ctxt] { return !ctxt->_complete_queue.empty(); }); + completed_op = ctxt->_complete_queue.front(); + ctxt->_complete_queue.pop(); + } + return completed_op; +} + +void deepspeed_aio_handle_t::_stop_threads() +{ + assert(0 == _num_pending_ops); + for (auto& ctxt : _thread_contexts) { + { + std::lock_guard lock(ctxt->_work_sync._mutex); + ctxt->_time_to_exit = true; + } + ctxt->_work_sync._cond_var.notify_one(); + } +} + +int deepspeed_aio_handle_t::wait() +{ + assert(_num_pending_ops > 0); + auto num_completed_ops = 0; + + while (_num_pending_ops > 0) { + auto completed_op = _wait_for_aio_work(); + + completed_op->fini(); + + close(completed_op->_fd); + + if (completed_op->_validate) { + validate_aio_operation(completed_op->_read_op, + completed_op->_filename.c_str(), + completed_op->data_ptr(), + _num_threads * completed_op->_num_bytes); + } + --_num_pending_ops; + ++num_completed_ops; + } + + return num_completed_ops; +} + +bool deepspeed_aio_handle_t::_is_valid_parallel_aio_op(const bool read_op, + const long long int num_bytes) +{ + const auto op_string = read_op ? 
"Read" : "Write"; + if (num_bytes % get_thread_count()) { + std::cout << "deepspeed_aio failure: parallel " << op_string << " num_bytes = " << num_bytes + << " not divisible by thread count = " << get_thread_count() << std::endl; + return false; + } + + return true; +} + +int deepspeed_aio_handle_t::pread(const torch::Tensor& buffer, + const char* filename, + const bool validate, + const bool async) +{ + long long num_file_bytes; + if (-1 == get_file_size(filename, num_file_bytes)) { + const auto error_code = errno; + report_file_error(filename, " fstat for read", error_code); + return -1; + } + const auto buffer_bytes = static_cast(buffer.nbytes()); + if (buffer_bytes != num_file_bytes) { + std::cout << filename << ": buffer nbytes != file bytes " << buffer_bytes + << " != " << num_file_bytes << std::endl; + } + assert(static_cast(buffer.nbytes()) == num_file_bytes); + assert((num_file_bytes % _num_threads) == 0); + + if (!_is_valid_parallel_aio_op(true, num_file_bytes)) { return -1; } + + const auto fd = open_file(filename, true); + if (fd == -1) { return -1; } + + auto scheduled_op = std::make_shared( + true, buffer, fd, filename, (num_file_bytes / _num_threads), validate); + + _schedule_aio_work(scheduled_op); + + if (async) { return 0; } + + return wait(); +} + +int deepspeed_aio_handle_t::pwrite(const torch::Tensor& buffer, + const char* filename, + const bool validate, + const bool async) +{ + const auto num_write_bytes = static_cast(buffer.nbytes()); + assert((num_write_bytes % _num_threads) == 0); + + if (!_is_valid_parallel_aio_op(false, num_write_bytes)) { return -1; } + + const auto fd = open_file(filename, false); + if (fd == -1) { return -1; } + + auto scheduled_op = std::make_shared( + false, buffer, fd, filename, (num_write_bytes / _num_threads), validate); + + _schedule_aio_work(scheduled_op); + + if (async) { return 0; } + + return wait(); +} + +int deepspeed_aio_handle_t::sync_pread(torch::Tensor& buffer, const char* filename) +{ + return pread(buffer, filename, false, false); +} + +int deepspeed_aio_handle_t::sync_pwrite(const torch::Tensor& buffer, const char* filename) +{ + return pwrite(buffer, filename, false, false); +} + +int deepspeed_aio_handle_t::async_pread(torch::Tensor& buffer, const char* filename) +{ + return pread(buffer, filename, false, true); +} + +int deepspeed_aio_handle_t::async_pwrite(const torch::Tensor& buffer, const char* filename) +{ + return pwrite(buffer, filename, false, true); +} + +at::Tensor deepspeed_aio_handle_t::new_cpu_locked_tensor(const size_t num_elem, + const torch::Tensor& example_tensor) +{ + return _pinned_tensor_mgr->alloc(num_elem, example_tensor.scalar_type()); +} + +bool deepspeed_aio_handle_t::free_cpu_locked_tensor(torch::Tensor& locked_tensor) +{ + return _pinned_tensor_mgr->free(locked_tensor); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio_handle.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio_handle.h new file mode 100644 index 0000000000000000000000000000000000000000..3a254c3814a2755108a4cd94fae85cf6f5495de3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_aio_handle.h @@ -0,0 +1,77 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Functionality for swapping optimizer tensors to/from (NVMe) storage devices. 
+*/ + +#include +#include +#include "deepspeed_aio_thread.h" +#include "deepspeed_pin_tensor.h" + +struct deepspeed_aio_handle_t { + std::unique_ptr _aio_ctxt; + const bool _single_submit; + const bool _overlap_events; + const int _num_threads; + deepspeed_aio_config_t _aio_config; + + std::vector> _thread_contexts; + std::vector _threads; + int _num_pending_ops; + std::unique_ptr _pinned_tensor_mgr; + + deepspeed_aio_handle_t(const int block_size, + const int queue_depth, + const bool single_submit, + const bool overlap_events, + const int num_threads); + + ~deepspeed_aio_handle_t(); + + const int get_block_size() const; + const int get_queue_depth() const; + const bool get_single_submit() const; + const bool get_overlap_events() const; + const int get_thread_count() const; + + int read(torch::Tensor& buffer, const char* filename, const bool validate); + + int write(const torch::Tensor& buffer, const char* filename, const bool validate); + + int pread(const torch::Tensor& buffer, + const char* filename, + const bool validate, + const bool async); + + int pwrite(const torch::Tensor& buffer, + const char* filename, + const bool validate, + const bool async); + + int sync_pread(torch::Tensor& buffer, const char* filename); + + int sync_pwrite(const torch::Tensor& buffer, const char* filename); + + int async_pread(torch::Tensor& buffer, const char* filename); + + int async_pwrite(const torch::Tensor& buffer, const char* filename); + + // TODO: Make API's args to be shape and dtype. + torch::Tensor new_cpu_locked_tensor(const size_t num_elem, const torch::Tensor& example_tensor); + + bool free_cpu_locked_tensor(torch::Tensor&); + + int wait(); + + void _stop_threads(); + + void _schedule_aio_work(std::shared_ptr scheduled_op); + + std::shared_ptr _wait_for_aio_work(); + + bool _is_valid_parallel_aio_op(const bool read_op, const long long int num_bytes); +}; diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_copy.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_copy.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8a59107dd3474645dab614ef33b7b249d7ee97e4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_copy.cpp @@ -0,0 +1,135 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Functionality for swapping optimizer tensors to/from (NVMe) storage devices. 
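For orientation: the helpers below implement a SIMD memcpy. The bulk of the buffer is moved
ROUND_DOWN(size, SIMD_WIDTH * unroll) elements at a time with vector loads/stores, and the
remainder is handled by a plain scalar loop. A small standalone sketch of that rounding
arithmetic (assuming the AVX2 path, i.e. SIMD_WIDTH == 8 floats, with a 4-register unroll):

    #include <cstddef>
    #include <cstdio>

    // Same idiom as ROUND_DOWN below; step must be a power of two.
    #define EX_ROUND_DOWN(size, step) ((size) & ~((step)-1))

    int main()
    {
        const size_t simd_width = 8;    // AVX2: 8 floats per 256-bit register
        const size_t unroll = 4;        // helper_memcpy_4 moves 4 registers per iteration
        const size_t param_size = 1000;

        const size_t rounded = EX_ROUND_DOWN(param_size, simd_width * unroll);
        std::printf("vectorized: %zu elements, scalar tail: %zu elements\n",
                    rounded, param_size - rounded);   // prints 992 and 8
        return 0;
    }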
+*/ + +#include "deepspeed_py_copy.h" +#include + +#define ROUND_DOWN(size, step) ((size) & ~((step)-1)) + +#if defined(__AVX512__) or defined(__AVX256__) +union AVX_Data { +#if defined(__AVX512__) + __m512 data; +#else + __m256 data; +#endif +}; +#endif + +static void helper_memcpy_1(float* dest, float* src, size_t param_size) +{ + size_t rounded_size = 0; + +#if defined(__AVX512__) or defined(__AVX256__) + + rounded_size = ROUND_DOWN(param_size, SIMD_WIDTH); + + for (size_t t = 0; t < rounded_size; t += TILE) { + size_t copy_size = TILE; + if ((t + TILE) > rounded_size) copy_size = rounded_size - t; + size_t offset = copy_size + t; +#pragma omp parallel for + for (size_t i = t; i < offset; i += SIMD_WIDTH) { + AVX_Data src_4; + src_4.data = SIMD_LOAD(src + i); + + SIMD_STORE(dest + i, src_4.data); + } + } + +#endif + + if (param_size > rounded_size) { +#pragma omp parallel for + for (size_t k = rounded_size; k < param_size; k++) { dest[k] = src[k]; } + } +} + +static void helper_memcpy_4(float* dest, float* src, size_t param_size) +{ + size_t rounded_size = 0; + +#if defined(__AVX512__) or defined(__AVX256__) + + rounded_size = ROUND_DOWN(param_size, (SIMD_WIDTH << 2)); + + for (size_t t = 0; t < rounded_size; t += TILE) { + size_t copy_size = TILE; + if ((t + TILE) > rounded_size) copy_size = rounded_size - t; + size_t offset = copy_size + t; +#pragma omp parallel for + for (size_t i = t; i < offset; i += (SIMD_WIDTH << 2)) { + AVX_Data src_4[4]; + src_4[0].data = SIMD_LOAD(src + i); + src_4[1].data = SIMD_LOAD(src + i + SIMD_WIDTH); + src_4[2].data = SIMD_LOAD(src + i + (SIMD_WIDTH << 1)); + src_4[3].data = SIMD_LOAD(src + i + SIMD_WIDTH * 3); + + SIMD_STORE(dest + i, src_4[0].data); + SIMD_STORE(dest + i + SIMD_WIDTH, src_4[1].data); + SIMD_STORE(dest + i + (SIMD_WIDTH << 1), src_4[2].data); + SIMD_STORE(dest + i + SIMD_WIDTH * 3, src_4[3].data); + } + } +#endif + if (param_size > rounded_size) + helper_memcpy_1((dest + rounded_size), (src + rounded_size), (param_size - rounded_size)); +} + +static void helper_mempcy_8(float* dest, float* src, size_t param_size) +{ + size_t rounded_size = 0; + +#if defined(__AVX512__) or defined(__AVX256__) + + rounded_size = ROUND_DOWN(param_size, (SIMD_WIDTH << 2)); + + for (size_t t = 0; t < rounded_size; t += TILE) { + size_t copy_size = TILE; + if ((t + TILE) > rounded_size) copy_size = rounded_size - t; + size_t offset = copy_size + t; +#pragma omp parallel for + for (size_t i = t; i < offset; i += (SIMD_WIDTH << 3)) { + AVX_Data src_4[8]; + src_4[0].data = SIMD_LOAD(src + i); + src_4[1].data = SIMD_LOAD(src + i + SIMD_WIDTH); + src_4[2].data = SIMD_LOAD(src + i + (SIMD_WIDTH << 1)); + src_4[3].data = SIMD_LOAD(src + i + SIMD_WIDTH * 3); + src_4[4].data = SIMD_LOAD(src + i + (SIMD_WIDTH << 2)); + src_4[5].data = SIMD_LOAD(src + i + SIMD_WIDTH * 5); + src_4[6].data = SIMD_LOAD(src + i + SIMD_WIDTH * 6); + src_4[7].data = SIMD_LOAD(src + i + SIMD_WIDTH * 7); + + SIMD_STORE(dest + i, src_4[0].data); + SIMD_STORE(dest + i + SIMD_WIDTH, src_4[1].data); + SIMD_STORE(dest + i + (SIMD_WIDTH << 1), src_4[2].data); + SIMD_STORE(dest + i + SIMD_WIDTH * 3, src_4[3].data); + SIMD_STORE(dest + i + (SIMD_WIDTH << 2), src_4[4].data); + SIMD_STORE(dest + i + SIMD_WIDTH * 5, src_4[5].data); + SIMD_STORE(dest + i + SIMD_WIDTH * 6, src_4[6].data); + SIMD_STORE(dest + i + SIMD_WIDTH * 7, src_4[7].data); + } + } +#endif + if (param_size > rounded_size) + helper_memcpy_4((dest + rounded_size), (src + rounded_size), (param_size - rounded_size)); +} + +int 
deepspeed_py_memcpy(torch::Tensor& dest, const torch::Tensor& src) +{ + auto dest_c = dest.contiguous(); + auto src_c = src.contiguous(); + + float* dest_ptr = (float*)dest_c.data_ptr(); + float* src_ptr = (float*)src_c.data_ptr(); + + helper_mempcy_8(dest_ptr, src_ptr, dest_c.size(0)); + + return 0; +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_copy.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..19ba28317d008c40f54f0154ac2c75d8a885e227 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/deepspeed_py_copy.h @@ -0,0 +1,46 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Copyright 2020 The Microsoft DeepSpeed Team +Licensed under the MIT license. + +Functionality for swapping optimizer tensors to/from (NVMe) storage devices. +*/ + +#if (__x86_64__ || __i386__) +#include +#include +#endif + +#include +#include +#include + +#define TILE (1024 * 1024 * 1024) + +#if defined(__AVX512__) +#define SIMD_STORE(a, d) _mm512_storeu_ps(a, d) +#define SIMD_LOAD(x) _mm512_loadu_ps(x) +#define SIMD_SET(x) _mm512_set1_ps(x) +#define SIMD_MUL(x, y) _mm512_mul_ps(x, y) +#define SIMD_FMA(x, y, c) _mm512_fmadd_ps(x, y, c) +#define SIMD_SQRT(x) _mm512_sqrt_ps(x) +#define SIMD_DIV(x, y) _mm512_div_ps(x, y) +#define SIMD_WIDTH 16 +#else +#if defined(__AVX256__) +#define SIMD_STORE(a, d) _mm256_storeu_ps(a, d) +#define SIMD_LOAD(x) _mm256_loadu_ps(x) +#define SIMD_SET(x) _mm256_set1_ps(x) +#define SIMD_MUL(x, y) _mm256_mul_ps(x, y) +#define SIMD_FMA(x, y, c) _mm256_fmadd_ps(x, y, c) +#define SIMD_SQRT(x) _mm256_sqrt_ps(x) +#define SIMD_DIV(x, y) _mm256_div_ps(x, y) +#define SIMD_WIDTH 8 +#endif +#endif + +int deepspeed_py_memcpy(torch::Tensor& dest, const torch::Tensor& src); diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/py_ds_aio.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/py_ds_aio.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9033549bc0d2ebf3388ed159e52ede6cbc684ce1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_lib/py_ds_aio.cpp @@ -0,0 +1,46 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Functionality for swapping optimizer tensors to/from (NVMe) storage devices. 
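For readers unfamiliar with the binding pattern used below, a minimal self-contained pybind11
sketch of the same idioms (a toy module and toy class, not the actual DeepSpeed bindings):

    #include <torch/extension.h>

    // Toy class standing in for deepspeed_aio_handle_t in this sketch.
    struct toy_handle_t {
        int _block_size;
        explicit toy_handle_t(int block_size) : _block_size(block_size) {}
        int get_block_size() const { return _block_size; }
    };

    PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
    {
        // Free-function binding, as done for aio_read / aio_write below.
        m.def("ping", []() { return 42; }, "trivial example function");

        // Class binding with an explicit constructor signature, as done for aio_handle below.
        py::class_<toy_handle_t>(m, "toy_handle")
            .def(py::init<int>())
            .def("get_block_size", &toy_handle_t::get_block_size);
    }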
+*/ + +#include +#include "deepspeed_py_aio_handle.h" +#include "deepspeed_py_copy.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("aio_read", &deepspeed_py_aio_read, "DeepSpeed Asynchronous I/O Read"); + + m.def("aio_write", &deepspeed_py_aio_write, "DeepSpeed Asynchronous I/O Write"); + + m.def("deepspeed_memcpy", &deepspeed_py_memcpy, "DeepSpeed Memory Copy"); + + py::class_(m, "aio_handle") + .def(py::init()) + + .def("get_block_size", &deepspeed_aio_handle_t::get_block_size) + .def("get_queue_depth", &deepspeed_aio_handle_t::get_queue_depth) + .def("get_single_submit", &deepspeed_aio_handle_t::get_single_submit) + .def("get_overlap_events", &deepspeed_aio_handle_t::get_overlap_events) + .def("get_thread_count", &deepspeed_aio_handle_t::get_thread_count) + + .def("read", &deepspeed_aio_handle_t::read) + .def("write", &deepspeed_aio_handle_t::write) + + .def("pread", &deepspeed_aio_handle_t::pread) + .def("pwrite", &deepspeed_aio_handle_t::pwrite) + + .def("sync_pread", &deepspeed_aio_handle_t::sync_pread) + .def("sync_pwrite", &deepspeed_aio_handle_t::sync_pwrite) + .def("async_pread", &deepspeed_aio_handle_t::async_pread) + .def("async_pwrite", &deepspeed_aio_handle_t::async_pwrite) + + .def("new_cpu_locked_tensor", &deepspeed_aio_handle_t::new_cpu_locked_tensor) + .def("free_cpu_locked_tensor", &deepspeed_aio_handle_t::free_cpu_locked_tensor) + + .def("wait", &deepspeed_aio_handle_t::wait); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_test/single_process_config.json b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_test/single_process_config.json new file mode 100644 index 0000000000000000000000000000000000000000..275c54135cd83d3d8508ea1f769b823af9529821 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/aio/py_test/single_process_config.json @@ -0,0 +1,29 @@ +{ + "block_size": [ + "128K", + "256K", + "1M" + ], + "queue_depth": [ + 4, + 16, + 32 + ], + "io_parallel": [ + 1, + 2, + 4, + 8 + ], + "single_submit": [ + true, + false + ], + "overlap_events": [ + true, + false + ], + "threads": [ + 1 + ] +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/adam/fused_adam.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/adam/fused_adam.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d25578f410da278dace61fb5b488b14ed1257c9f --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/adam/fused_adam.cpp @@ -0,0 +1,48 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "cpu_adam.h" + +// C++ interface + +void multi_tensor_adam(int chunk_size, + at::Tensor noop_flag, + std::vector> tensor_lists, /*gpmv*/ + const float lr, + const float beta1, + const float beta2, + const float epsilon, + const int step, + const int mode, + const int bias_correction, + const float weight_decay) +{ + static bool initialized = false; + if (!initialized) { + create_adam_optimizer(0); + initialized = true; + } + for (int i = 0; i < tensor_lists[0].size(); i++) { + ds_adam_step(0, + step, + lr, + beta1, + beta2, + epsilon, + weight_decay, + bias_correction, + tensor_lists[1][i], + tensor_lists[0][i], + tensor_lists[2][i], + tensor_lists[3][i]); + } +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("multi_tensor_adam", + &multi_tensor_adam, + "Compute and apply gradient update to parameters for Adam optimizer"); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/ccl.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/ccl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d28509e592668aca100555e18ae47e3aadacb43a --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/ccl.cpp @@ -0,0 +1,344 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include + +#include +#include "shm.h" + +// #define DO_PROFILE +#ifdef DO_PROFILE +#include +#include +#endif + +// Communication settings +static int world_rank = -1; +static int world_size = -1; + +static std::set _comm_ids; +static std::set _colors; +static std::vector _ccl_comms; +static ccl::shared_ptr_class sub_kvs; +static std::map, int> group_to_comm_id; + +ccl::communicator& _get_comm_from_group() { return _ccl_comms[0]; } +ccl::communicator& _get_comm_from_group(py::object group) { return _ccl_comms[0]; } +ccl::communicator& _get_comm_from_group(std::vector ranks) +{ + if (group_to_comm_id.find(ranks) != group_to_comm_id.end()) { + auto id = group_to_comm_id.find(ranks); + return _ccl_comms[id->second]; + } + return _ccl_comms[0]; +} + +#define CCLCHECK(cmd) \ + do { \ + cmd; \ + } while (0) + +#define KVS_CREATE_SUCCESS 0 +#define KVS_CREATE_FAILURE -1 + +static bool is_initialized = 0; + +static ccl::shared_ptr_class kvs; + +static bool all_ranks_local_p = false; + +void initialize(int size, int rank, torch::Tensor& kvs_data) +{ + if (is_initialized) return; + + // Check whether all ranks is on the same physical machine. 
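// (The check below compares the LOCAL_SIZE environment variable, i.e. the number of ranks
// launched on this node, against the global world size; equality means every rank is local.)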
+ // If true, we will use an SHM based low latency allreduce + + auto ls_string = std::getenv("LOCAL_SIZE"); + int ls = 0; + if (ls_string != NULL) { ls = std::stoi(std::getenv("LOCAL_SIZE")); } + + if (size >= 1 && size == ls) { all_ranks_local_p = true; } + + world_size = size; + world_rank = rank; + is_initialized = 1; + + ccl::kvs::address_type main_addr; + + if (rank != 0) { + memcpy(main_addr.data(), kvs_data.data_ptr(), main_addr.size()); + kvs = ccl::create_kvs(main_addr); + } + + _ccl_comms.emplace_back(ccl::create_communicator(size, rank, kvs)); + + auto addr_string = std::getenv("MASTER_ADDR"); + if (addr_string == NULL) { addr_string = ""; } + auto port_string = std::getenv("MASTER_PORT"); + if (port_string == NULL) { port_string = ""; } + + if (all_ranks_local_p) { shm_initialize(size, rank, addr_string, port_string); } +} + +/* + rank == 0: create main kvs and return its address + rank == else: return an empty address +*/ +std::vector get_kvs_addr(int rank) +{ + if (rank == 0) { + kvs = ccl::create_main_kvs(); + ccl::kvs::address_type main_addr = kvs->get_address(); + auto ccl_kvs_addr = std::vector(main_addr.begin(), main_addr.end()); + return ccl_kvs_addr; + } else { + ccl::kvs::address_type main_addr; + auto ccl_kvs_addr = std::vector(main_addr.begin(), main_addr.end()); + return ccl_kvs_addr; + } +} + +int get_rank(int group = 0) { return world_rank; } + +int get_world_size(int group = 0) { return world_size; } + +// Find the next ordered, unique value to a set. E.g. <0,1,2,7> --> 3 +int next_unique_val(std::set s) +{ + std::set::iterator itr; + // Base case. Add 0 to start of set. + if (s.empty() || *s.begin() != 0) { + return 0; + // second base case where s = {0} (the case of s = {n != 0} is caught above) + } else if (s.size() == 1) { + return 1; + } else { + int prev_val = *s.begin(); + for (itr = std::next(s.begin()); itr != s.end(); itr++) { + if (*itr != prev_val + 1) { return prev_val + 1; } + prev_val = *itr; + } + return *(s.end()) + 1; + } +} + +std::vector get_sub_kvs_addr(bool first) +{ + if (first) { + sub_kvs = ccl::create_main_kvs(); + ccl::kvs::address_type main_addr = sub_kvs->get_address(); + auto ccl_kvs_addr = std::vector(main_addr.begin(), main_addr.end()); + return ccl_kvs_addr; + } else { + ccl::kvs::address_type main_addr; + auto ccl_kvs_addr = std::vector(main_addr.begin(), main_addr.end()); + return ccl_kvs_addr; + } +} + +void initialize_sub_comm(int size, int rank, torch::Tensor& kvs_data, std::vector ranks) +{ + ccl::kvs::address_type main_addr; + if (rank != 0) { + memcpy(main_addr.data(), kvs_data.data_ptr(), main_addr.size()); + sub_kvs = ccl::create_kvs(main_addr); + } + _ccl_comms.push_back(ccl::create_communicator(size, rank, sub_kvs)); + group_to_comm_id[ranks] = _ccl_comms.size() - 1; +} + +ccl::datatype get_ccl_datatype(c10::ScalarType type) +{ + ccl::datatype ccl_type; + switch (type) { + case c10::ScalarType::Int: ccl_type = ccl::datatype::int32; break; + case c10::ScalarType::Long: ccl_type = ccl::datatype::int64; break; + case c10::ScalarType::Float: ccl_type = ccl::datatype::float32; break; + case c10::ScalarType::Double: ccl_type = ccl::datatype::float64; break; + case c10::ScalarType::BFloat16: ccl_type = ccl::datatype::bfloat16; break; + case c10::ScalarType::Half: ccl_type = ccl::datatype::float16; break; + default: ccl_type = ccl::datatype::int8; + } + return ccl_type; +} + +ccl::reduction get_ccl_reduce_op(py::object op, at::Tensor& input) +{ + py::object ReduceOp = py::module_::import("deepspeed.comm").attr("ReduceOp"); + 
if (!py::isinstance(op, ReduceOp)) { + throw std::runtime_error("Error: Op must be of type ReduceOp"); + } + + int op_val = py::int_(op.attr("value")); + ccl::reduction ccl_op; + + if (input.scalar_type() == at::kBool) { + if (op_val == (int)py::int_(ReduceOp.attr("SUM").attr("value"))) { + // For bool tensors, map sum to max, which both represent a bitwise or. + // This is to prevent overflow issues with sum, since we use uint8 to + // represent a bool (see cclDataType mapping). + ccl_op = ccl::reduction::max; + } else if (op_val == (int)py::int_(ReduceOp.attr("AVG").attr("value"))) { + throw std::runtime_error("Error: For bool tensors, op must be of type ReduceOp"); + } + } + + if (op_val == (int)py::int_(ReduceOp.attr("SUM").attr("value"))) { + ccl_op = ccl::reduction::sum; + } else if (op_val == (int)py::int_(ReduceOp.attr("MIN").attr("value"))) { + ccl_op = ccl::reduction::min; + } else if (op_val == (int)py::int_(ReduceOp.attr("MAX").attr("value"))) { + ccl_op = ccl::reduction::max; + } else if (op_val == (int)py::int_(ReduceOp.attr("PRODUCT").attr("value"))) { + ccl_op = ccl::reduction::prod; + } else { + throw std::runtime_error("Error: Unrecognized ReduceOp type"); + } + return ccl_op; +} + +void broadcast(torch::Tensor& data, int src, std::vector group, bool async_op) +{ + CCLCHECK(ccl::broadcast(data.data_ptr(), + data.numel(), + get_ccl_datatype(data.scalar_type()), + src, + _get_comm_from_group(group)) + .wait()); +} + +// TODO: implement torch's async_op behavior, document it. +void all_reduce(torch::Tensor& data, py::object op, std::vector group, bool async_op) +{ + CCLCHECK(ccl::allreduce(data.data_ptr(), + data.data_ptr(), + data.numel(), + get_ccl_datatype(data.scalar_type()), + get_ccl_reduce_op(op, data), + _get_comm_from_group(group)) + .wait()); +} + +void all_reduce_caching(torch::Tensor& data, + py::object op, + std::string match_id, + std::vector group, + bool async_op) +{ + ccl::allreduce_attr attr = ccl::default_allreduce_attr; + auto match_str = ccl::v1::string(match_id); + attr.template set(true); + attr.template set(match_str); + // To control this, use operation attribute and set true value for to_cache field and unique + // string (for example, tensor name) for match_id field. Note that: + // match_id should be the same for a specific communication operation across all ranks. + // If the same tensor is a part of different communication operations, match_id should have + // different values for each of these operations. 
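// Illustrative example: a stable per-tensor name such as "layer3.weight.grad" could serve as
// match_id when the same bucket is reduced every step, while a different bucket must use a
// different string so their cached operations do not collide.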
+ CCLCHECK(ccl::allreduce(data.data_ptr(), + data.data_ptr(), + data.numel(), + get_ccl_datatype(data.scalar_type()), + get_ccl_reduce_op(op, data), + _get_comm_from_group(group), + attr) + .wait()); +} + +void inference_all_reduce(torch::Tensor& data, py::object op) +{ +#ifdef DO_PROFILE + static double total_time = 0.0; + static double total_time_sq = 0.0; + static int count = -16; // warmup + static double max_time = 0.0; + static double min_time = DBL_MAX; + // make sure all rank reach this point before measuring time + // turn on this if you suspect each rank didn't reach here at the same time (stragger) + // if (all_ranks_local_p) { + // barrier_wait(0, world_size); + //} + auto start = std::chrono::system_clock::now(); +#endif + + static py::object ReduceOp = py::module_::import("deepspeed.comm").attr("ReduceOp"); + static auto ReduceOpSum = (int)py::int_(ReduceOp.attr("SUM").attr("value")); + + assert(py::int_(op.attr("value")) == ReduceOpSum); + + auto numel = data.numel(); + + int data_size = 0; + bool data_type_fallback = false; + + switch (data.scalar_type()) { + case c10::ScalarType::BFloat16: data_size = numel * 2; break; + case c10::ScalarType::Float: data_size = numel * 4; break; + default: data_type_fallback = true; + } + + if (data_type_fallback || !all_ranks_local_p) { + // fallback to oneccl allreduce + CCLCHECK(ccl::allreduce(data.data_ptr(), + data.data_ptr(), + data.numel(), + get_ccl_datatype(data.scalar_type()), + get_ccl_reduce_op(op, data), + _get_comm_from_group()) + .wait()); + } else { + all_reduce_outer_loop(data, numel, data_size); + } + +#ifdef DO_PROFILE + auto end = std::chrono::system_clock::now(); + count++; + if (count > 0) { + double elapsed = std::chrono::duration_cast(end - start).count(); + if (elapsed > max_time) { max_time = elapsed; } + if (elapsed < min_time) { min_time = elapsed; } + total_time += elapsed; + total_time_sq += elapsed * elapsed; + if (world_rank == 0 && count == 1000) { + auto avg = total_time / count; + auto sd = + sqrt(total_time_sq / count - total_time * total_time / (count * count)) / avg * 100; + printf(" C++ kernel\t\t %.2f\t %.2f\t%.2f\t %.2f\n", + min_time, + max_time, + total_time / count, + sd); + } + } +#endif +} + +void barrier(std::vector group, bool async_op) +{ + CCLCHECK(ccl::barrier(_get_comm_from_group(group)).wait()); +} + +std::vector get_available_coll() +{ + std::vector colls{ + "broadcast", "all_reduce", "inference_all_reduce", "all_reduce_caching", "barrier"}; + return colls; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("get_kvs_addr", &get_kvs_addr, "create and get main kvs addr"); + m.def("initialize", &initialize, "ccl initialize"); + m.def("get_rank", &get_rank, "get rank"); + m.def("get_world_size", &get_world_size, "get world size"); + m.def("broadcast", &broadcast, "ccl broadcast"); + m.def("all_reduce", &all_reduce, "ccl all_reduce"); + m.def("inference_all_reduce", &inference_all_reduce, "low latency all_reduce implementation"); + m.def("all_reduce_caching", &all_reduce_caching, "ccl all_reduce with caching"); + m.def("barrier", &barrier, "barrier"); + m.def("initialize_sub_comm", &initialize_sub_comm, "initialize_sub_comm"); + m.def("get_sub_kvs_addr", &get_sub_kvs_addr, "get_sub_kvs_addr"); + m.def("get_available_coll", &get_available_coll, "get_available_coll"); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/shm.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/shm.cpp new file mode 100644 index 
0000000000000000000000000000000000000000..859c2fec292d190fe69869a296b81da95b3d9e9c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/shm.cpp @@ -0,0 +1,686 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include + +#include +#include +#include +#include +#include +#include "shm.h" + +// #define DO_PROFILE +#ifdef DO_PROFILE +#include +#include +#endif + +// states for collectives +enum coll_state { + coll_begin = 0, + coll_allreduce_naive__copy_in_done, // this state is for rank != 0 + coll_allreduce_naive__reduce_done, // this state is for rank == 0 + coll_allreduce_naive__copy_out_done, // this state is for rank != 0 +}; + +// SHM building blocks +struct SharedData { + const char* name; + int descriptor; + void* bytes; + size_t nbytes; +}; + +void shared_open(SharedData* data, const char* name, size_t nbytes) +{ + int d = shm_open(name, O_RDWR, S_IRUSR | S_IWUSR); + if (d != -1) { + void* bytes = mmap(NULL, nbytes, PROT_READ | PROT_WRITE, MAP_SHARED, d, 0); + data->name = name; + data->descriptor = d; + data->bytes = bytes; + data->nbytes = nbytes; + } else { + if (errno != ENOENT) { + // don't print if shm can not be found because we want to loop over from + // caller again until the other ranks created the shm + printf("shared_open %s failed, errno=%d\n", name, errno); + } + data->descriptor = -1; + } +} + +void shared_create(SharedData* data, const char* name, void* bytes, size_t nbytes) +{ + int d = shm_open(name, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR); + if (d != -1) { + if (nbytes = write(d, bytes, nbytes)) { shared_open(data, name, nbytes); } + } else { + printf("shared_create %s failed\n", name); + } +} + +void shared_close(SharedData* data) +{ + if (data->descriptor != -1) { + munmap(data->bytes, data->nbytes); + shm_unlink(data->name); + } +} + +// SHM based allreduce helper functions +// buffer that holds shm name +#define NAME_BUF_SIZE 1000 +#define MAX_BUF_SIZE 1048576 * 32 +#define NAIVE_ALLREDUCE_THRESHOLD 1048576 +#define SHM_BUFFER_NAME "deepspeed_allreduce_buffer" +struct allreduce_workspace { + enum coll_state state; + sem_t mutex; + sem_t turnstile1; + sem_t turnstile2; + int counter; + char buffer[MAX_BUF_SIZE]; +}; +struct allreduce_workspace** workspace; + +void wait_buffer_state_until(int index, enum coll_state state) +{ + volatile enum coll_state* state_ptr = &(workspace[index]->state); + + while (*state_ptr != state) + ; +} + +void wait_buffer_state_until_range(int index, enum coll_state start, int size) +{ + volatile enum coll_state* state_ptr = &(workspace[index]->state); + enum coll_state end = (enum coll_state)(start + size); + + while (1) { + volatile enum coll_state cur_state = *state_ptr; + if (cur_state >= start and cur_state < end) break; + } +} + +void wait_buffer_state_until_not(int index, enum coll_state state) +{ + volatile enum coll_state* state_ptr = &(workspace[index]->state); + + while (*state_ptr == state) + ; +} + +void barrier_wait(int root_idx, int num_ranks) +{ + // Phase 1: Wait for all threads to enter the barrier + auto shared = workspace[root_idx]; + sem_wait(&shared->mutex); + shared->counter++; + if (shared->counter == num_ranks) { + for (int i = 0; i < num_ranks; ++i) { sem_post(&shared->turnstile1); } + } + sem_post(&shared->mutex); + sem_wait(&shared->turnstile1); + + // Phase 2: Wait for all threads to exit the barrier + sem_wait(&shared->mutex); + shared->counter--; + if (shared->counter == 0) { + for (int i = 0; i < num_ranks; ++i) { 
sem_post(&shared->turnstile2); } + } + sem_post(&shared->mutex); + sem_wait(&shared->turnstile2); +} + +__m512 cvt_bf16_to_fp32(const __m256i src) __attribute__((target("avx512bw"))); +inline __m512 cvt_bf16_to_fp32(const __m256i src) +{ + auto y = _mm512_cvtepu16_epi32(src); + return _mm512_castsi512_ps(_mm512_bslli_epi128(y, 2)); +} + +inline __m256i cvt_fp32_to_bf16(const __m512 src) __attribute__((target("avx512bw"))); +inline __m256i cvt_fp32_to_bf16(const __m512 src) +{ + __m512i value = _mm512_castps_si512(src); + __m512i nan = _mm512_set1_epi32(0xffff); + auto mask_value = _mm512_cmp_ps_mask(src, src, _CMP_ORD_Q); + __m512i ones = _mm512_set1_epi32(0x1); + __m512i vec_bias = _mm512_set1_epi32(0x7fff); + // uint32_t lsb = (input >> 16) & 1; + auto t_value = _mm512_and_si512(_mm512_srli_epi32(value, 16), ones); + // uint32_t rounding_bias = 0x7fff + lsb; + t_value = _mm512_add_epi32(t_value, vec_bias); + // input += rounding_bias; + t_value = _mm512_add_epi32(t_value, value); + // input = input >> 16; + t_value = _mm512_srli_epi32(t_value, 16); + // Check NaN before converting back to bf16 + t_value = _mm512_mask_blend_epi32(mask_value, nan, t_value); + return _mm512_cvtusepi32_epi16(t_value); +} + +void reduce_2_bf16_buffers_iio(int num_elements, void* in0, void* in1, void* out) + __attribute__((target("avx512bw"))); + +void reduce_bf16_buffers(int start_elements, + int num_elements, + int num_buffers, + int to_buffer_idx, + struct allreduce_workspace** workspace) + __attribute__((target("avx512bw"))); + +void reduce_2_fp32_buffers_iio(int num_elements, void* in0, void* in1, void* out) + __attribute__((target("avx512bw"))); + +void reduce_fp32_buffers(int start_elements, + int num_elements, + int num_buffers, + int to_buffer_idx, + struct allreduce_workspace** workspace) + __attribute__((target("avx512bw"))); + +// N_REDUCE_LIMIT is the number of buffers that can be reduced together in one shot. +// Compared with do N-1 2-reduces which needs 2*(N-1) read and N-1 write, +// N-reduce only needs N read and 1 write, this saves 2/3 memory bandwidth. +// When increase N_REDUCE_LIMIT to a bigger number, do the following steps +// 1. Extend REPEAT_ macros list down below +// 2. 
Extend switch cases which call "REPEAT(X, ...)" down below +#define N_REDUCE_LIMIT 16 + +void reduce_all_buffers(struct allreduce_workspace** workspace, + int start_elements, + int num_elements, + c10::ScalarType scalar_type, + int num_buffers, + int to_buffer_idx) +{ + switch (scalar_type) { + case c10::ScalarType::BFloat16: + if (num_buffers > 2 && num_buffers <= N_REDUCE_LIMIT) { + reduce_bf16_buffers( + start_elements, num_elements, num_buffers, to_buffer_idx, workspace); + } else { + for (int i = 0; i < num_buffers; i++) { + if (i == to_buffer_idx) continue; + reduce_2_bf16_buffers_iio( + num_elements, + workspace[i]->buffer + start_elements * 2, + workspace[to_buffer_idx]->buffer + start_elements * 2, + workspace[to_buffer_idx]->buffer + start_elements * 2); + } + } + break; + case c10::ScalarType::Float: + if (num_buffers > 2 && num_buffers <= N_REDUCE_LIMIT) { + reduce_fp32_buffers( + start_elements, num_elements, num_buffers, to_buffer_idx, workspace); + } else { + for (int i = 0; i < num_buffers; i++) { + if (i == to_buffer_idx) continue; + reduce_2_fp32_buffers_iio( + num_elements, + workspace[i]->buffer + start_elements * 4, + workspace[to_buffer_idx]->buffer + start_elements * 4, + workspace[to_buffer_idx]->buffer + start_elements * 4); + } + } + break; + default: assert(!"Should not get here"); + } +} + +#define REPEAT(N, x) REPEAT_##N(x) +#define REPEAT_1(x) x(1) +#define REPEAT_2(x) \ + REPEAT_1(x); \ + x(2) +#define REPEAT_3(x) \ + REPEAT_2(x); \ + x(3) +#define REPEAT_4(x) \ + REPEAT_3(x); \ + x(4) +#define REPEAT_5(x) \ + REPEAT_4(x); \ + x(5) +#define REPEAT_6(x) \ + REPEAT_5(x); \ + x(6) +#define REPEAT_7(x) \ + REPEAT_6(x); \ + x(7) +#define REPEAT_8(x) \ + REPEAT_7(x); \ + x(8) +#define REPEAT_9(x) \ + REPEAT_8(x); \ + x(9) +#define REPEAT_10(x) \ + REPEAT_9(x); \ + x(10) +#define REPEAT_11(x) \ + REPEAT_10(x); \ + x(11) +#define REPEAT_12(x) \ + REPEAT_11(x); \ + x(12) +#define REPEAT_13(x) \ + REPEAT_12(x); \ + x(13) +#define REPEAT_14(x) \ + REPEAT_13(x); \ + x(14) +#define REPEAT_15(x) \ + REPEAT_14(x); \ + x(15) + +#define CVT_ADD_BF16(x) \ + do { \ + auto in##x##_val = \ + cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(workspace[x]->buffer + i))); \ + inout_val = _mm512_add_ps(inout_val, in##x##_val); \ + } while (0) + +// Reduce functions down below use vectorized algorithm, the number of bytes processed each +// iteration depends on vector length. 
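(For the 256-bit loads used below that is 32 bytes per iteration, i.e. 32 / 2 = 16 bf16
elements or 32 / 4 = 8 fp32 elements per vector op.)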
256bit vector ==> 32 bytes, 512bit vector ==> 64 bytes +// If you change implementation of reduce_2_bf16_buffers_iio or reduce_2_fp32_buffers_iio, check +// whether this number needs to be changed +#define VECTOR_LENGTH_IN_BYTES 32 + +void reduce_bf16_buffers(int start_elements, + int num_elements, + int num_buffers, + int to_buffer_idx, + struct allreduce_workspace** workspace) +{ + const int element_size = 2; + const int vector_length = VECTOR_LENGTH_IN_BYTES / element_size; + int main_elements = num_elements - (num_elements % vector_length); + int remain_elements = num_elements % vector_length; + + // process aligned part +#pragma omp parallel for + for (int i = start_elements * element_size; i < (start_elements + main_elements) * element_size; + i += VECTOR_LENGTH_IN_BYTES) { + auto inout_val = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)(workspace[0]->buffer + i))); + switch (num_buffers) { + case 16: REPEAT(15, CVT_ADD_BF16); break; + case 15: REPEAT(14, CVT_ADD_BF16); break; + case 14: REPEAT(13, CVT_ADD_BF16); break; + case 13: REPEAT(12, CVT_ADD_BF16); break; + case 12: REPEAT(11, CVT_ADD_BF16); break; + case 11: REPEAT(10, CVT_ADD_BF16); break; + case 10: REPEAT(9, CVT_ADD_BF16); break; + case 9: REPEAT(8, CVT_ADD_BF16); break; + case 8: REPEAT(7, CVT_ADD_BF16); break; + case 7: REPEAT(6, CVT_ADD_BF16); break; + case 6: REPEAT(5, CVT_ADD_BF16); break; + case 5: REPEAT(4, CVT_ADD_BF16); break; + case 4: REPEAT(3, CVT_ADD_BF16); break; + case 3: REPEAT(2, CVT_ADD_BF16); break; + default: assert(!"Should not get here."); + } + _mm256_storeu_si256((__m256i*)(workspace[to_buffer_idx]->buffer + i), + cvt_fp32_to_bf16(inout_val)); + } + + // process remaining part + int i = (start_elements + main_elements) * element_size; + while (remain_elements > 0) { + float val = 0.0f; + for (int j = 0; j < num_buffers; j++) { val += *(at::BFloat16*)(workspace[j]->buffer + i); } + *(at::BFloat16*)(workspace[to_buffer_idx]->buffer + i) = val; + remain_elements--; + i += element_size; + } +} + +void reduce_2_bf16_buffers_iio(int num_elements, void* in0, void* in1, void* out) +{ + const int element_size = 2; + const int vector_length = VECTOR_LENGTH_IN_BYTES / element_size; + int main_elements = num_elements - (num_elements % vector_length); + int remain_elements = num_elements % vector_length; + + // process aligned part +#pragma omp parallel for + for (int i = 0; i < main_elements * element_size; i += VECTOR_LENGTH_IN_BYTES) { + auto in0_val = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)((char*)in0 + i))); + auto in1_val = cvt_bf16_to_fp32(_mm256_loadu_si256((__m256i*)((char*)in1 + i))); + auto out_val = _mm512_add_ps(in0_val, in1_val); + _mm256_storeu_si256((__m256i*)((char*)out + i), cvt_fp32_to_bf16(out_val)); + } + + // process remaining part + int i = main_elements * element_size; + while (remain_elements > 0) { + float in0_val = *((at::BFloat16*)((char*)in0 + i)); + float in1_val = *((at::BFloat16*)((char*)in1 + i)); + *((at::BFloat16*)((char*)out + i)) = in0_val + in1_val; + remain_elements--; + i += element_size; + } +} + +#define CVT_ADD_F32(x) \ + do { \ + auto in##x##_val = _mm256_loadu_ps((float*)(workspace[x]->buffer + i)); \ + inout_val = _mm256_add_ps(inout_val, in##x##_val); \ + } while (0) + +void reduce_fp32_buffers(int start_elements, + int num_elements, + int num_buffers, + int to_buffer_idx, + struct allreduce_workspace** workspace) +{ + const int element_size = 4; + const int vector_length = VECTOR_LENGTH_IN_BYTES / element_size; + int main_elements = num_elements - 
(num_elements % vector_length); + int remain_elements = num_elements % vector_length; + + // process aligned part +#pragma omp parallel for + for (int i = start_elements * element_size; i < (start_elements + main_elements) * element_size; + i += VECTOR_LENGTH_IN_BYTES) { + auto inout_val = _mm256_loadu_ps((float*)(workspace[0]->buffer + i)); + switch (num_buffers) { + case 16: REPEAT(15, CVT_ADD_F32); break; + case 15: REPEAT(14, CVT_ADD_F32); break; + case 14: REPEAT(13, CVT_ADD_F32); break; + case 13: REPEAT(12, CVT_ADD_F32); break; + case 12: REPEAT(11, CVT_ADD_F32); break; + case 11: REPEAT(10, CVT_ADD_F32); break; + case 10: REPEAT(9, CVT_ADD_F32); break; + case 9: REPEAT(8, CVT_ADD_F32); break; + case 8: REPEAT(7, CVT_ADD_F32); break; + case 7: REPEAT(6, CVT_ADD_F32); break; + case 6: REPEAT(5, CVT_ADD_F32); break; + case 5: REPEAT(4, CVT_ADD_F32); break; + case 4: REPEAT(3, CVT_ADD_F32); break; + case 3: REPEAT(2, CVT_ADD_F32); break; + default: assert(!"Should not get here."); + } + _mm256_storeu_ps((float*)(workspace[to_buffer_idx]->buffer + i), inout_val); + } + + // process remaining part + int i = (start_elements + main_elements) * element_size; + while (remain_elements > 0) { + float val = 0.0f; + for (int j = 0; j < num_buffers; j++) { val += *(float*)(workspace[j]->buffer + i); } + *(float*)(workspace[to_buffer_idx]->buffer + i) = val; + remain_elements--; + i += element_size; + } +} + +void reduce_2_fp32_buffers_iio(int num_elements, void* in0, void* in1, void* out) +{ + const int element_size = 4; + const int vector_length = VECTOR_LENGTH_IN_BYTES / element_size; + int main_elements = num_elements - (num_elements % vector_length); + int remain_elements = num_elements % vector_length; + + // process aligned part +#pragma omp parallel for + for (int i = 0; i < main_elements * element_size; i += VECTOR_LENGTH_IN_BYTES) { + auto in0_val = _mm256_loadu_ps((float*)((char*)in0 + i)); + auto in1_val = _mm256_loadu_ps((float*)((char*)in1 + i)); + auto out_val = _mm256_add_ps(in0_val, in1_val); + _mm256_storeu_ps((float*)((char*)out + i), out_val); + } + + // process remaining part + int i = main_elements * element_size; + while (remain_elements > 0) { + float in0_val = *((float*)((char*)in0 + i)); + float in1_val = *((float*)((char*)in1 + i)); + *((float*)((char*)out + i)) = in0_val + in1_val; + remain_elements--; + i += element_size; + } +} + +static bool is_initialized = 0; +static int world_size; +static int world_rank; + +void shm_initialize(int size, int rank, char* addr_string, char* port_string) +{ + if (is_initialized) return; + is_initialized = 1; + + world_size = size; + world_rank = rank; + + char shm_name_prefix[NAME_BUF_SIZE]; + char shm_name[NAME_BUF_SIZE]; + snprintf(shm_name_prefix, + NAME_BUF_SIZE, + "%s_%d_%s_%s", + SHM_BUFFER_NAME, + getuid(), + addr_string, + port_string); + // create shared workspace for SHM based allreduce + SharedData allreduce_buffer; + // allocate workspace_buf for current rank + struct allreduce_workspace* workspace_buf; + struct allreduce_workspace* workspace_buf_other; + workspace_buf = (struct allreduce_workspace*)malloc(sizeof(struct allreduce_workspace)); + snprintf(shm_name, NAME_BUF_SIZE, "%s_%d", shm_name_prefix, rank); + shared_create(&allreduce_buffer, shm_name, workspace_buf, sizeof(struct allreduce_workspace)); + workspace_buf = (struct allreduce_workspace*)allreduce_buffer.bytes; + workspace_buf->state = coll_begin; + + // create the workspace pointer list + workspace = (struct allreduce_workspace**)malloc(size * sizeof(struct 
allreduce_workspace*)); + + // map shm of all ranks + for (int i = 0; i < size; i++) { + if (i != rank) { + snprintf(shm_name, NAME_BUF_SIZE, "%s_%d", shm_name_prefix, i); + // printf("open %s, %d\n", shm_name, rank); + do { + shared_open(&allreduce_buffer, shm_name, sizeof(struct allreduce_workspace)); + } while (allreduce_buffer.descriptor == -1 && errno == ENOENT); + workspace_buf_other = (struct allreduce_workspace*)allreduce_buffer.bytes; + workspace[i] = workspace_buf_other; + } else { + workspace[i] = workspace_buf; + workspace_buf->counter = 0; + sem_init(&workspace_buf->mutex, 1, 1); + sem_init(&workspace_buf->turnstile1, 1, 0); + sem_init(&workspace_buf->turnstile2, 1, 0); + } + } +} + +static void parallel_memcpy(void* to, void* from, size_t n_bytes) + __attribute__((target("avx512bw"))); +static void parallel_memcpy(void* to, void* from, size_t n_bytes) +{ + auto aligned_bytes = n_bytes - (n_bytes % VECTOR_LENGTH_IN_BYTES); + // process aligned part +#pragma omp parallel for + for (int i = 0; i < aligned_bytes; i += VECTOR_LENGTH_IN_BYTES) { + auto val = _mm256_loadu_si256((__m256i*)((char*)from + i)); + _mm256_storeu_si256((__m256i*)((char*)to + i), val); + } + + // process remaining part + for (int i = aligned_bytes; i < n_bytes; i++) { *((char*)to + i) = *((char*)from + i); } +} + +#define positive_mod(num, mod) ((((num) % (mod)) + (mod)) % (mod)) +#define rank_mod(rank) positive_mod(rank, world_size) +size_t slice_size(size_t chunk_el, int slice_idx) +{ + size_t slice_size = chunk_el / world_size; + return slice_idx == world_size - 1 ? slice_size + (chunk_el % world_size) : slice_size; +} + +char* slice_data(char* data_ptr, size_t chunk_el, int el_size, int slice_idx) +{ + size_t slice_size = chunk_el / world_size; + size_t el_offset = slice_size * slice_idx; + return data_ptr + el_offset * el_size; +} + +size_t slice_el_start(size_t chunk_el, int slice_idx) +{ + size_t slice_size = chunk_el / world_size; + return slice_size * slice_idx; +} + +void naive_all_reduce(char* data_ptr, + c10::ScalarType scalar_type, + size_t chunk_size, + size_t chunk_el) +{ + parallel_memcpy(workspace[world_rank]->buffer, data_ptr, chunk_size); + std::atomic_thread_fence(std::memory_order_release); + workspace[world_rank]->state = coll_allreduce_naive__copy_in_done; + + if (world_rank == 0) { + // compute allreduce result on rank 0 + for (int i = 1; i < world_size; i++) { + // wait until the other rank copy the buffer + wait_buffer_state_until(i, coll_allreduce_naive__copy_in_done); + } + reduce_all_buffers(workspace, 0, chunk_el, scalar_type, world_size, 0); + std::atomic_thread_fence(std::memory_order_release); + workspace[world_rank]->state = coll_allreduce_naive__reduce_done; + parallel_memcpy(data_ptr, workspace[0]->buffer, chunk_size); + } + if (world_rank != 0) { + wait_buffer_state_until(0, coll_allreduce_naive__reduce_done); + parallel_memcpy(data_ptr, workspace[0]->buffer, chunk_size); + std::atomic_thread_fence(std::memory_order_release); + workspace[world_rank]->state = coll_allreduce_naive__copy_out_done; + } + if (world_rank == 0) { + for (int i = 1; i < world_size; i++) { + wait_buffer_state_until(i, coll_allreduce_naive__copy_out_done); + } + std::atomic_thread_fence(std::memory_order_release); + workspace[world_rank]->state = coll_begin; + } + if (world_rank != 0) { + // if rank 0 spin too fast it could be in state 1 of next allreduce + // in this case wait_buffer_state_until(0, 0) may cause deadlock + // what we are certain is when rank 0 finishes the state won't be 2 + 
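// (In enum terms: state 0 is coll_begin, state 1 is coll_allreduce_naive__copy_in_done and
// state 2 is coll_allreduce_naive__reduce_done. Rank 0 only leaves reduce_done after every
// other rank has posted copy_out_done, so "no longer in reduce_done" is the earliest condition
// that is safe to spin on here.)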
wait_buffer_state_until_not(0, coll_allreduce_naive__reduce_done); + workspace[world_rank]->state = coll_begin; + } +} + +// naive allreduce distributed, each rank do naive reduce on its slice +void distributed_naive_reduce(char* data_ptr, + c10::ScalarType scalar_type, + size_t chunk_size, + size_t chunk_el) +{ +#ifdef DO_PROFILE + static double total_t1_t0 = 0.0; + static double total_t2_t1 = 0.0; + static double total_t3_t2 = 0.0; + static double total_t4_t3 = 0.0; + static double total_t5_t4 = 0.0; + static int count = -16; // warmup + auto t0 = std::chrono::system_clock::now(); +#endif + + int data_size = chunk_size / chunk_el; + parallel_memcpy(workspace[world_rank]->buffer, data_ptr, chunk_size); + std::atomic_thread_fence(std::memory_order_release); + workspace[world_rank]->state = coll_allreduce_naive__copy_in_done; + +#ifdef DO_PROFILE + auto t1 = std::chrono::system_clock::now(); +#endif + + for (int i = 0; i < world_size; i++) { + // wait until all the other ranks copy the buffer + wait_buffer_state_until_range(i, coll_allreduce_naive__copy_in_done, 2); + } + +#ifdef DO_PROFILE + auto t2 = std::chrono::system_clock::now(); +#endif + + // reduce scatter + reduce_all_buffers(workspace, + slice_el_start(chunk_el, world_rank), + slice_size(chunk_el, world_rank), + scalar_type, + world_size, + world_rank); + std::atomic_thread_fence(std::memory_order_release); + workspace[world_rank]->state = coll_allreduce_naive__reduce_done; + +#ifdef DO_PROFILE + auto t3 = std::chrono::system_clock::now(); +#endif + + for (int i = 0; i < world_size; i++) { + int rank = (i + world_rank) % world_size; + // wait until the other rank reduce the buffer + wait_buffer_state_until_range(rank, coll_allreduce_naive__reduce_done, 2); + parallel_memcpy(slice_data(data_ptr, chunk_el, data_size, rank), + slice_data(workspace[rank]->buffer, chunk_el, chunk_size / chunk_el, rank), + slice_size(chunk_el, rank) * data_size); + } + std::atomic_thread_fence(std::memory_order_release); + workspace[world_rank]->state = coll_allreduce_naive__copy_out_done; + +#ifdef DO_PROFILE + auto t4 = std::chrono::system_clock::now(); +#endif + + for (int i = 0; i < world_size; i++) { + wait_buffer_state_until_not(i, coll_allreduce_naive__reduce_done); + } + + std::atomic_thread_fence(std::memory_order_release); + workspace[world_rank]->state = coll_begin; + +#ifdef DO_PROFILE + auto t5 = std::chrono::system_clock::now(); + count++; + if (count > 0) { + total_t1_t0 += std::chrono::duration_cast(t1 - t0).count(); + total_t2_t1 += std::chrono::duration_cast(t2 - t1).count(); + total_t3_t2 += std::chrono::duration_cast(t3 - t2).count(); + total_t4_t3 += std::chrono::duration_cast(t4 - t3).count(); + total_t5_t4 += std::chrono::duration_cast(t5 - t4).count(); + if (world_rank == 0 && count == 1000) { + printf("distributed_naive_reduce time breakdown:\n"); + printf("\tcopy input buffer: %.2f\n", total_t1_t0 / count); + printf("\twait for copy: %.2f\n", total_t2_t1 / count); + printf("\treduce: %.2f\n", total_t3_t2 / count); + printf("\tcopy buffer to output: %.2f\n", total_t4_t3 / count); + printf("\twait finish: %.2f\n", total_t5_t4 / count); + } + } +#endif +} + +void all_reduce_outer_loop(torch::Tensor& data, size_t numel, int data_size) +{ + for (int offset = 0; offset < data_size; offset += MAX_BUF_SIZE) { + auto data_ptr = ((char*)(data.data_ptr()) + offset); + size_t chunk_size = data_size - offset > MAX_BUF_SIZE ? 
MAX_BUF_SIZE : data_size - offset; + size_t chunk_el = chunk_size / (data_size / numel); + if (chunk_size < NAIVE_ALLREDUCE_THRESHOLD) + naive_all_reduce(data_ptr, data.scalar_type(), chunk_size, chunk_el); + else + distributed_naive_reduce(data_ptr, data.scalar_type(), chunk_size, chunk_el); + } +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/shm.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/shm.h new file mode 100644 index 0000000000000000000000000000000000000000..7f73197a8caa5c4487c79c0c30ef926797bae97b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/shm.h @@ -0,0 +1,12 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#ifndef __SHM_COLLECTIVES__ +#define __SHM_COLLECTIVES__ +#define VECTOR_LENGTH_IN_BYTES 32 +void shm_initialize(int size, int rank, char* addr_string, char* port_string); +void all_reduce_outer_loop(torch::Tensor& data, size_t numel, int data_size); +void barrier_wait(int root_idx, int num_ranks); +#endif diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/shm_interface.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/shm_interface.cpp new file mode 100644 index 0000000000000000000000000000000000000000..981ea36515fca84759f7a60e3438d238bc2a3c2f --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/comm/shm_interface.cpp @@ -0,0 +1,120 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include + +#include "shm.h" + +// #define DO_PROFILE +#ifdef DO_PROFILE +#include +#include +#endif + +// Communication settings +static int world_rank = -1; +static int world_size = -1; + +static bool is_initialized = 0; + +static bool all_ranks_local_p = false; + +void initialize(int size, int rank) +{ + if (is_initialized) return; + + // Check whether all ranks is on the same physical machine. 
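// --- Illustrative sketch (editorial, not part of the patch) ---
// A minimal standalone example of the slice partitioning used by the SHM
// allreduce helpers above (slice_el_start/slice_size and the chunking in
// all_reduce_outer_loop): each rank owns chunk_el / world_size elements and
// the last rank absorbs the remainder. The world size and element count
// below are assumptions chosen only for illustration.
#include <cstddef>
#include <cstdio>

int main()
{
    const int world_size = 4;      // illustrative rank count
    const size_t chunk_el = 1030;  // illustrative elements in one chunk

    size_t base = chunk_el / world_size;
    for (int rank = 0; rank < world_size; rank++) {
        size_t start = base * rank;
        size_t size = (rank == world_size - 1) ? base + (chunk_el % world_size) : base;
        printf("rank %d reduces elements [%zu, %zu)\n", rank, start, start + size);
    }
    return 0;
}
// --- end sketch ---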
+ // If true, we will use an SHM based low latency allreduce + + auto ls_string = std::getenv("LOCAL_SIZE"); + int ls = 0; + if (ls_string != NULL) { ls = std::stoi(std::getenv("LOCAL_SIZE")); } + + if (size >= 1 && size == ls) { all_ranks_local_p = true; } + + world_size = size; + world_rank = rank; + is_initialized = 1; + + auto addr_string = std::getenv("MASTER_ADDR"); + if (addr_string == NULL) { addr_string = ""; } + auto port_string = std::getenv("MASTER_PORT"); + if (port_string == NULL) { port_string = ""; } + + if (all_ranks_local_p) { shm_initialize(size, rank, addr_string, port_string); } +} + +int get_rank(int group = 0) { return world_rank; } + +int get_world_size(int group = 0) { return world_size; } + +// Success - return 0 +// Fail (cannot hornor the request and need to fall back) - return -1 +int inference_all_reduce(torch::Tensor& data, py::object op) +{ + if (!all_ranks_local_p) return -1; +#ifdef DO_PROFILE + static double total_time = 0.0; + static double total_time_sq = 0.0; + static int count = -16; // warmup + static double max_time = 0.0; + static double min_time = DBL_MAX; + // make sure all rank reach this point before measuring time + // turn on this if you suspect each rank didn't reach here at the same time (stragger) + // if (all_ranks_local_p) { barrier_wait(0, world_size); } + auto start = std::chrono::system_clock::now(); +#endif + + static py::object ReduceOp = py::module_::import("deepspeed.comm").attr("ReduceOp"); + static auto ReduceOpSum = (int)py::int_(ReduceOp.attr("SUM").attr("value")); + + assert(py::int_(op.attr("value")) == ReduceOpSum); + + auto numel = data.numel(); + + int data_size = 0; + bool data_type_fallback = false; + + switch (data.scalar_type()) { + case c10::ScalarType::BFloat16: data_size = numel * 2; break; + case c10::ScalarType::Float: data_size = numel * 4; break; + default: data_type_fallback = true; + } + + if (data_type_fallback) return -1; + + all_reduce_outer_loop(data, numel, data_size); + +#ifdef DO_PROFILE + auto end = std::chrono::system_clock::now(); + count++; + if (count > 0) { + double elapsed = std::chrono::duration_cast(end - start).count(); + if (elapsed > max_time) { max_time = elapsed; } + if (elapsed < min_time) { min_time = elapsed; } + total_time += elapsed; + total_time_sq += elapsed * elapsed; + if (world_rank == 0 && count == 1000) { + auto avg = total_time / count; + auto sd = + sqrt(total_time_sq / count - total_time * total_time / (count * count)) / avg * 100; + printf(" C++ kernel\t\t %.2f\t %.2f\t%.2f\t %.2f\n", + min_time, + max_time, + total_time / count, + sd); + } + } +#endif + return 0; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("initialize", &initialize, "shm initialize"); + m.def("get_rank", &get_rank, "get rank"); + m.def("get_world_size", &get_world_size, "get world size"); + m.def("inference_all_reduce", &inference_all_reduce, "low latency all_reduce implementation"); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/lion/fused_lion.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/lion/fused_lion.cpp new file mode 100644 index 0000000000000000000000000000000000000000..708df7f0146aa996e1652ed938c038331738e149 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/cpu/lion/fused_lion.cpp @@ -0,0 +1,43 @@ +// Copyright (c) Microsoft Corporation. 
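// --- Illustrative sketch (editorial, not part of the patch) ---
// A small sketch of the dtype-sizing and fallback contract in
// inference_all_reduce above: only BFloat16 (2 bytes/element) and Float
// (4 bytes/element) take the SHM path; anything else yields -1 so the caller
// can fall back to the regular communication backend. The helper name and
// values here are hypothetical, for illustration only.
#include <cstdio>

static int shm_data_size(int numel, bool is_bf16, bool is_fp32)
{
    if (is_bf16) return numel * 2;  // BFloat16: 2 bytes per element
    if (is_fp32) return numel * 4;  // Float: 4 bytes per element
    return -1;                      // unsupported dtype -> caller falls back
}

int main()
{
    printf("%d\n", shm_data_size(8, true, false));   // 16 bytes over SHM
    printf("%d\n", shm_data_size(8, false, false));  // -1: fall back
    return 0;
}
// --- end sketch ---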
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "cpu_lion.h" + +// C++ interface + +void multi_tensor_lion(int chunk_size, + at::Tensor noop_flag, + std::vector> tensor_lists, /*gpmv*/ + const float lr, + const float beta1, + const float beta2, + const int step, + const int mode, + const float weight_decay) +{ + static bool initialized = false; + if (!initialized) { + create_lion_optimizer(0); + initialized = true; + } + for (int i = 0; i < tensor_lists[0].size(); i++) { + ds_lion_step(0, + step, + lr, + beta1, + beta2, + weight_decay, + tensor_lists[1][i], + tensor_lists[0][i], + tensor_lists[2][i]); + } +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("multi_tensor_lion", + &multi_tensor_lion, + "Compute and apply gradient update to parameters for Lion optimizer"); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_grad_bias.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_grad_bias.h new file mode 100644 index 0000000000000000000000000000000000000000..17b6479ed8c53711a4b0c5fd7e23bee44ff9e142 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_grad_bias.h @@ -0,0 +1,250 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once +#include +#include +#include "../iterators/predicated_tile_iterator_atomic.h" +#include "cutlass/epilogue/threadblock/epilogue.h" + +namespace cutlass { +namespace epilogue { +namespace threadblock { +template +struct EpilogueTensorOpAffineRankN : public DefaultEpilogueTensorOpAffineRankN { + using Base = DefaultEpilogueTensorOpAffineRankN; + using OutputTileIterator = + cutlass::epilogue::threadblock::PredicatedTileIteratorAffineRankNAtomic< + typename Base::OutputTileThreadMap, + typename Base::ElementOutput, + Rank>; + + using Epilogue = + cutlass::epilogue::threadblock::Epilogue; +}; + +template +struct EpilogueVoltaTensorOpAffineRankN + : public DefaultEpilogueVoltaTensorOpAffineRankN { + using Base = DefaultEpilogueVoltaTensorOpAffineRankN; + using OutputTileIterator = + cutlass::epilogue::threadblock::PredicatedTileIteratorAffineRankNAtomic< + typename Base::OutputTileThreadMap, + typename Base::ElementOutput, + Rank>; + + using Epilogue = + cutlass::epilogue::threadblock::Epilogue; +}; + +template +struct EpilogueTensorOp : public DefaultEpilogueTensorOp { + using Base = DefaultEpilogueTensorOp; + using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorAtomic< + typename Base::OutputTileThreadMap, + typename Base::ElementOutput, + ScatterD, + PermuteDLayout>; + using Epilogue = + cutlass::epilogue::threadblock::Epilogue; +}; + +template +struct EpilogueVoltaTensorOp : public DefaultEpilogueVoltaTensorOp { + using Base = DefaultEpilogueVoltaTensorOp; + using OutputTileIterator = cutlass::epilogue::threadblock::PredicatedTileIteratorAtomic< + typename Base::OutputTileThreadMap, + typename Base::ElementOutput, + ScatterD, + PermuteDLayout>; + using Epilogue = + cutlass::epilogue::threadblock::Epilogue; +}; +} // namespace threadblock +} // namespace epilogue +} // namespace cutlass + +template +struct BiasGradEpilogue { + using Epilogue = + typename cutlass::epilogue::threadblock::EpilogueTensorOp::Epilogue; +}; + +template +struct BiasGradEpilogue { + using Epilogue = + typename cutlass::epilogue::threadblock::EpilogueVoltaTensorOp::Epilogue; 
+}; + +template +struct BiasGradEpilogueAffineRankN { + using Epilogue = typename cutlass::epilogue::threadblock::EpilogueTensorOpAffineRankN< + Rank, + Shape_, + WarpMmaTensorOp_, + PartitionsK, + OutputOp_, + ElementsPerAccess>::Epilogue; +}; + +template +struct BiasGradEpilogueAffineRankN { + using Epilogue = typename cutlass::epilogue::threadblock::EpilogueVoltaTensorOpAffineRankN< + Rank, + Shape_, + WarpMmaTensorOp_, + PartitionsK, + OutputOp_, + ElementsPerAccess>::Epilogue; +}; diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_pipelined.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_pipelined.h new file mode 100644 index 0000000000000000000000000000000000000000..3b7b32d6145202ea59d707b025669cbe75a8bc51 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_pipelined.h @@ -0,0 +1,592 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holdvr nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/*! \file + \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. 
+ + File copied from "cutlass/epilogue/threadblock/epilogue.h" + then modified to: + (1) load 2 source fragments at the same time (pipelining) + (2) support reading from a different dtype + (3) pass the row id to the OutputOp if it takes it + (see MemoryEfficientAttentionNormalize) + Note that in general the fragment passed to the OutputOp could + span multiple rows but it does not happen with the configurations we have +*/ + +#pragma once + +#if defined(__CUDACC_RTC__) +#include +#else +#include +#endif + +#include "cutlass/aligned_buffer.h" +#include "cutlass/array.h" +#include "cutlass/cutlass.h" +#include "cutlass/functional.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/layout/vector.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_coord.h" + +#include "cutlass/gemm/gemm.h" + +#include "cutlass/transform/pitch_linear_thread_map.h" +#include "cutlass/transform/threadblock/regular_tile_iterator.h" + +#include "cutlass/epilogue/threadblock/epilogue_base.h" +#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" +#include "cutlass/numeric_types.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace epilogue { +namespace threadblock { + +template +struct ApplyEpilogueOp { + static CUTLASS_DEVICE typename Op::FragmentOutput apply( + Op const& output_op, + int row_id, + typename Op::FragmentAccumulator const& accum, + typename Op::FragmentOutput const& source) + { + return output_op(accum, source); + } + static CUTLASS_DEVICE typename Op::FragmentOutput + apply(Op const& output_op, int row_id, typename Op::FragmentAccumulator const& accum) + { + return output_op(accum); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Epilogue operator +template ::value), + typename OutputTileSourceIterator_ = + OutputTileIterator_ ///< Tile iterator reading tensors + > +class EpiloguePipelined : public EpilogueBase { +public: + using Base = EpilogueBase; + + using Shape = Shape_; + using WarpMmaOperator = WarpMmaOperator_; + static int const kPartitionsK = PartitionsK; + using OutputTileIterator = OutputTileIterator_; + using OutputTileSourceIterator = OutputTileSourceIterator_; + using AccumulatorFragmentIterator = AccumulatorFragmentIterator_; + using WarpTileIterator = WarpTileIterator_; + using SharedLoadIterator = SharedLoadIterator_; + using OutputOp = OutputOp_; + using Padding = Padding_; + + using Layout = layout::RowMajor; + using LongIndex = typename Layout::LongIndex; + + /// The complete warp-level accumulator tile + using AccumulatorTile = typename Base::AccumulatorTile; + + /// Accumulator element + using ElementAccumulator = typename WarpTileIterator::Element; + + /// Output element + using ElementOutput = typename OutputTileIterator::Element; + using ElementSource = typename OutputTileSourceIterator::Element; + + /// Output access size + static int const kElementsPerAccess = OutputTileIterator::kElementsPerAccess; + + /// Tensor reference to destination tensor + using TensorRef = typename OutputTileIterator::TensorRef; + + /// Tensor reference to sync tensor + using SyncTensorRef = typename cutlass::TensorRef; + + /// Const tensor reference to source tensor + using ConstTensorRef = typename OutputTileIterator::ConstTensorRef; + + /// Array type used to output + using OutputAccessType = + Array; + using SourceAccessType = Array; + + /// Array type used by output functor + using AccumulatorAccessType = + Array; + + /// Number of warps + 
using WarpCount = typename Base::WarpCount; + + static int constexpr kSmemTiles = + Base::kFragmentsPerIteration > 1 ? Base::kFragmentsPerIteration : kPartitionsK; + static int constexpr kSmemPointerOffset = + Base::SharedStorage::StorageShape::kCount / kSmemTiles; + +public: + static_assert(OutputTileSourceIterator::Fragment::kElements == + OutputTileIterator::Fragment::kElements, + "Mismatch between input tile and output tile iterator (kElements)"); + static_assert(OutputTileSourceIterator::kIterations == OutputTileIterator::kIterations, + "Mismatch between input tile and output tile iterator (kIterations)"); + static_assert(SharedLoadIterator::Fragment::kElements == + OutputTileIterator::Fragment::kElements, + "Mismatch between shared load iterator and output tile iterator."); + + static_assert(OutputTileIterator::kElementsPerAccess, + "OutputTileIterator::kElementsPerAccess must not be zero."); + + static_assert(!(OutputTileIterator::Fragment::kElements % + OutputTileIterator::kElementsPerAccess), + "Divisibility"); + +private: + /// Loads fragment from shared memory aligned with output tensor + SharedLoadIterator shared_load_iterator_; + +public: + /// Constructor + CUTLASS_DEVICE + EpiloguePipelined(typename Base::SharedStorage& shared_storage, ///< Shared storage object + int thread_idx, ///< ID of a thread within the threadblock + int warp_idx, ///< ID of warp within threadblock + int lane_idx ///< Id of thread within warp + ) + : Base(shared_storage, thread_idx, warp_idx, lane_idx), + shared_load_iterator_(shared_storage.reference(), thread_idx) + { + } + + /// Streams the result to global memory + CUTLASS_DEVICE + void operator()(OutputOp const& output_op, ///< Output operator + OutputTileIterator destination_iterator, ///< Tile iterator for destination + AccumulatorTile const& accumulators, ///< Complete warp-level accumulator tile + OutputTileSourceIterator source_iterator) + { ///< Threadblock tile coordinate in GEMM (in units + ///< of threadblock tiles) + + if (!output_op.is_source_needed()) { + compute_source_not_needed_(output_op, destination_iterator, accumulators); + } else { + compute_source_needed_(output_op, destination_iterator, accumulators, source_iterator); + } + } + CUTLASS_DEVICE + void operator()(OutputOp const& output_op, ///< Output operator + OutputTileIterator destination_iterator, ///< Tile iterator for destination + AccumulatorTile const& accumulators) + { ///< Complete warp-level accumulator tile + compute_source_not_needed_(output_op, destination_iterator, accumulators); + } + +private: + template + struct acc2smem_source_not_needed; + + template + struct acc2smem_source_not_needed> { + template + CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator, + WarpTileIterator& warp_tile_iterator) + { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < Advance; i++) { ++accum_fragment_iterator; } + + CUTLASS_PRAGMA_UNROLL + for (int p = 0; p < Base::kFragmentsPerIteration; ++p) { + typename AccumulatorFragmentIterator::Fragment accum_fragment; + + accum_fragment_iterator.load(accum_fragment); + ++accum_fragment_iterator; + + warp_tile_iterator.store(accum_fragment); + if (p < Base::kFragmentsPerIteration - 1) { + warp_tile_iterator.add_pointer_offset(kSmemPointerOffset); + } + } + + if (Base::kFragmentsPerIteration > 1) { + warp_tile_iterator.add_pointer_offset(kSmemPointerOffset * + (1 - Base::kFragmentsPerIteration)); + } + } + + CUTLASS_DEVICE + static void push(size_t pos, + AccumulatorFragmentIterator const& iterator_begin, + 
WarpTileIterator& warp_tile_iterator) + { + int dummy[] = { + (pos == (Seq * Base::kFragmentsPerIteration)) && + (helper(iterator_begin, warp_tile_iterator), + 0)...}; + + CUTLASS_UNUSED(dummy[0]); + } + }; + + static_assert(kPartitionsK == 1 || Base::kFragmentsPerIteration == 1, + "One of these must be exactly 1."); + + /// Streams the result to global memory + CUTLASS_DEVICE + void compute_source_not_needed_( + OutputOp const& output_op, ///< Output operator + OutputTileIterator destination_iterator, ///< Tile iterator for destination + AccumulatorTile const& accumulators ///< Complete warp-level accumulator tile + ) + { + // + // Iterator over warp-level accumulator fragment + // + + AccumulatorFragmentIterator accum_fragment_iterator(accumulators); + + // + // Iterate over accumulator tile + // + +#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations / Base::kFragmentsPerIteration \ + : 1) + for (int iter = 0; iter < OutputTileIterator::kIterations; + iter += Base::kFragmentsPerIteration) { + // + // Convert and store fragment + // + + __syncthreads(); + + acc2smem_source_not_needed>:: + push(iter, accum_fragment_iterator, this->warp_tile_iterator_); + + __syncthreads(); + + // + // Load fragments from shared memory + // + + CUTLASS_PRAGMA_UNROLL + for (int p = 0; p < Base::kFragmentsPerIteration; ++p) { + typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK]; + + shared_load_iterator_.load(aligned_accum_fragment[0]); + + if (p < Base::kFragmentsPerIteration - 1) { + shared_load_iterator_.add_pointer_offset(kSmemPointerOffset); + } else if (kPartitionsK > 1) { + plus add_fragments; + + CUTLASS_PRAGMA_UNROLL + for (int i = 1; i < kPartitionsK; ++i) { + shared_load_iterator_.add_pointer_offset(kSmemPointerOffset); + shared_load_iterator_.load(aligned_accum_fragment[i]); + aligned_accum_fragment[0] = + add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]); + } + + shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * + kSmemPointerOffset); + } + + // + // Compute the output result + // + + typename OutputTileIterator::Fragment output_fragment; + + apply_output_operator_source_not_needed_(destination_iterator.thread_start_row(), + output_fragment, + output_op, + aligned_accum_fragment[0]); + + // + // Store the final result + // + + destination_iterator.store(output_fragment); + ++destination_iterator; + } + + if (Base::kFragmentsPerIteration > 1) { + shared_load_iterator_.add_pointer_offset(kSmemPointerOffset * + (1 - Base::kFragmentsPerIteration)); + } + } + } + + template + struct acc2smem_source_needed; + + template + struct acc2smem_source_needed> { + template + CUTLASS_DEVICE static void helper(AccumulatorFragmentIterator accum_fragment_iterator, + WarpTileIterator& warp_tile_iterator) + { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < Advance; i++) { ++accum_fragment_iterator; } + + typename AccumulatorFragmentIterator::Fragment accum_fragment; + accum_fragment_iterator.load(accum_fragment); + warp_tile_iterator.store(accum_fragment); + } + + CUTLASS_DEVICE + static void push(size_t pos, + AccumulatorFragmentIterator const& iterator_begin, + WarpTileIterator& warp_tile_iterator) + { + int dummy[] = {(pos == Seq) && (helper(iterator_begin, warp_tile_iterator), 0)...}; + } + }; + + /// Streams the result to global memory + CUTLASS_DEVICE + void compute_source_needed_( + OutputOp const& output_op, ///< Output operator + OutputTileIterator destination_iterator, ///< Tile iterator for destination + AccumulatorTile const& accumulators, 
///< Complete warp-level accumulator tile + OutputTileSourceIterator source_iterator ///< Threadblock tile coordinate in GEMM (in units + ///< of threadblock tiles) + ) + { + typename OutputTileSourceIterator::Fragment source_fragment[2]; + + source_fragment[0].clear(); + source_iterator.load(source_fragment[0]); + ++source_iterator; + source_fragment[1].clear(); + + // + // Iterator over warp-level accumulator fragment + // + + AccumulatorFragmentIterator accum_fragment_iterator(accumulators); + + // + // Iterate over accumulator tile + // + +#pragma unroll(IterationsUnroll ? OutputTileIterator::kIterations : 1) + for (int iter = 0; iter < OutputTileIterator::kIterations; ++iter) { + if (iter > 0) { __syncthreads(); } + // + // Load the source for next iteration (pipelining) + // + + if (iter + 1 < OutputTileIterator::kIterations) { + source_iterator.load(source_fragment[(iter + 1) % 2]); + } + ++source_iterator; + acc2smem_source_needed>:: + push(iter, accum_fragment_iterator, this->warp_tile_iterator_); + + __syncthreads(); + + // + // Load fragments from shared memory + // + + typename SharedLoadIterator::Fragment aligned_accum_fragment[kPartitionsK]; + + shared_load_iterator_.load(aligned_accum_fragment[0]); + + // If the number of k-slices is > 1 - perform a reduction amongst the + // k-slices + if (kPartitionsK > 1) { + plus add_fragments; + + CUTLASS_PRAGMA_UNROLL + for (int i = 1; i < kPartitionsK; ++i) { + shared_load_iterator_.add_pointer_offset(kSmemPointerOffset); + shared_load_iterator_.load(aligned_accum_fragment[i]); + aligned_accum_fragment[0] = + add_fragments(aligned_accum_fragment[0], aligned_accum_fragment[i]); + } + + shared_load_iterator_.add_pointer_offset((1 - kPartitionsK) * kSmemPointerOffset); + } + + // + // Compute the output result + // + + typename OutputTileIterator::Fragment output_fragment; + + apply_output_operator_(destination_iterator.thread_start_row(), + output_fragment, + output_op, + aligned_accum_fragment[0], + source_fragment[iter % 2]); + + // + // Store the final result + // + + destination_iterator.store(output_fragment); + ++destination_iterator; + } + } + + /// Helper to invoke the output functor over each vector of output + CUTLASS_DEVICE + void apply_output_operator_(int begin_row, + typename OutputTileIterator::Fragment& output_fragment, + OutputOp const& output_op, ///< Output operator + typename SharedLoadIterator::Fragment const& aligned_accum_fragment, + typename OutputTileSourceIterator::Fragment const& source_fragment) + { + OutputAccessType* output_frag_ptr = reinterpret_cast(&output_fragment); + + AccumulatorAccessType const* compute_frag_ptr = + reinterpret_cast(&aligned_accum_fragment); + + SourceAccessType const* source_frag_ptr = + reinterpret_cast(&source_fragment); + + int const kOutputOpIterations = + OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess; + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kOutputOpIterations; ++i) { + // Call the output operator + output_frag_ptr[i] = ApplyEpilogueOp::apply( + output_op, + begin_row + getRowOffset(i * OutputTileIterator::kElementsPerAccess), + compute_frag_ptr[i], + source_frag_ptr[i]); + } + } + + /// Helper to invoke the output functor over each vector of output + CUTLASS_DEVICE + void apply_output_operator_source_not_needed_( + int begin_row, + typename OutputTileIterator::Fragment& output_fragment, + OutputOp const& output_op, ///< Output operator + typename SharedLoadIterator::Fragment const& aligned_accum_fragment) + { + OutputAccessType* 
output_frag_ptr = reinterpret_cast(&output_fragment); + + AccumulatorAccessType const* compute_frag_ptr = + reinterpret_cast(&aligned_accum_fragment); + + int const kOutputOpIterations = + OutputTileIterator::Fragment::kElements / OutputTileIterator::kElementsPerAccess; + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kOutputOpIterations; ++i) { + // Call the output operator + output_frag_ptr[i] = ApplyEpilogueOp::apply( + output_op, + begin_row + getRowOffset(i * OutputTileIterator::kElementsPerAccess), + compute_frag_ptr[i]); + } + } + + // This should be constexpr, but it's only supported on c++14 + static int CUTLASS_HOST_DEVICE getRowOffset(int i) + { + using ThreadMap = typename OutputTileIterator::ThreadMap; + + CUTLASS_PRAGMA_UNROLL + for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { + CUTLASS_PRAGMA_UNROLL + for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { + CUTLASS_PRAGMA_UNROLL + for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { + int row_offset = row * ThreadMap::Delta::kRow + + group * ThreadMap::Delta::kGroup + + cluster * ThreadMap::Delta::kCluster; + int frag_row_idx = + (row + ThreadMap::Iterations::kRow * + (group + ThreadMap::Iterations::kGroup * cluster)); + CUTLASS_PRAGMA_UNROLL + for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { + int frag_idx = ThreadMap::kElementsPerAccess * + (frag_row_idx * ThreadMap::Iterations::kColumn + column); + if (i < frag_idx + ThreadMap::kElementsPerAccess) { return row_offset; } + } + } + } + } + return -1; + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace epilogue +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_rescale_output.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_rescale_output.h new file mode 100644 index 0000000000000000000000000000000000000000..f81a09f74f1ebed8e3d46fee52ac220efd98214b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_rescale_output.h @@ -0,0 +1,251 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holdvr nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/*! \file + \brief Epilogue for threadblock scoped GEMMs using Tensor Ops. + + The epilogue rearranges the result of a matrix product through shared memory + to match canonical tensor layouts in global memory. Epilogues support + conversion and reduction operations. + + This is a copy of cutlass/epilogue/threadblock/epilogue.h that can + handle "row_id" as a first argument, as uses it to get the corresponding + `m_prime` / `s_prime` to rescale the output. +*/ + +#pragma once + +#if defined(__CUDACC_RTC__) +#include +#else +#include +#endif + +#include "cutlass/aligned_buffer.h" +#include "cutlass/array.h" +#include "cutlass/cutlass.h" +#include "cutlass/functional.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/layout/vector.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_coord.h" + +#include "cutlass/gemm/gemm.h" + +#include "cutlass/transform/pitch_linear_thread_map.h" +#include "cutlass/transform/threadblock/regular_tile_iterator.h" + +#include "cutlass/epilogue/threadblock/epilogue_base.h" +#include "cutlass/epilogue/threadblock/predicated_tile_iterator.h" +#include "cutlass/numeric_types.h" + +#include "cutlass/array.h" +#include "cutlass/cutlass.h" +#include "cutlass/epilogue/thread/scale_type.h" +#include "cutlass/functional.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/numeric_types.h" +#include "epilogue_pipelined.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace epilogue { +namespace thread { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Applies a linear combination operator to an array of elements. 
+// output <- alpha * accumulator + beta * source +// with: +// alpha = 1 / s_prime (to normalize when isLast=True, 1 otherwise) +// beta = alpha / m_prime (renormalize the output when the max changes) +// source is the current output +template , + ///< but we use 64 or 32 sometimes when there are not enough data + ///< to store + typename ElementAccumulator_, ///< Accumulator data type + typename ElementCompute_, ///< Data type used to compute linear combination + bool isFirst, + bool isLast, + typename FragmentAlphaBeta_, + FloatRoundStyle Round = FloatRoundStyle::round_to_nearest> +class MemoryEfficientAttentionNormalize { +public: + using ElementOutput = ElementOutput_; + using ElementSource = ElementSource_; + using ElementAccumulator = ElementAccumulator_; + using ElementCompute = ElementCompute_; + + static int const kCount = Count; + + using FragmentOutput = Array; + using FragmentSource = Array; + using FragmentAccumulator = Array; + using ComputeFragment = Array; + using FragmentAlphaBeta = FragmentAlphaBeta_; + + static FloatRoundStyle const kRound = Round; + +private: + // + // Data members + // + + FragmentAlphaBeta const& s_prime_; + FragmentAlphaBeta const& m_prime_; + +public: + /// Constructs the function object, possibly loading from pointers in host + /// memory + CUTLASS_HOST_DEVICE + MemoryEfficientAttentionNormalize(FragmentAlphaBeta const& s_prime, + FragmentAlphaBeta const& m_prime) + : s_prime_(s_prime), m_prime_(m_prime) + { + } + + /// Returns true if source is needed + CUTLASS_HOST_DEVICE + bool is_source_needed() const { return !isFirst; } + + /// Functionally required for serial reduction in the epilogue + CUTLASS_HOST_DEVICE + void set_k_partition(int k_partition, int k_partition_count) {} + + /// Computes linear scaling: D = alpha * accumulator + beta * source + CUTLASS_HOST_DEVICE + FragmentOutput operator()(int row, + FragmentAccumulator const& accumulator, + FragmentSource const& source) const + { + assert(!isFirst); + + // Convert source to internal compute numeric type + NumericArrayConverter source_converter; + NumericArrayConverter + accumulator_converter; + + // Convert to destination numeric type + NumericArrayConverter destination_converter; + + ComputeFragment converted_source = source_converter(source); + ComputeFragment converted_accumulator = accumulator_converter(accumulator); + + // Perform binary operations + ComputeFragment intermediate; + + multiplies mul_add_source; + multiply_add mul_add_accumulator; + + ElementCompute alpha = isLast ? (1 / s_prime_[row]) : 1; + ElementCompute beta = alpha * m_prime_[row]; + + intermediate = mul_add_source(beta, converted_source); // X = beta * C + + intermediate = mul_add_accumulator( + alpha, converted_accumulator, intermediate); // D = alpha * Accum + X + + return destination_converter(intermediate); + } + + /// Computes linear scaling: D = alpha * accumulator + CUTLASS_HOST_DEVICE + FragmentOutput operator()(int row, FragmentAccumulator const& accumulator) const + { + assert(isFirst); + + // Convert source to internal compute numeric type + NumericArrayConverter + accumulator_converter; + + // Convert to destination numeric type + NumericArrayConverter destination_converter; + + ComputeFragment converted_accumulator = accumulator_converter(accumulator); + + ComputeFragment intermediate; + multiplies mul_accumulator; + + ElementCompute alpha = isLast ? 
(1 / s_prime_[row]) : 1; + + intermediate = mul_accumulator(alpha, converted_accumulator); // X = alpha * C + uniform + + return destination_converter(intermediate); + } +}; + +} // namespace thread + +namespace threadblock { +template +struct ApplyEpilogueOp< + thread::MemoryEfficientAttentionNormalize> { + using Op = thread::MemoryEfficientAttentionNormalize; + static CUTLASS_DEVICE typename Op::FragmentOutput apply( + Op const& output_op, + int row_id, + typename Op::FragmentAccumulator const& accum, + typename Op::FragmentSource const& source) + { + return output_op(row_id, accum, source); + } + static CUTLASS_DEVICE typename Op::FragmentOutput + apply(Op const& output_op, int row_id, typename Op::FragmentAccumulator const& accum) + { + return output_op(row_id, accum); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace epilogue +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_thread_apply_logsumexp.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_thread_apply_logsumexp.h new file mode 100644 index 0000000000000000000000000000000000000000..46fb2bf17c1c1c20400244593dd8a98a91973d0d --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/epilogue/epilogue_thread_apply_logsumexp.h @@ -0,0 +1,168 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights + *reserved. SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + *this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + *ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + *LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + *CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + *SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + *INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + *CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + *ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + *POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +// Copyright (c) Microsoft Corporation. 
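// --- Illustrative sketch (editorial, not part of the patch) ---
// Scalar version of the per-row rescaling done by
// MemoryEfficientAttentionNormalize above: alpha = 1 / s_prime only on the
// last partial GEMM, beta = alpha * m_prime re-normalizes the previously
// written output when the running max changed. The concrete numbers below
// are assumptions for illustration.
#include <cstdio>

static float normalize_elem(float accum, float source, float s_prime, float m_prime,
                            bool is_first, bool is_last)
{
    float alpha = is_last ? 1.0f / s_prime : 1.0f;
    if (is_first) return alpha * accum;    // no prior output to rescale
    float beta = alpha * m_prime;
    return alpha * accum + beta * source;  // D = alpha * Accum + beta * C
}

int main()
{
    printf("%f\n", normalize_elem(2.0f, 3.0f, 4.0f, 0.5f, /*is_first=*/false, /*is_last=*/true));
    return 0;
}
// --- end sketch ---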
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/*! \file + \brief Functor performing linear combination operations used by epilogues. +*/ + +#pragma once + +#include + +#include "cutlass/array.h" +#include "cutlass/cutlass.h" +#include "cutlass/epilogue/thread/activation.h" +#include "cutlass/functional.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/numeric_types.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace epilogue { +namespace thread { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace detail { + +template +struct ArrayExponential { + CUTLASS_HOST_DEVICE + Array operator()( + Array const& input) const + { + Array result; + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < ElementsPerAccess; ++i) { result[i] = expf(input[i]); } + + return result; + } +}; + +template +struct ArrayExponential { + CUTLASS_DEVICE + Array operator()(Array const& input) const + { + Array result; + + int const kVectorCount = ElementsPerAccess / 2; + + __half2 const* input_ptr = reinterpret_cast<__half2 const*>(input.raw_data()); + __half2* res_ptr = reinterpret_cast<__half2*>(result.raw_data()); + + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kVectorCount; ++i) { res_ptr[i] = h2exp(input_ptr[i]); } + + return result; + } +}; +} // namespace detail + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Applies: +/// output <- (input - lse).exp() +template +class ApplyLogSumExp { +public: + using ElementOutput = ElementOutput_; + using ElementAccumulator = ElementAccumulator_; + using ElementCompute = ElementCompute_; + using ElementLSE = ElementLSE_; + + static int const kElementsPerAccess = ElementsPerAccess; + static int const kCount = kElementsPerAccess; + static const ScaleType::Kind kScale = cutlass::epilogue::thread::ScaleType::NoBetaScaling; + + using FragmentOutput = Array; + using FragmentAccumulator = Array; + using FragmentCompute = Array; + using FragmentLSE = Array; + using FragmentScaleBias = FragmentLSE; // Used by epilogue_smem_accumulator.h + +public: + // + // Methods + // + + CUTLASS_HOST_DEVICE + ApplyLogSumExp() {} + + /// Returns true if source is needed + CUTLASS_HOST_DEVICE + bool is_source_needed() const { return true; } + + /// Functionally required for serial reduction in the epilogue + CUTLASS_HOST_DEVICE + void set_k_partition(int k_partition, int k_partition_count) {} + + CUTLASS_HOST_DEVICE + FragmentOutput operator()(FragmentAccumulator const& AB, + FragmentLSE const& scale_unused, + // bias used as LSE + FragmentLSE const& bias) const + { + FragmentCompute frag_AB = + NumericArrayConverter()(AB); + FragmentCompute frag_lse_compute = + NumericArrayConverter()(bias); + FragmentCompute frag_compute; + + minus minus_lse; + detail::ArrayExponential apply_exp; + frag_compute = minus_lse(frag_AB, frag_lse_compute); + frag_compute = apply_exp(frag_compute); + + return NumericArrayConverter()( + frag_compute); + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace thread +} // namespace epilogue +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma.h 
b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma.h new file mode 100644 index 0000000000000000000000000000000000000000..75833bbfe7d2f3c4a91db1ddc7f4453b270db968 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma.h @@ -0,0 +1,119 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holdvr nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include "custom_mma_multistage.h" +#include "custom_mma_pipelined.h" +#include "cutlass/gemm/threadblock/mma_multistage.h" +#include "cutlass/gemm/threadblock/mma_pipelined.h" + +template +struct MakeCustomMma; + +template +struct MakeCustomMma, + kMaxK> { + // Reduce the number of stages if we don't need that many + static int constexpr kStages = + kMaxK == cutlass::platform::numeric_limits::max() + ? 
Stages + : cutlass::const_min(Stages, (kMaxK + int(Shape::kK) - 1) / int(Shape::kK)); + using Mma = cutlass::gemm::threadblock::CustomMmaMultistage; +}; + +template +struct MakeCustomMma, + kMaxK> { + using Mma = cutlass::gemm::threadblock::CustomMmaPipelined; +}; diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma_base.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma_base.h new file mode 100644 index 0000000000000000000000000000000000000000..bbf91240b90075da6f711dba55cf27e19506f898 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma_base.h @@ -0,0 +1,181 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights + *reserved. SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + *this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + *ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + *LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + *CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + *SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + *INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + *CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + *ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + *POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/*! \file + \brief Template for a double-buffered threadblock-scoped GEMM kernel. +*/ + +#pragma once + +#include "cutlass/aligned_buffer.h" +#include "cutlass/arch/memory.h" +#include "cutlass/array.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/threadblock/mma_base.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/numeric_types.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math +/// instructions. 
+template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy_, + /// Number of stages, + int Stages, + /// Used for partial specialization + typename Enable = bool> +class CustomMmaBase { +public: + ///< Size of the Gemm problem - concept: gemm::GemmShape<> + using Shape = Shape_; + + ///< Policy describing tuning details + using Policy = Policy_; + + // + // Dependent types + // + + /// Warp-level Mma + using Operator = typename Policy::Operator; + + /// Shape describing the overall GEMM computed from shared memory + /// by each warp. + using WarpGemm = typename Policy::Operator::Shape; + + /// Shape describing the number of warps filling the CTA + using WarpCount = + GemmShape; + + /// Number of warp-level GEMM oeprations + static int const kWarpGemmIterations = (WarpGemm::kK / Operator::Policy::MmaShape::kK); + + /// Number of stages + static int const kStages = Stages; + + // + // Nested structs + // + + /// Shared storage object needed by threadblock-scoped GEMM + template + struct OperandSharedStorage { + AlignedBuffer buffer; + using TensorRef = TensorRef; + + CUTLASS_DEVICE + static OperandLayout Layout() + { + return OperandLayout::packed({OperandShape::kRow, OperandShape::kColumn}); + } + + /// Returns a TensorRef to the operand + CUTLASS_HOST_DEVICE + TensorRef ref() { return TensorRef{buffer.data(), Layout()}; } + }; + + /// Shape of the A matrix operand in shared memory + using ShapeA = MatrixShape; + + /// Shape of the B matrix operand in shared memory + using ShapeB = MatrixShape; + + using SharedStorageA = + OperandSharedStorage; + using SharedStorageB = + OperandSharedStorage; + using TensorRefA = typename SharedStorageA::TensorRef; + using TensorRefB = typename SharedStorageB::TensorRef; + + struct SharedStorage { + /// Buffer for A operand + SharedStorageA operand_A; + + /// Buffer for B operand + SharedStorageB operand_B; + }; + +protected: + // + // Data members + // + + /// Iterator to load a warp-scoped tile of A operand from shared memory + typename Operator::IteratorA warp_tile_iterator_A_; + + /// Iterator to load a warp-scoped tile of B operand from shared memory + typename Operator::IteratorB warp_tile_iterator_B_; + +public: + /// Construct from tensor references + CUTLASS_DEVICE + CustomMmaBase( + ///< Shared storage needed for internal use by threadblock-scoped GEMM + SharedStorageA& shared_storageA, + SharedStorageB& shared_storageB, + ///< ID within the threadblock + int thread_idx, + ///< ID of warp + int warp_idx, + ///< ID of each thread within a warp + int lane_idx) + : warp_tile_iterator_A_(shared_storageA.ref(), lane_idx), + warp_tile_iterator_B_(shared_storageB.ref(), lane_idx) + { + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma_multistage.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma_multistage.h new file mode 100644 index 0000000000000000000000000000000000000000..3760ccab852a5ceec65fa6693070a84fec284d8a --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma_multistage.h @@ 
-0,0 +1,714 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights + *reserved. SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + *this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + *ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + *LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + *CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + *SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + *INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + *CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + *ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + *POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/*! \file + \brief Template for a double-buffered threadblock-scoped GEMM kernel. +*/ + +#pragma once + +#include "cutlass/aligned_buffer.h" +#include "cutlass/arch/cache_operation.h" +#include "cutlass/arch/memory.h" +#include "cutlass/array.h" +#include "cutlass/cutlass.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/numeric_types.h" + +#include "custom_mma_base.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math +/// instructions. 
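// --- Illustrative sketch (editorial, not part of the patch) ---
// The warp tiling arithmetic in CustomMmaBase above, spelled out: the
// threadblock tile is divided by the warp-level GEMM shape to get the warp
// count per dimension, and each warp performs WarpGemm::kK / MmaShape::kK
// warp-level GEMM iterations along K. The shapes below are assumptions, not
// taken from an actual kernel configuration.
#include <cstdio>

int main()
{
    const int tb[3] = {128, 128, 32};   // threadblock tile (M, N, K)
    const int warp[3] = {64, 64, 32};   // warp-level GEMM tile (M, N, K)
    const int mma_k = 16;               // instruction-level K per warp MMA

    printf("warps per threadblock: %d x %d x %d\n",
           tb[0] / warp[0], tb[1] / warp[1], tb[2] / warp[2]);
    printf("warp GEMM iterations along K: %d\n", warp[2] / mma_k);
    return 0;
}
// --- end sketch ---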
+template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Iterates over tiles of A operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorA_, + /// Iterates over tiles of A operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorA_, + /// Cache operation for operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Iterates over tiles of B operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorB_, + /// Iterates over tiles of B operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorB_, + /// Cache operation for operand B + cutlass::arch::CacheOperation::Kind CacheOpB, + /// Data type of accumulator matrix + typename ElementC_, + /// Data type of accumulator matrix + typename LayoutC_, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy_, + /// Number of stages, + int Stages, + /// Use zfill or predicate for out-of-bound cp.async + SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone, + /// Upper boundon the K dimension + int kMaxK = cutlass::platform::numeric_limits::max(), + /// Used for partial specialization + typename Enable = bool> +class CustomMmaMultistage : public CustomMmaBase { +public: + ///< Base class + using Base = CustomMmaBase; + ///< Size of the Gemm problem - concept: gemm::GemmShape<> + using Shape = Shape_; + ///< Iterates over tiles of A operand in global memory + using IteratorA = IteratorA_; + ///< Iterates over tiles of B operand in global memory + using IteratorB = IteratorB_; + ///< Data type of accumulator matrix + using ElementC = ElementC_; + ///< Layout of accumulator matrix + using LayoutC = LayoutC_; + ///< Policy describing tuning details + using Policy = Policy_; + + using SmemIteratorA = SmemIteratorA_; + using SmemIteratorB = SmemIteratorB_; + + static cutlass::arch::CacheOperation::Kind const kCacheOpA = CacheOpA; + static cutlass::arch::CacheOperation::Kind const kCacheOpB = CacheOpB; + + // + // Dependent types + // + + /// Fragment of accumulator tile + using FragmentC = typename Policy::Operator::FragmentC; + + /// Warp-level Mma + using Operator = typename Policy::Operator; + + /// Minimum architecture is Sm80 to support cp.async + using ArchTag = arch::Sm80; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = Operator::kTransformA; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = Operator::kTransformB; + + /// Internal structure exposed for introspection. 
+ struct Detail { + static_assert(Base::kWarpGemmIterations > 1, + "The pipelined structure requires at least two warp-level " + "GEMM operations."); + + /// Number of cp.async instructions to load one stage of operand A + static int const AsyncCopyIterationsPerStageA = IteratorA::ThreadMap::Iterations::kCount; + + /// Number of cp.async instructions to load one stage of operand B + static int const AsyncCopyIterationsPerStageB = IteratorB::ThreadMap::Iterations::kCount; + + /// Number of stages + static int const kStages = Stages; + + /// Number of cp.async instructions to load on group of operand A + static int const kAccessesPerGroupA = + (AsyncCopyIterationsPerStageA + Base::kWarpGemmIterations - 1) / + Base::kWarpGemmIterations; + + /// Number of cp.async instructions to load on group of operand B + static int const kAccessesPerGroupB = + (AsyncCopyIterationsPerStageB + Base::kWarpGemmIterations - 1) / + Base::kWarpGemmIterations; + }; + + static bool const kSmemContainsEntireMat = kMaxK <= Shape::kK * Stages; + static constexpr int kNumStagesConcurrentLoad = kSmemContainsEntireMat ? Stages : Stages - 1; + +private: + using WarpLoadedFragmentA = typename Operator::FragmentA; + using WarpLoadedFragmentB = typename Operator::FragmentB; + using WarpTransformedFragmentA = typename Operator::TransformedFragmentA; + using WarpTransformedFragmentB = typename Operator::TransformedFragmentB; + +private: + // + // Data members + // + + /// Iterator to write threadblock-scoped tile of A operand to shared memory + SmemIteratorA smem_iterator_A_; + + /// Iterator to write threadblock-scoped tile of B operand to shared memory + SmemIteratorB smem_iterator_B_; + + bool prologue_done_; + + // Set to `True` to ensure the accumulator will be zero outside the GEMM + // footprint + bool zero_outside_bounds_; + +public: + /// Construct from tensor references + CUTLASS_DEVICE + CustomMmaMultistage( + ///< Shared storage needed for internal use by threadblock-scoped GEMM + typename Base::SharedStorageA& shared_storageA, + typename Base::SharedStorageB& shared_storageB, + ///< ID within the threadblock + int thread_idx, + ///< ID of warp + int warp_idx, + ///< ID of each thread within a warp + int lane_idx) + : Base(shared_storageA, shared_storageB, thread_idx, warp_idx, lane_idx), + smem_iterator_A_(shared_storageA.ref(), thread_idx), + smem_iterator_B_(shared_storageB.ref(), thread_idx), + prologue_done_(false), + zero_outside_bounds_(false) + { + // Compute warp location within threadblock tile by mapping the warp_id to + // three coordinates: + // _m: the warp's position within the threadblock along the M dimension + // _n: the warp's position within the threadblock along the N dimension + // _k: the warp's position within the threadblock along the K dimension + + int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); + int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); + + int warp_idx_m = warp_idx_mn % Base::WarpCount::kM; + int warp_idx_n = warp_idx_mn / Base::WarpCount::kM; + + // Add per-warp offsets in units of warp-level tiles + this->warp_tile_iterator_A_.add_tile_offset( + {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); + this->warp_tile_iterator_B_.add_tile_offset( + {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); + } + CUTLASS_DEVICE + CustomMmaMultistage( + ///< Shared storage needed for internal use by threadblock-scoped GEMM + typename Base::SharedStorage& st, + ///< ID within the threadblock + int thread_idx, + ///< ID of warp + int 
warp_idx, + ///< ID of each thread within a warp + int lane_idx) + : CustomMmaMultistage(st.operand_A, st.operand_B, thread_idx, warp_idx, lane_idx) + { + } + + CUTLASS_DEVICE + bool set_prologue_done(bool value) + { + prologue_done_ = value; + return true; + } + + CUTLASS_DEVICE + bool set_zero_outside_bounds(bool value) + { + zero_outside_bounds_ = value; + return true; + } + + template + CUTLASS_DEVICE static void prologue(typename Base::SharedStorage& shared_storage, + ///< iterator over A operand in global memory + IteratorA iterator_A, + ///< iterator over B operand in global memory + IteratorB iterator_B, + int thread_idx, + int problem_size_k) + { + prologue(shared_storage.operand_A, + shared_storage.operand_B, + iterator_A, + iterator_B, + thread_idx, + problem_size_k); + } + + template + CUTLASS_DEVICE static void prologue(typename Base::SharedStorageA& shared_storageA, + typename Base::SharedStorageB& shared_storageB, + ///< iterator over A operand in global memory + IteratorA iterator_A, + ///< iterator over B operand in global memory + IteratorB iterator_B, + int thread_idx, + int problem_size_k) + { + SmemIteratorA smem_iterator_A(shared_storageA.ref(), thread_idx); + SmemIteratorB smem_iterator_B(shared_storageB.ref(), thread_idx); + int32_t iter = (problem_size_k + Base::Shape::kK - 1) / Base::Shape::kK; + _prologue(iterator_A, iterator_B, iter, smem_iterator_A, smem_iterator_B); + } + + CUTLASS_DEVICE + void copy_tiles_and_advance(IteratorA& iterator_A, + IteratorB& iterator_B, + int group_start_A = 0, + int group_start_B = 0) + { + iterator_A.set_iteration_index(group_start_A * IteratorA::kAccessesPerVector); + this->smem_iterator_A_.set_iteration_index(group_start_A); + + // Async Copy for operand A + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::kAccessesPerGroupA; ++j) { + if (group_start_A + j < Detail::AsyncCopyIterationsPerStageA) { + typename IteratorA::AccessType* dst_ptr = + reinterpret_cast(this->smem_iterator_A_.get()); + + int const kSrcBytes = sizeof_bits::value * + IteratorA::ThreadMap::kElementsPerAccess / + IteratorA::kAccessesPerVector / 8; + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { + auto gmem_ptr = iterator_A.get(); + + if (zero_outside_bounds_ || + SharedMemoryClear == SharedMemoryClearOption::kZfill) { + cutlass::arch::cp_async_zfill( + dst_ptr + v, gmem_ptr, iterator_A.valid()); + } else { + cutlass::arch::cp_async( + dst_ptr + v, gmem_ptr, iterator_A.valid()); + } + + ++iterator_A; + } + + ++this->smem_iterator_A_; + } + } + + iterator_B.set_iteration_index(group_start_B * IteratorB::kAccessesPerVector); + this->smem_iterator_B_.set_iteration_index(group_start_B); + + // Async Copy for operand B + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::kAccessesPerGroupB; ++j) { + if (group_start_B + j < Detail::AsyncCopyIterationsPerStageB) { + typename IteratorB::AccessType* dst_ptr = + reinterpret_cast(this->smem_iterator_B_.get()); + + int const kSrcBytes = sizeof_bits::value * + IteratorB::ThreadMap::kElementsPerAccess / + IteratorB::kAccessesPerVector / 8; + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { + auto gmem_ptr = iterator_B.get(); + + if (zero_outside_bounds_ || + SharedMemoryClear == SharedMemoryClearOption::kZfill) { + cutlass::arch::cp_async_zfill( + dst_ptr + v, gmem_ptr, iterator_B.valid()); + } else { + cutlass::arch::cp_async( + dst_ptr + v, gmem_ptr, iterator_B.valid()); + } + + ++iterator_B; + } + ++this->smem_iterator_B_; + } + } + } + + 
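As an aside on copy_tiles_and_advance and the Detail constants above: the sketch below is a minimal host-side illustration of how the ceil-division in kAccessesPerGroupA spreads one stage's cp.async copies across the warp-level MMA iterations, so roughly equal copy work is issued per warp_mma_k step. The iteration counts here are made-up placeholders; in the kernel they come from IteratorA::ThreadMap.

// Standalone sketch (not part of the DeepSpeed/CUTLASS sources).
#include <cstdio>

constexpr int ceil_div(int a, int b) { return (a + b - 1) / b; }

int main() {
    constexpr int kAsyncCopyIterationsPerStageA = 10;  // hypothetical
    constexpr int kWarpGemmIterations = 4;             // hypothetical
    constexpr int kAccessesPerGroupA =
        ceil_div(kAsyncCopyIterationsPerStageA, kWarpGemmIterations);  // = 3

    // Mirrors the guard in copy_tiles_and_advance: each group issues up to
    // kAccessesPerGroupA copies, skipping indices past the end of the stage.
    for (int warp_mma_k = 0; warp_mma_k < kWarpGemmIterations; ++warp_mma_k) {
        int group_start = warp_mma_k * kAccessesPerGroupA;
        for (int j = 0; j < kAccessesPerGroupA; ++j) {
            if (group_start + j < kAsyncCopyIterationsPerStageA) {
                printf("warp_mma_k=%d issues copy %d\n", warp_mma_k, group_start + j);
            }
        }
    }
    return 0;
}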
template + CUTLASS_DEVICE static void _prologue(IteratorA& iterator_A, + IteratorB& iterator_B, + int32_t& gemm_k_iterations, + SmemIteratorA& smem_iterator_A_, + SmemIteratorB& smem_iterator_B_) + { + // Issue several complete stages + CUTLASS_PRAGMA_UNROLL + for (int stage = 0; stage < kNumStagesConcurrentLoad; ++stage, --gemm_k_iterations) { + iterator_A.clear_mask(gemm_k_iterations == 0); + iterator_B.clear_mask(gemm_k_iterations == 0); + + iterator_A.set_iteration_index(0); + smem_iterator_A_.set_iteration_index(0); + + // Async Copy for operand A + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) { + typename IteratorA::AccessType* dst_ptr = + reinterpret_cast(smem_iterator_A_.get()); + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorA::kAccessesPerVector; ++v) { + int const kSrcBytes = sizeof_bits::value * + IteratorA::ThreadMap::kElementsPerAccess / + IteratorA::kAccessesPerVector / 8; + + int src_bytes = (iterator_A.valid() ? kSrcBytes : 0); + + if (kLoadA) { + cutlass::arch::cp_async_zfill( + dst_ptr + v, iterator_A.get(), iterator_A.valid()); + } + + ++iterator_A; + } + + ++smem_iterator_A_; + } + + iterator_B.set_iteration_index(0); + smem_iterator_B_.set_iteration_index(0); + + // Async Copy for operand B + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { + typename IteratorB::AccessType* dst_ptr = + reinterpret_cast(smem_iterator_B_.get()); + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorB::kAccessesPerVector; ++v) { + int const kSrcBytes = sizeof_bits::value * + IteratorB::ThreadMap::kElementsPerAccess / + IteratorB::kAccessesPerVector / 8; + + if (kLoadB) { + cutlass::arch::cp_async_zfill( + dst_ptr + v, iterator_B.get(), iterator_B.valid()); + } + + ++iterator_B; + } + + ++smem_iterator_B_; + } + + // Move to the next stage + iterator_A.add_tile_offset({0, 1}); + iterator_B.add_tile_offset({1, 0}); + + smem_iterator_A_.add_tile_offset({0, 1}); + smem_iterator_B_.add_tile_offset({1, 0}); + + // Defines the boundary of a stage of cp.async. + cutlass::arch::cp_async_fence(); + } + } + + /// Perform a threadblock-scoped matrix multiply-accumulate + CUTLASS_DEVICE + void operator()( + ///< problem size of GEMM + int gemm_k_iterations, + ///< destination accumulator tile + FragmentC& accum, + ///< iterator over A operand in global memory + IteratorA iterator_A, + ///< iterator over B operand in global memory + IteratorB iterator_B, + ///< initial value of accumulator + FragmentC const& src_accum) + { + // + // Prologue + // + + if (!prologue_done_) { + _prologue( + iterator_A, iterator_B, gemm_k_iterations, smem_iterator_A_, smem_iterator_B_); + } else if (!kSmemContainsEntireMat) { + _prologue( + iterator_A, iterator_B, gemm_k_iterations, smem_iterator_A_, smem_iterator_B_); + } else { + gemm_k_iterations -= kNumStagesConcurrentLoad; + } + + // Perform accumulation in the 'd' output operand + accum = src_accum; + + // + // Clear the remaining tiles of SMEM. This is a functional requirement for + // some kernels so that all accumulator elements outside the GEMM footprint + // are zero. 
+ // + + if (SharedMemoryClear == SharedMemoryClearOption::kClearLastStage) { + /// Iterator to write threadblock-scoped tile of A operand to shared + /// memory + SmemIteratorA last_smem_iterator_A(this->smem_iterator_A_); + + typename IteratorA::AccessType zero_A; + zero_A.clear(); + + last_smem_iterator_A.set_iteration_index(0); + + // Async Copy for operand A + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::AsyncCopyIterationsPerStageA; ++j) { + typename IteratorA::AccessType* dst_ptr = + reinterpret_cast(last_smem_iterator_A.get()); + + *dst_ptr = zero_A; + + ++last_smem_iterator_A; + } + + /// Iterator to write threadblock-scoped tile of B operand to shared + /// memory + SmemIteratorB last_smem_iterator_B(this->smem_iterator_B_); + typename IteratorB::AccessType zero_B; + + zero_B.clear(); + last_smem_iterator_B.set_iteration_index(0); + + // Async Copy for operand B + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::AsyncCopyIterationsPerStageB; ++j) { + typename IteratorB::AccessType* dst_ptr = + reinterpret_cast(last_smem_iterator_B.get()); + + *dst_ptr = zero_B; + + ++last_smem_iterator_B; + } + } + + // Waits until kStages-2 stages have committed. + cutlass::arch::cp_async_wait(); + __syncthreads(); + + // Pair of fragments used to overlap shared memory loads and math + // instructions + WarpLoadedFragmentA warp_loaded_frag_A[2]; + WarpLoadedFragmentB warp_loaded_frag_B[2]; + WarpTransformedFragmentA warp_transformed_frag_A[2]; + WarpTransformedFragmentB warp_transformed_frag_B[2]; + + Operator warp_mma; + + this->warp_tile_iterator_A_.set_kgroup_index(0); + this->warp_tile_iterator_B_.set_kgroup_index(0); + + this->warp_tile_iterator_A_.load(warp_loaded_frag_A[0]); + this->warp_tile_iterator_B_.load(warp_loaded_frag_B[0]); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_B_; + + iterator_A.clear_mask(gemm_k_iterations == 0); + iterator_B.clear_mask(gemm_k_iterations == 0); + + int smem_write_stage_idx = Base::kStages - 1; + int smem_read_stage_idx = 0; + + warp_mma.transform(warp_transformed_frag_A[0], + warp_transformed_frag_B[0], + warp_loaded_frag_A[0], + warp_loaded_frag_B[0]); + + // tf32x3 kernels use staging accumulation. warp_mma uses a temporary + // accumulator and this temporary accumulator is added to the final + // accumulator once in every mainloop iteration. + plus plus_accum; + + FragmentC tmp_accum; + + if (platform::is_same::value || + platform::is_same::value) { + tmp_accum.clear(); + } + + // + // Mainloop + // + + CUTLASS_GEMM_LOOP + for (; gemm_k_iterations > (-kNumStagesConcurrentLoad);) { + // + // Loop over GEMM K dimension + // + + // Computes a warp-level GEMM on data held in shared memory + // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate + CUTLASS_PRAGMA_UNROLL + for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) { + // Load warp-level tiles from shared memory, wrapping to k offset if + // this is the last group as the case may be. + + this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % + Base::kWarpGemmIterations); + this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % + Base::kWarpGemmIterations); + + // In case of a non-circular buffer ("kSmemContainsEntireMat") + // make sure we don't load out of bounds data. 
+ if (!kSmemContainsEntireMat || gemm_k_iterations > (-kNumStagesConcurrentLoad) || + warp_mma_k < Base::kWarpGemmIterations - 1) { + this->warp_tile_iterator_A_.load(warp_loaded_frag_A[(warp_mma_k + 1) % 2]); + this->warp_tile_iterator_B_.load(warp_loaded_frag_B[(warp_mma_k + 1) % 2]); + } + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_B_; + + if (warp_mma_k > 0) + warp_mma.transform(warp_transformed_frag_A[warp_mma_k % 2], + warp_transformed_frag_B[warp_mma_k % 2], + warp_loaded_frag_A[warp_mma_k % 2], + warp_loaded_frag_B[warp_mma_k % 2]); + + if (platform::is_same::value || + platform::is_same::value) { + warp_mma(tmp_accum, + warp_transformed_frag_A[warp_mma_k % 2], + warp_transformed_frag_B[warp_mma_k % 2], + tmp_accum); + + if (warp_mma_k == 0) { + accum = plus_accum(accum, tmp_accum); + tmp_accum.clear(); + } + } else { + warp_mma(accum, + warp_transformed_frag_A[warp_mma_k % 2], + warp_transformed_frag_B[warp_mma_k % 2], + accum); + } + + // Issue global->shared copies for the this stage + if (!kSmemContainsEntireMat && warp_mma_k < Base::kWarpGemmIterations - 1) { + int group_start_iteration_A, group_start_iteration_B; + + group_start_iteration_A = warp_mma_k * Detail::kAccessesPerGroupA; + group_start_iteration_B = warp_mma_k * Detail::kAccessesPerGroupB; + + copy_tiles_and_advance( + iterator_A, iterator_B, group_start_iteration_A, group_start_iteration_B); + } + + if (warp_mma_k + 2 == Base::kWarpGemmIterations) { + if (!kSmemContainsEntireMat) { + int group_start_iteration_A, group_start_iteration_B; + group_start_iteration_A = (warp_mma_k + 1) * Detail::kAccessesPerGroupA; + group_start_iteration_B = (warp_mma_k + 1) * Detail::kAccessesPerGroupB; + + copy_tiles_and_advance(iterator_A, + iterator_B, + group_start_iteration_A, + group_start_iteration_B); + } + + // Inserts a memory fence between stages of cp.async instructions. + cutlass::arch::cp_async_fence(); + + // Waits until kStages-2 stages have committed. 
+ cutlass::arch::cp_async_wait(); + __syncthreads(); + + // Move to the next stage + iterator_A.add_tile_offset({0, 1}); + iterator_B.add_tile_offset({1, 0}); + + this->smem_iterator_A_.add_tile_offset({0, 1}); + this->smem_iterator_B_.add_tile_offset({1, 0}); + + // Add negative offsets to return iterators to the 'start' of the + // circular buffer in shared memory + if (smem_write_stage_idx == (Base::kStages - 1)) { + this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); + this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); + smem_write_stage_idx = 0; + } else { + ++smem_write_stage_idx; + } + + if (!kSmemContainsEntireMat && smem_read_stage_idx == (Base::kStages - 1)) { + this->warp_tile_iterator_A_.add_tile_offset( + {0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations}); + this->warp_tile_iterator_B_.add_tile_offset( + {-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0}); + smem_read_stage_idx = 0; + } else { + ++smem_read_stage_idx; + } + + --gemm_k_iterations; + iterator_A.clear_mask(gemm_k_iterations == 0); + iterator_B.clear_mask(gemm_k_iterations == 0); + } + + // Do any conversions feeding the first stage at the end of the loop so + // we can start right away on mma instructions + if (warp_mma_k + 1 == Base::kWarpGemmIterations) + warp_mma.transform(warp_transformed_frag_A[(warp_mma_k + 1) % 2], + warp_transformed_frag_B[(warp_mma_k + 1) % 2], + warp_loaded_frag_A[(warp_mma_k + 1) % 2], + warp_loaded_frag_B[(warp_mma_k + 1) % 2]); + } + } + + if (platform::is_same::value || + platform::is_same::value) { + accum = plus_accum(accum, tmp_accum); + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma_pipelined.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma_pipelined.h new file mode 100644 index 0000000000000000000000000000000000000000..07b26ca3129968f8d78810df5187dd92c3c166c9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/custom_mma_pipelined.h @@ -0,0 +1,388 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights + *reserved. SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + *this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + *ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + *LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + *CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + *SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + *INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + *CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + *ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + *POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/*! \file + \brief Template for a double-buffered threadblock-scoped GEMM kernel. +*/ + +#pragma once + +#include "cutlass/aligned_buffer.h" +#include "cutlass/array.h" +#include "cutlass/cutlass.h" +#include "cutlass/numeric_conversion.h" + +#include "cutlass/matrix_shape.h" +#include "cutlass/numeric_types.h" + +#include "custom_mma_base.h" +#include "cutlass/gemm/gemm.h" + +///////////////////////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace gemm { +namespace threadblock { + +///////////////////////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math +/// instructions. +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Iterates over tiles of A operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorA_, + /// Iterates over tiles of A operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorA_, + /// Iterates over tiles of B operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorB_, + /// Iterates over tiles of B operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorB_, + /// Data type of accumulator matrix + typename ElementC_, + /// Data type of accumulator matrix + typename LayoutC_, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy_, + /// Transformation applied to A operand + typename TransformA_ = NumericArrayConverter, + /// + /// Transformation applied to B operand + typename TransformB_ = NumericArrayConverter, + /// Used for partial specialization + typename Enable = bool> +class CustomMmaPipelined : public CustomMmaBase { +public: + ///< Base class + using Base = CustomMmaBase; + + using Shape = Shape_; ///< Size of the Gemm problem - concept: gemm::GemmShape<> + using IteratorA = IteratorA_; ///< Iterates over tiles of A operand in global memory + using IteratorB = IteratorB_; ///< Iterates over tiles of B operand in global memory + using ElementC = ElementC_; ///< Data type of accumulator matrix + using LayoutC = LayoutC_; ///< Layout of accumulator matrix + using Policy = Policy_; ///< Policy describing tuning details + + using SmemIteratorA = SmemIteratorA_; + using SmemIteratorB = 
SmemIteratorB_; + + using TransformA = TransformA_; + using TransformB = TransformB_; + + // + // Dependent types + // + + /// Fragment of operand A loaded from global memory + using FragmentA = typename IteratorA::Fragment; + + /// Fragment of operand B loaded from global memory + using FragmentB = typename IteratorB::Fragment; + + /// Fragment of accumulator tile + using FragmentC = typename Policy::Operator::FragmentC; + + /// Warp-level Mma + using Operator = typename Policy::Operator; + + /// Obtain the arch tag from the warp-level operator + using ArchTag = typename Policy::Operator::ArchTag; + + /// Complex transform on A operand + static ComplexTransform const kTransformA = Operator::kTransformA; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = Operator::kTransformB; + + // staticaly assert kStages for MmaPipelined is two (Double-buffered pipeline) + static_assert((Base::kStages == 2), "MmaPipelined requires kStages set to value 2"); + + static bool const kSmemContainsEntireMat = false; + +private: + using WarpFragmentA = typename Operator::FragmentA; + using WarpFragmentB = typename Operator::FragmentB; + +protected: + /// Iterator to write threadblock-scoped tile of A operand to shared memory + SmemIteratorA smem_iterator_A_; + + /// Iterator to write threadblock-scoped tile of B operand to shared memory + SmemIteratorB smem_iterator_B_; + +public: + /// Construct from tensor references + CUTLASS_DEVICE + CustomMmaPipelined(typename Base::SharedStorageA& shared_storageA, + typename Base::SharedStorageB& shared_storageB, + int thread_idx, ///< ID within the threadblock + int warp_idx, ///< ID of warp + int lane_idx ///< ID of each thread within a warp + ) + : Base(shared_storageA, shared_storageB, thread_idx, warp_idx, lane_idx), + smem_iterator_A_(shared_storageA.ref(), thread_idx), + smem_iterator_B_(shared_storageB.ref(), thread_idx) + { + // Compute warp location within threadblock tile by mapping the warp_id to + // three coordinates: + // _m: the warp's position within the threadblock along the M dimension + // _n: the warp's position within the threadblock along the N dimension + // _k: the warp's position within the threadblock along the K dimension + + int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); + int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); + + int warp_idx_m = warp_idx_mn % Base::WarpCount::kM; + int warp_idx_n = warp_idx_mn / Base::WarpCount::kM; + + // Add per-warp offsets in units of warp-level tiles + this->warp_tile_iterator_A_.add_tile_offset( + {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); + this->warp_tile_iterator_B_.add_tile_offset( + {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); + } + CUTLASS_DEVICE + CustomMmaPipelined( + ///< Shared storage needed for internal use by threadblock-scoped GEMM + typename Base::SharedStorage& st, + ///< ID within the threadblock + int thread_idx, + ///< ID of warp + int warp_idx, + ///< ID of each thread within a warp + int lane_idx) + : CustomMmaPipelined(st.operand_A, st.operand_B, thread_idx, warp_idx, lane_idx) + { + } + + CUTLASS_DEVICE + bool set_prologue_done(bool value) + { + // NOT IMPLEMENTED FOR PIPELINED + } + + CUTLASS_DEVICE + bool set_zero_outside_bounds(bool value) + { + // NOT NEEDED FOR PIPELINED + // shared memory will always be zero-filled + } + + template + CUTLASS_DEVICE static void prologue(typename Base::SharedStorage& shared_storage, + ///< iterator over A operand in global memory + IteratorA 
iterator_A, + ///< iterator over B operand in global memory + IteratorB iterator_B, + int thread_idx, + int problem_size_k) + { + prologue(shared_storage.operand_A, + shared_storage.operand_B, + iterator_A, + iterator_B, + thread_idx, + problem_size_k); + } + + template + CUTLASS_DEVICE static void prologue(typename Base::SharedStorageA& shared_storageA, + typename Base::SharedStorageB& shared_storageB, + ///< iterator over A operand in global memory + IteratorA iterator_A, + ///< iterator over B operand in global memory + IteratorB iterator_B, + int thread_idx, + int problem_size_k) + { + // NOT IMPLEMENTED FOR PIPELINED + } + + /// Perform a threadblock-scoped matrix multiply-accumulate + CUTLASS_DEVICE + void operator()( + int gemm_k_iterations, ///< number of iterations of the mainloop + FragmentC& accum, ///< destination accumulator tile + IteratorA iterator_A, ///< iterator over A operand in global memory + IteratorB iterator_B, ///< iterator over B operand in global memory + FragmentC const& src_accum, ///< source accumulator tile + TransformA transform_A = TransformA(), ///< transformation applied to A fragment + TransformB transform_B = TransformB()) + { ///< transformation applied to B fragment + + // + // Prologue + // + + // Perform accumulation in the 'd' output operand + accum = src_accum; + + FragmentA tb_frag_A; + FragmentB tb_frag_B; + + tb_frag_A.clear(); + tb_frag_B.clear(); + + // The last kblock is loaded in the prolog + iterator_A.load(tb_frag_A); + iterator_B.load(tb_frag_B); + + ++iterator_A; + ++iterator_B; + + this->smem_iterator_A_.store(transform_A(tb_frag_A)); + this->smem_iterator_B_.store(transform_B(tb_frag_B)); + + ++this->smem_iterator_A_; + ++this->smem_iterator_B_; + + __syncthreads(); + + // Pair of fragments used to overlap shared memory loads and math + // instructions + WarpFragmentA warp_frag_A[2]; + WarpFragmentB warp_frag_B[2]; + + this->warp_tile_iterator_A_.set_kgroup_index(0); + this->warp_tile_iterator_B_.set_kgroup_index(0); + + this->warp_tile_iterator_A_.load(warp_frag_A[0]); + this->warp_tile_iterator_B_.load(warp_frag_B[0]); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_B_; + + Operator warp_mma; + + int smem_write_stage_idx = 1; + + // Avoid reading out of bounds + iterator_A.clear_mask(gemm_k_iterations <= 1); + iterator_B.clear_mask(gemm_k_iterations <= 1); + + // Issue loads during the first warp-level matrix multiply-add *AFTER* + // issuing shared memory loads (which have the tightest latency requirement). + + // + // Mainloop + // + + // Note: The main loop does not support Base::kWarpGemmIterations == 2. + CUTLASS_GEMM_LOOP + for (; gemm_k_iterations > 0; --gemm_k_iterations) { + // + // Loop over GEMM K dimension + // + + CUTLASS_PRAGMA_UNROLL + for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) { + // Load warp-level tiles from shared memory, wrapping to k offset if + // this is the last group as the case may be. 
+ + if (warp_mma_k == Base::kWarpGemmIterations - 1) { + // Write fragments to shared memory + this->smem_iterator_A_.store(transform_A(tb_frag_A)); + + this->smem_iterator_B_.store(transform_B(tb_frag_B)); + + __syncthreads(); + + ++this->smem_iterator_A_; + ++this->smem_iterator_B_; + + // Add negative offsets to return iterators to the 'start' of the + // circular buffer in shared memory + if (smem_write_stage_idx == 1) { + this->smem_iterator_A_.add_tile_offset({0, -Base::kStages}); + this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); + } else { + this->warp_tile_iterator_A_.add_tile_offset( + {0, -Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations}); + this->warp_tile_iterator_B_.add_tile_offset( + {-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0}); + } + + smem_write_stage_idx ^= 1; + } + + this->warp_tile_iterator_A_.set_kgroup_index((warp_mma_k + 1) % + Base::kWarpGemmIterations); + this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % + Base::kWarpGemmIterations); + + this->warp_tile_iterator_A_.load(warp_frag_A[(warp_mma_k + 1) % 2]); + this->warp_tile_iterator_B_.load(warp_frag_B[(warp_mma_k + 1) % 2]); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_B_; + + if (warp_mma_k == 0) { + iterator_A.load(tb_frag_A); + iterator_B.load(tb_frag_B); + + ++iterator_A; + ++iterator_B; + + // Avoid reading out of bounds if this was the last loop iteration + iterator_A.clear_mask(gemm_k_iterations <= 2); + iterator_B.clear_mask(gemm_k_iterations <= 2); + } + + warp_mma(accum, warp_frag_A[warp_mma_k % 2], warp_frag_B[warp_mma_k % 2], accum); + } + } + } +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/find_default_mma.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/find_default_mma.h new file mode 100644 index 0000000000000000000000000000000000000000..163dcbf852595c2f97fe950b5be6576d938d4efb --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/find_default_mma.h @@ -0,0 +1,191 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holdvr nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/*! \file + \brief Cutlass provides helper template functions to figure out the right + datastructures to instantiate to run a GEMM with various parameters (see + `cutlass/gemm/threadblock/default_mma.h`). However, due to template + instantiation priority rules, it will only create an MmaMultiStage with + kStages=3 (otherwise creates an MmePipelined - which is not compatible with + FastF32). kStages=3 uses too much shared memory and we want to use kStages=2, + so we just copy-pasted some code from `default_mma.h` and + `default_mma_core.h` files and wrapped this template to allow our usecase. + + This is really only for the FastF32 case - aka using TensorCores with fp32. +*/ + +#pragma once + +#include "cutlass/gemm/threadblock/default_mma.h" +#include "cutlass/gemm/threadblock/default_mma_core_simt.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm70.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm75.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm80.h" + +namespace cutlass { +namespace gemm { +namespace threadblock { + +template < + /// Element type for A matrix operand + typename ElementA, + /// Layout type for A matrix operand + typename LayoutA, + /// Access granularity of A matrix in units of elements + int kAlignmentA, + /// Element type for B matrix operand + typename ElementB, + /// Layout type for B matrix operand + typename LayoutB, + /// Access granularity of B matrix in units of elements + int kAlignmentB, + /// Element type for internal accumulation + typename ElementAccumulator, + /// Layout type for C and D matrix operand + typename LayoutC, + /// Operator class tag + typename OperatorClass, + /// Tag indicating architecture to tune for + typename ArchTag, + /// Threadblock-level tile size (concept: GemmShape) + typename ThreadblockShape, + /// Warp-level tile size (concept: GemmShape) + typename WarpShape, + /// Instruction-level tile size (concept: GemmShape) + typename InstructionShape, + /// Number of stages used in the pipelined mainloop + int Stages, + /// Operation performed by GEMM + typename Operator, + typename Enable_ = void> +struct FindDefaultMma { + static constexpr bool AccumulatorsInRowMajor = false; + static constexpr SharedMemoryClearOption SharedMemoryClear = SharedMemoryClearOption::kNone; + using DefaultMma = cutlass::gemm::threadblock::DefaultMma; +}; + +/// Specialization for sm80 / FastF32 / multistage with kStages=2 +template +struct FindDefaultMma 1)>::type> { + using LayoutC = layout::RowMajor; + using OperatorClass = 
arch::OpClassTensorOp; + using ArchTag = arch::Sm80; + + using DefaultMma_ = cutlass::gemm::threadblock::DefaultMma; + struct DefaultMma : DefaultMma_ { + using MmaCore_ = typename DefaultMma_::MmaCore; + // Define the threadblock-scoped multistage matrix multiply + using ThreadblockMma = + cutlass::gemm::threadblock::MmaMultistage; + }; +}; + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/mma_accum_lambda_iterator.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/mma_accum_lambda_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..5e2f0cf681bfe10fdc30c2dfcbe390c5f711dae0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/mma_accum_lambda_iterator.h @@ -0,0 +1,347 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holdvr nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include "cutlass/functional.h" +#include "cutlass/gemm/warp/mma_simt_tile_iterator.h" +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm70.h" +#include "cutlass/gemm/warp/mma_tensor_op_tile_iterator_sm80.h" +#include "cutlass/matrix_shape.h" + +/* +TensorCores have different accumulator layouts. +This file provides a class to easily map the accumulator +i-th element with the corresponding matrix row/col. 
+*/ + +template +struct AccumLambdaIteratorSm80 { + static_assert(cutlass::platform::is_same::value, + "only RowMajor is supported"); + + using Policy = typename T::Policy; + using InstructionShape = typename T::InstructionShape; + using OpDelta = typename T::OpDelta; + using Shape = typename T::Shape; + static int const kElementsPerAccess = InstructionShape::kN / 4; + static int const kRowsPerTile = 8; + static int const kAccumulatorRows = InstructionShape::kM / kRowsPerTile; + + static cutlass::MatrixCoord CUTLASS_DEVICE + get_lane_offset(int8_t lane_id, int8_t warp_id, typename T::TensorCoord const& tile_offset) + { + int quad = (lane_id >> 2); + int lane_in_quad = (lane_id & 3); + return cutlass::MatrixCoord( + quad + tile_offset.row() * Shape::kRow, + lane_in_quad * kElementsPerAccess + tile_offset.column() * Shape::kColumn); + } + + template + CUTLASS_DEVICE static void iterateRows(cutlass::MatrixCoord& lane_offset, + FA beginRow, + FB op, + FC endRow) + { + // See cutlass/gemm/warp/mma_tensor_op_tile_iterator.h + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { + CUTLASS_PRAGMA_UNROLL + for (int row = 0; row < kAccumulatorRows; ++row) { + int accum_m = mma_m * InstructionShape::kM * OpDelta::kRow + row * kRowsPerTile + + lane_offset.row(); + beginRow(accum_m); + + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { + int mma_accum_start = kAccumulatorRows * kElementsPerAccess * + (mma_n * Policy::MmaIterations::kRow + mma_m); + CUTLASS_PRAGMA_UNROLL + for (int col = 0; col < kElementsPerAccess; ++col) { + int accum_n = mma_n * InstructionShape::kN * OpDelta::kColumn + col + + lane_offset.column(); + int idx = mma_accum_start + row * kElementsPerAccess + col; + op(accum_m, accum_n, idx); + } + } + + endRow(accum_m); + } + } + } + + template + CUTLASS_DEVICE static bool reduceSameRow(int lane_id, DT& myValue, F fn) + { + // In each warp, 4 threads will work on the same row + // - the ones with the same `quad` + auto otherV = __shfl_xor_sync(0xffffffff, myValue, 1); + myValue = fn(myValue, otherV); + otherV = __shfl_xor_sync(0xffffffff, myValue, 2); + myValue = fn(myValue, otherV); + int lane_in_quad = (lane_id & 3); + return lane_in_quad == 0; + } +}; + +template +struct AccumLambdaIteratorSm70 { + static_assert(cutlass::platform::is_same::value, + "only RowMajor is supported"); + + using Policy = typename T::Policy; + using InstructionShape = typename T::InstructionShape; + using OpDelta = typename T::OpDelta; + using Shape = typename T::Shape; + using Element = accum_t; + + static int const kElementsPerPartial = 4; + using EleShapePerPatial = + typename cutlass::platform::conditional::value, + cutlass::MatrixShape<2, 2>, + cutlass::MatrixShape<1, 4>>::type; + static int const kElementsPerMma = 8; + static int const kAccumulatorPatials = 2; + using QuadShapePerPatialMma = cutlass::MatrixShape<4, 4>; + + static cutlass::MatrixCoord CUTLASS_DEVICE + get_lane_offset(int8_t lane_id, int8_t warp_id, typename T::TensorCoord const& tile_offset) + { + int quad = (lane_id >> 2); + int lane_in_quad = (lane_id & 3); + int accum_m, accum_n; + + if (cutlass::platform::is_same::value) { + // (quad[2],quad[0])+lane_in_quad[0] + accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + (lane_in_quad & 1); + // (quad[1])+lane_in_quad[1] + accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials + + (lane_in_quad & 2); + } else { + accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + lane_in_quad; // 
(quad[2],quad[0]) + accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials; + } + return cutlass::MatrixCoord(accum_m + tile_offset.row() * Shape::kRow, + accum_n + tile_offset.column() * Shape::kColumn); + } + + template + CUTLASS_DEVICE static bool reduceSameRow(int lane_id, DT& myValue, F fn) + { + static_assert(cutlass::platform::is_same::value, + "update to support non-float accum"); + // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-fragment-mma-884-f16 + // T0 & T2 share same line within a quad + auto otherV = __shfl_xor_sync(0xffffffff, myValue, 1 << 1); + myValue = fn(myValue, otherV); + // quad 0 and quad 2 are on the same lines + otherV = __shfl_xor_sync(0xffffffff, myValue, 1 << 3); + myValue = fn(myValue, otherV); + return (lane_id & ((1 << 1) | (1 << 3))) == 0; + } + + template + CUTLASS_DEVICE static void iterateRows(cutlass::MatrixCoord& lane_offset, + FA beginRow, + FB op, + FC endRow) + { + CUTLASS_PRAGMA_UNROLL + for (int tile_m = 0; tile_m < Policy::TileIterations::kRow; ++tile_m) { + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < EleShapePerPatial::kRow; ++m) { + int accum_m = tile_m * Policy::InterleavedTile::kRow + + mma_m * QuadShapePerPatialMma::kRow + m * 2 + lane_offset.row(); + beginRow(accum_m); + + CUTLASS_PRAGMA_UNROLL + for (int tile_n = 0; tile_n < Policy::TileIterations::kColumn; ++tile_n) { + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { + CUTLASS_PRAGMA_UNROLL + for (int p = 0; p < kAccumulatorPatials; ++p) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < EleShapePerPatial::kColumn; ++n) { + int mma_accum_start = + (((tile_n * Policy::TileIterations::kRow + tile_m) * + Policy::MmaIterations::kColumn + + mma_n) * + Policy::MmaIterations::kRow + + mma_m) * + kElementsPerMma; + int accum_n = tile_n * Policy::InterleavedTile::kColumn + + mma_n * QuadShapePerPatialMma::kColumn + + p * Policy::InterleavedTile::kColumn / 2 + n + + lane_offset.column(); + int idx = mma_accum_start + p * kElementsPerPartial + + m * EleShapePerPatial::kColumn + n; + op(accum_m, accum_n, idx); + } + } + } + } + endRow(accum_m); + } + } + } + } +}; + +template +struct AccumLambdaIteratorSimt { + using Policy = typename T::Policy; + using Iterations = typename T::Iterations; + using Element = typename T::Element; + using Delta = typename T::Delta; + using Shape = typename T::Shape; + static_assert(cutlass::platform::is_same::value, + "only RowMajor is supported"); + + template + CUTLASS_DEVICE static bool reduceSameRow(int lane_id, DT& myValue, F fn) + { + CUTLASS_PRAGMA_UNROLL + for (int bit = 1; bit < Policy::WarpShape::kColumn; bit *= 2) { + auto otherV = __shfl_xor_sync(0xffffffff, myValue, bit); + myValue = fn(myValue, otherV); + } + return (lane_id & (Policy::WarpShape::kColumn - 1)) == 0; + } + + template + CUTLASS_DEVICE static void iterateRows(cutlass::MatrixCoord& lane_offset, + FA beginRow, + FB op, + FC endRow) + { + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Iterations::kRow; ++mma_m) { + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < Policy::LaneMmaShape::kM; ++m) { + int accum_m = mma_m * Delta::kRow + m + lane_offset.row(); + beginRow(accum_m); + + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Iterations::kColumn; ++mma_n) { + int accum_n = mma_n * Policy::WarpShape::kColumn * Policy::LaneMmaShape::kN + + lane_offset.column(); + CUTLASS_PRAGMA_UNROLL + for (int n 
= 0; n < Policy::LaneMmaShape::kN; ++n) { + int idx = n + Policy::LaneMmaShape::kN * + (mma_n + Iterations::kColumn * + (m + mma_m * Policy::LaneMmaShape::kM)); + op(accum_m, accum_n + n, idx); + } + } + endRow(accum_m); + } + } + } + + static cutlass::MatrixCoord CUTLASS_DEVICE + get_lane_offset(int8_t lane_id, int8_t warp_id, typename T::TensorCoord const& tile_offset) + { + static_assert(cutlass::platform::is_same>::value, + ""); + typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); + + cutlass::MatrixCoord lane_offset = + lane_layout.inverse(lane_id) * + cutlass::MatrixCoord(Policy::LaneMmaShape::kM, Policy::LaneMmaShape::kN); + return lane_offset + tile_offset * cutlass::MatrixCoord(Shape::kRow, Shape::kColumn); + } +}; + +template +struct DefaultMmaAccumLambdaIterator; + +// Simt +template +struct DefaultMmaAccumLambdaIterator< + cutlass::gemm::warp::MmaSimtTileIterator, + accum_t, + kWarpSize> { + using WarpIterator = + typename cutlass::gemm::warp::MmaSimtTileIterator; + using Iterator = AccumLambdaIteratorSimt; +}; + +// TensorOp - Volta +template +struct DefaultMmaAccumLambdaIterator< + cutlass::gemm::warp::MmaVoltaTensorOpAccumulatorTileIterator>, + accum_t, + kWarpSize> { + using WarpIterator = typename cutlass::gemm::warp::MmaVoltaTensorOpAccumulatorTileIterator< + S1, + accum_t, + cutlass::layout::RowMajor, + S2, + cutlass::MatrixShape<1, 1>>; + using Iterator = AccumLambdaIteratorSm70; +}; + +// TensorOp - Sm75+ +template +struct DefaultMmaAccumLambdaIterator< + cutlass::gemm::warp:: + MmaTensorOpAccumulatorTileIterator, + accum_t, + kWarpSize> { + using WarpIterator = typename cutlass::gemm::warp:: + MmaTensorOpAccumulatorTileIterator; + using Iterator = AccumLambdaIteratorSm80; +}; diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/mma_from_smem.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/mma_from_smem.h new file mode 100644 index 0000000000000000000000000000000000000000..40d3265c7a638396ca5c12282f7da56fa63c925b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm/mma_from_smem.h @@ -0,0 +1,1939 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights + *reserved. SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + *this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + *ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + *LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + *CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + *SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + *INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + *CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + *ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + *POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/*! \file + \brief Template for a double-buffered threadblock-scoped GEMM kernel. +*/ + +#pragma once + +#include "cutlass/aligned_buffer.h" +#include "cutlass/arch/memory.h" +#include "cutlass/array.h" +#include "cutlass/cutlass.h" +#include "cutlass/epilogue/thread/linear_combination.h" +#include "cutlass/epilogue/threadblock/default_epilogue_simt.h" +#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h" +#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h" +#include "cutlass/functional.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/gemm/warp/mma_tensor_op_fragment_iterator.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/numeric_types.h" +#include "cutlass/platform/platform.h" +#include "cutlass/transform/threadblock/vector_iterator.h" + +#include "../epilogue/epilogue_thread_apply_logsumexp.h" +#include "../gemm/mma_accum_lambda_iterator.h" +#include "../gemm_kernel_utils.h" +#include "../iterators/make_residual_last.h" +#include "../iterators/transpose_warp_iterator.h" +#include "../iterators/warp_iterator_from_smem.h" +#include "cutlass/epilogue/threadblock/epilogue_smem_accumulator.h" +#include "cutlass/gemm/threadblock/mma_base.h" +#include "cutlass/gemm/threadblock/mma_multistage.h" +#include "cutlass/gemm/threadblock/mma_pipelined.h" +#include "cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h" + +namespace cutlass { +namespace gemm { +namespace threadblock { + +/// Shared storage object needed by accumulator +/// From 13_two_tensor_op_fusion/threadblock/b2b_mma_base_smem_accumulator.h +template +class AccumulatorSharedStorage { +public: + // + // Type definitions + // + using Shape = Shape_; + using Element = Element_; + using Layout = Layout_; + using Padding = Padding_; + + /// Tensor reference to the accumulator + using TensorRefAccum = cutlass::TensorRef; + + /// Shape of the accumulator matrix in shared memory + using ShapeAccum = + cutlass::MatrixShape; + +public: + // + // Data members + // + + /// Buffer for accumulator + cutlass::AlignedBuffer accum; + +public: + // + // Methods + // + + /// Returns a layout object for the Accum matrix + CUTLASS_DEVICE + static Layout LayoutAccum() { return Layout::packed({ShapeAccum::kRow, ShapeAccum::kColumn}); } + + /// Returns a TensorRef to the Accumulator + CUTLASS_HOST_DEVICE + TensorRefAccum accum_ref() { return TensorRefAccum{accum.data(), LayoutAccum()}; } +}; + +//////////////////////////////////////////////////////////////////////////////// +// Taken from +// https://github.com/NVIDIA/cutlass/blob/master/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_base_smem_accumulator.h +//////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the 
matrix product targeting CUDA cores and SIMT math +/// instructions. +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + // Maximum value for K + int kMaxK, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy_, + /// Number of stages, + int Stages, + /// Used for partial specialization + typename Enable = bool> +class MmaBaseFromSharedMemory { +public: + ///< Size of the Gemm problem - concept: gemm::GemmShape<> + using Shape = Shape_; + + ///< Policy describing tuning details + using Policy = Policy_; + + // + // Dependent types + // + + /// Warp-level Mma + using Operator = typename Policy::Operator; + + /// Shape describing the overall GEMM computed from shared memory + /// by each warp. + using WarpGemm = typename Policy::Operator::Shape; + + /// Shape describing the number of warps filling the CTA + using WarpCount = + GemmShape; + using WarpCount1 = WarpCount; + + /// Number of warp-level GEMM oeprations + static int const kWarpGemmIterations = (WarpGemm::kK / Operator::Policy::MmaShape::kK); + static int const kWarpGemmIterations1 = kWarpGemmIterations; + + /// Number of stages + static int const kStages = Stages; + + /// If this is true, we fill the entire shmem buffer at start + /// and don't need to iterate through it in a circular fashion + static bool const kSmemContainsEntireB = kMaxK <= Shape::kK * kStages; + + /// Tensor reference to the A operand + using TensorRefA = TensorRef; + + /// Tensor reference to the B operand + using TensorRefB = TensorRef; + + // + // Nested structs + // + + /// Shared storage object needed by threadblock-scoped GEMM + class SharedStorage { + public: + // + // Type definitions + // + + /// Shape of the B matrix operand in shared memory + using ShapeB = MatrixShape; + + public: + // + // Data members + // + + /// Buffer for B operand + AlignedBuffer operand_B; + + public: + // + // Methods + // + + /// Returns a layout object for the B matrix + CUTLASS_HOST_DEVICE + static typename Operator::LayoutB LayoutB() + { + return Operator::LayoutB::packed({ShapeB::kRow, ShapeB::kColumn}); + } + + /// Returns a TensorRef to the B operand + CUTLASS_HOST_DEVICE + TensorRefB operand_B_ref() { return TensorRefB{operand_B.data(), LayoutB()}; } + }; + +protected: + // + // Data members + // + + // /// Iterator to load a warp-scoped tile of A operand from shared memory + // typename Operator::IteratorA warp_tile_iterator_A_; + + /// Iterator to load a warp-scoped tile of B operand from shared memory + typename Operator::IteratorB warp_tile_iterator_B_; + +public: + /// Construct from tensor references + CUTLASS_DEVICE + MmaBaseFromSharedMemory( + ///< Shared storage needed for internal use by threadblock-scoped GEMM + SharedStorage& shared_storage, + ///< ID within the threadblock + int thread_idx, + ///< ID of warp + int warp_idx, + ///< ID of each thread within a warp + int lane_idx) + : warp_tile_iterator_B_(shared_storage.operand_B_ref(), lane_idx) + { + } +}; + +namespace { + +// has necessary trait compliance with WarpIteratorFromSmem but doesn't do +// anything, can be default initialized, and uses fragment that takes up +// (almost) no space. this warp iterator is selected at compile time when +// elementwise on-the-fly scaling for operand A is disabled, in which case +// operations related to loading scale factors for operand A get wiped out by +// the compiler. 
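Before the actual iterator class below, a standalone sketch of this compile-time selection idea: a conditional type alias (std::conditional here; the file itself uses cutlass::platform::conditional) picks either a real scale iterator or a do-nothing stub whose fragment holds a single element, so when scaling is disabled the scale-loading path costs essentially nothing. The types, element counts, and names in this sketch are illustrative only.

// Standalone sketch (not part of the diff).
#include <array>
#include <cstdio>
#include <type_traits>

struct RealScaleIterator {
    using Fragment = std::array<float, 8>;
    void load(Fragment& f) const { f.fill(2.0f); }  // stand-in for a smem load
};

struct NoOpScaleIterator {
    using Fragment = std::array<float, 1>;          // minimal, unused storage
    void load(Fragment&) const {}                   // does nothing
};

template <bool ScaleOperandA>
void demo() {
    using Iterator = typename std::conditional<ScaleOperandA,
                                               RealScaleIterator,
                                               NoOpScaleIterator>::type;
    Iterator it;
    typename Iterator::Fragment scale_frag{};
    it.load(scale_frag);                            // no-op when scaling is off
    printf("fragment elements: %zu\n", scale_frag.size());
}

int main() {
    demo<true>();   // prints 8
    demo<false>();  // prints 1
    return 0;
}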
+template <typename TensorRef>
+class NoOpWarpIteratorScale {
+public:
+    // in pipelined+multistage MMA implementations we keep an array of fragments.
+    // if we aren't using scaling we don't want to waste registers on fragments
+    // of scale elements, so ideally this would be sized 0.
+    // Since arrays of zero-sized objects are not allowed, using size as 1.
+    // The compiler will most likely wipe it out anyways.
+    using Fragment = cutlass::Array<char, 1>;
+
+    CUTLASS_HOST_DEVICE
+    NoOpWarpIteratorScale() {}
+
+    CUTLASS_HOST_DEVICE
+    NoOpWarpIteratorScale(TensorRef const&, int) {}
+
+    CUTLASS_HOST_DEVICE
+    NoOpWarpIteratorScale& add_tile_offset(typename TensorRef::TensorCoord const&) { return *this; }
+
+    CUTLASS_HOST_DEVICE
+    NoOpWarpIteratorScale& operator++() { return *this; }
+
+    CUTLASS_DEVICE
+    void load(Fragment&) const {}
+};
+
+// if scaling is enabled, performs fragment elementwise multiplication between
+// fragment and its scaling factor.
+template <typename Fragment, typename FragmentScale, bool ScalingEnabled>
+class FragmentElementwiseScaler;
+
+// specialization for scaling being enabled.
+template <typename Fragment, typename FragmentScale>
+class FragmentElementwiseScaler<Fragment, FragmentScale, true> {
+public:
+    // cast scale_frag to correct type then apply elementwise to fragment
+    CUTLASS_DEVICE
+    static Fragment apply(Fragment frag, FragmentScale const& scale_frag)
+    {
+        Fragment converted_scale_frag =
+            cutlass::NumericArrayConverter<typename Fragment::Element,
+                                           typename FragmentScale::Element,
+                                           FragmentScale::kElements>()(scale_frag);
+        return cutlass::multiplies<Fragment>()(frag, converted_scale_frag);
+    }
+};
+
+// specialization for scaling being disabled. doesn't do anything and should
+// just get wiped out by the compiler.
+template <typename Fragment, typename FragmentScale>
+class FragmentElementwiseScaler<Fragment, FragmentScale, false> {
+public:
+    CUTLASS_DEVICE
+    static Fragment apply(Fragment frag, FragmentScale const&) { return frag; }
+};
+}  // namespace
+
+////////////////////////////////////////////////////////////////////////////////
+// Taken from
+// https://github.com/NVIDIA/cutlass/blob/master/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_pipelined_smem_accumulator.h
+////////////////////////////////////////////////////////////////////////////////
+
+/// Structure to compute the matrix product targeting CUDA cores and SIMT math
+/// instructions.
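// [Editorial sketch - not part of the DeepSpeed/CUTLASS sources in this patch.]
// FragmentElementwiseScaler above uses partial specialization on a bool so that
// the "scaling disabled" case is an identity apply() the optimizer removes
// entirely. The same shape in standard C++, with plain std::array fragments and
// an invented name (ElementwiseScalerSketch), is shown here; the pipelined MMA
// introduced by the comment above continues below.
#include <array>
#include <cstddef>

template <typename Fragment, typename FragmentScale, bool ScalingEnabled>
struct ElementwiseScalerSketch;

// scaling enabled: cast each scale element to the fragment's value type, then multiply
template <typename Fragment, typename FragmentScale>
struct ElementwiseScalerSketch<Fragment, FragmentScale, true> {
    static Fragment apply(Fragment frag, FragmentScale const& scale)
    {
        for (std::size_t i = 0; i < frag.size(); ++i) {
            frag[i] *= static_cast<typename Fragment::value_type>(scale[i]);
        }
        return frag;
    }
};

// scaling disabled: identity, wiped out by the compiler
template <typename Fragment, typename FragmentScale>
struct ElementwiseScalerSketch<Fragment, FragmentScale, false> {
    static Fragment apply(Fragment frag, FragmentScale const&) { return frag; }
};

// usage:
//   std::array<float, 8> f{}, s{};
//   f = ElementwiseScalerSketch<decltype(f), decltype(s), true>::apply(f, s);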
+template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + // BEGIN smem + /// Iterates over the intermediate accumulator tile in shared memory + typename WarpIteratorA, + /// whether or not to perform elementwise multiplication of A + // by another matrix (A_scale) that is also kept in shared memory prior + // to matmul A @ B + bool ScaleOperandA_, + // Accumulator type + typename AccumulatorSharedStorage, + // END smem + /// Iterates over tiles of B operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorB_, + /// Iterates over tiles of B operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorB_, + /// Data type of accumulator matrix + typename ElementC_, + /// Data type of accumulator matrix + typename LayoutC_, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy_, + /// Transformation applied to B operand + typename TransformB_ = NumericArrayConverter, + /// Used for partial specialization + typename Enable = bool> +class MmaPipelinedFromSharedMemory + : public MmaBaseFromSharedMemory { +public: + ///< Base class + using Base = MmaBaseFromSharedMemory; + + using Shape = Shape_; ///< Size of the Gemm problem - concept: gemm::GemmShape<> + static constexpr bool ScaleOperandA = ScaleOperandA_; + + ///< loads fragments of A_scale from shared memory if operand A scaling is + ///< enabled. otherwise no-op. + using WarpIteratorAScale = typename cutlass::platform::conditional< + ScaleOperandA, + WarpIteratorA, + NoOpWarpIteratorScale>::type; + + using IteratorB = IteratorB_; ///< Iterates over tiles of B operand in global memory + using ElementC = ElementC_; ///< Data type of accumulator matrix + using LayoutC = LayoutC_; ///< Layout of accumulator matrix + using Policy = Policy_; ///< Policy describing tuning details + + using SmemIteratorB = SmemIteratorB_; + + using TransformB = TransformB_; + + // + // Dependent types + // + + /// Fragment of operand B loaded from global memory + using FragmentB = typename IteratorB::Fragment; + + /// Fragment of accumulator tile + using FragmentC = typename Policy::Operator::FragmentC; + + /// Warp-level Mma + using Operator = typename Policy::Operator; + + /// Obtain the arch tag from the warp-level operator + using ArchTag = typename Policy::Operator::ArchTag; + + /// Complex transform on B operand + static ComplexTransform const kTransformB = Operator::kTransformB; + + // staticaly assert kStages for MmaPipelined is two (Double-buffered pipeline) + static_assert((Base::kStages == 2), "MmaPipelined requires kStages set to value 2"); + +private: + using WarpFragmentA = typename Operator::FragmentA; + + /// fragment type of OperandA elementwise scaling matrix. (almost) empty + /// if operand A scaling is disabled. + using WarpFragmentAScale = typename WarpIteratorAScale::Fragment; + + using WarpFragmentB = typename Operator::FragmentB; + + /// applies scaling factor to operand A fragment if operand A scaling is + /// enabled. otherwise no-op. 
+ using FragmentAScaler = + FragmentElementwiseScaler; + +protected: + // /// Iterator to write threadblock-scoped tile of A operand to shared memory + // SmemIteratorA smem_iterator_A_; + + /// Iterator to write threadblock-scoped tile of B operand to shared memory + SmemIteratorB smem_iterator_B_; + + /// Iterator to load a warp-scoped tile of A operand from intermediate + /// accumulator tile + WarpIteratorA warp_tile_iterator_A_; + + /// Iterator to load a warp-scoped tile of A_scale from intermediate + /// accumulator tile (only used if ScaleOperandA_ is true) + WarpIteratorAScale warp_tile_iterator_A_scale_; + +public: + /// constructor for MMA with operand A scaling enabled. + CUTLASS_DEVICE + MmaPipelinedFromSharedMemory( + // shared storage needed for internal use by threadblock-scoped GEMM + typename Base::SharedStorage& shared_storage, + // warp iterator over A tile held in shared memory + WarpIteratorA warp_iter_a, + // warp iterator over A_scale tile held in shared memory + WarpIteratorAScale warp_iter_a_scale, + int thread_idx, + int warp_idx, + int lane_idx) + : Base(shared_storage, thread_idx, warp_idx, lane_idx), + warp_tile_iterator_A_(warp_iter_a), + warp_tile_iterator_A_scale_(warp_iter_a_scale), + smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) + { + // Compute warp location within threadblock tile by mapping the warp_id to + // three coordinates: + // _m: the warp's position within the threadblock along the M dimension + // _n: the warp's position within the threadblock along the N dimension + // _k: the warp's position within the threadblock along the K dimension + int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); + int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); + int warp_idx_m = warp_idx_mn % Base::WarpCount::kM; + int warp_idx_n = warp_idx_mn / Base::WarpCount::kM; + + // Add per-warp offsets in units of warp-level tiles + this->warp_tile_iterator_A_.add_tile_offset( + {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); + this->warp_tile_iterator_A_scale_.add_tile_offset( + {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); + this->warp_tile_iterator_B_.add_tile_offset( + {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); + } + + /// Construct from tensor references + CUTLASS_DEVICE + MmaPipelinedFromSharedMemory( + typename Base::SharedStorage& shared_storage, ///< Shared storage needed for internal use + ///< by threadblock-scoped GEMM + AccumulatorSharedStorage& accumulator_shared_storage, + int thread_idx, ///< ID within the threadblock + int warp_idx, ///< ID of warp + int lane_idx, ///< ID of each thread within a warp + int problem_size_0_n) + : Base(shared_storage, thread_idx, warp_idx, lane_idx), + warp_tile_iterator_A_(accumulator_shared_storage.accum_ref(), lane_idx), + smem_iterator_B_(shared_storage.operand_B_ref(), thread_idx) + { + // Compute warp location within threadblock tile by mapping the warp_id to + // three coordinates: + // _m: the warp's position within the threadblock along the M dimension + // _n: the warp's position within the threadblock along the N dimension + // _k: the warp's position within the threadblock along the K dimension + + int warp_idx_mn = warp_idx % (Base::WarpCount::kM * Base::WarpCount::kN); + int warp_idx_k = warp_idx / (Base::WarpCount::kM * Base::WarpCount::kN); + + int warp_idx_m = warp_idx_mn % Base::WarpCount::kM; + int warp_idx_n = warp_idx_mn / Base::WarpCount::kM; + + // Add per-warp offsets in units of warp-level tiles + 
this->warp_tile_iterator_A_.add_tile_offset( + {warp_idx_m, Base::kWarpGemmIterations * warp_idx_k}); + this->warp_tile_iterator_B_.add_tile_offset( + {Base::kWarpGemmIterations * warp_idx_k, warp_idx_n}); + } + + // For API compatibility with MmaMultistageFromSharedMemory + // but not supported as it worsens perf: older gpus < sm80 don't + // support async transfers and have to waste registers + CUTLASS_DEVICE + void set_prologue_done(bool value) {} + CUTLASS_DEVICE + static void prologue(typename Base::SharedStorage& shared_storage, + IteratorB iterator_B1, + int thread_idx, + int problem_size_0_n) + { + } + + CUTLASS_DEVICE + static void drain_cp_asyncs() {} + + /// Perform a threadblock-scoped matrix multiply-accumulate + CUTLASS_DEVICE + void operator()(int gemm_k_iterations, ///< number of iterations of the mainloop + FragmentC& accum, ///< destination accumulator tile + // IteratorA iterator_A, ///< iterator over A + // operand in global memory + IteratorB iterator_B, ///< iterator over B operand in global memory + FragmentC const& src_accum, ///< source accumulator tile + // TransformA transform_A = TransformA(), ///< transformation + // applied to A fragment + TransformB transform_B = TransformB()) + { ///< transformation applied to B fragment + + // + // Prologue + // + + // Perform accumulation in the 'd' output operand + accum = src_accum; + + FragmentB tb_frag_B; + + tb_frag_B.clear(); + + // The last kblock is loaded in the prolog + iterator_B.set_residual_tile(gemm_k_iterations == 1); + iterator_B.load(tb_frag_B); + + ++iterator_B; + + this->smem_iterator_B_.store(transform_B(tb_frag_B)); + + ++this->smem_iterator_B_; + + __syncthreads(); + + // remember that WarpFragmentAScale and WarpIteratorAScale are empty/no-op + // if scaling is disabled. + + // Pair of fragments used to overlap shared memory loads and math + // instructions + WarpFragmentA warp_frag_A[2]; + WarpFragmentAScale warp_frag_A_scale[2]; + WarpFragmentB warp_frag_B[2]; + warp_frag_A[0].clear(); + warp_frag_A_scale[0].clear(); + warp_frag_B[0].clear(); + + this->warp_tile_iterator_B_.set_kgroup_index(0); + + this->warp_tile_iterator_A_.load(warp_frag_A[0]); + this->warp_tile_iterator_A_scale_.load(warp_frag_A_scale[0]); + this->warp_tile_iterator_B_.load(warp_frag_B[0]); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_A_scale_; + ++this->warp_tile_iterator_B_; + + Operator warp_mma; + + int smem_write_stage_idx = 1; + + // Avoid reading out of bounds + iterator_B.set_residual_tile(gemm_k_iterations == 2); + iterator_B.clear_mask(gemm_k_iterations <= 1); + + // Issue loads during the first warp-level matrix multiply-add *AFTER* + // issuing shared memory loads (which have the tightest latency requirement). + + // + // Mainloop + // + + // Note: The main loop does not support Base::kWarpGemmIterations == 2. + CUTLASS_GEMM_LOOP + for (; gemm_k_iterations > 0; --gemm_k_iterations) { + // + // Loop over GEMM K dimension + // + + CUTLASS_PRAGMA_UNROLL + for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations; ++warp_mma_k) { + // Load warp-level tiles from shared memory, wrapping to k offset if + // this is the last group as the case may be. 
+ bool hasNext = true; + + if (warp_mma_k == Base::kWarpGemmIterations - 1) { + // Write fragments to shared memory + this->smem_iterator_B_.store(transform_B(tb_frag_B)); + + __syncthreads(); + + ++this->smem_iterator_B_; + + // Add negative offsets to return iterators to the 'start' of the + // circular buffer in shared memory SMEM: Don't reset iterator A, as + // we are continuing our iteration at this point + if (smem_write_stage_idx == 1) { + this->smem_iterator_B_.add_tile_offset({-Base::kStages, 0}); + } else { + this->warp_tile_iterator_B_.add_tile_offset( + {-Base::kStages * Policy::kPartitionsK * Base::kWarpGemmIterations, 0}); + } + + smem_write_stage_idx ^= 1; + hasNext = gemm_k_iterations > 1; + } + + // Only read the next if we need to + if (hasNext) { + this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % + Base::kWarpGemmIterations); + + this->warp_tile_iterator_A_.load(warp_frag_A[(warp_mma_k + 1) % 2]); + this->warp_tile_iterator_A_scale_.load(warp_frag_A_scale[(warp_mma_k + 1) % 2]); + this->warp_tile_iterator_B_.load(warp_frag_B[(warp_mma_k + 1) % 2]); + + ++this->warp_tile_iterator_A_; + ++this->warp_tile_iterator_A_scale_; + ++this->warp_tile_iterator_B_; + + if (warp_mma_k == 0) { + iterator_B.load(tb_frag_B); + + ++iterator_B; + + // Avoid reading out of bounds if this was the last loop iteration + iterator_B.set_residual_tile(gemm_k_iterations == 3); + iterator_B.clear_mask(gemm_k_iterations <= 2); + } + } + + warp_mma(accum, + FragmentAScaler::apply(warp_frag_A[warp_mma_k % 2], + warp_frag_A_scale[warp_mma_k % 2]), + warp_frag_B[warp_mma_k % 2], + accum); + } + } + } +}; + +//////////////////////////////////////////////////////////////////////////////// +// Taken from +// https://github.com/NVIDIA/cutlass/blob/master/examples/13_two_tensor_op_fusion/threadblock/b2b_mma_multistage_smem_accumulator.h +//////////////////////////////////////////////////////////////////////////////// + +/// Structure to compute the matrix product targeting CUDA cores and SIMT math +/// instructions. 
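// [Editorial sketch - not part of the DeepSpeed/CUTLASS sources in this patch.]
// The pipelined mainloop above keeps two warp fragments per operand and loads
// fragment (k+1) % 2 while the MMA for fragment k % 2 executes, hiding the
// shared-memory load latency. A CPU-only analogue of that double-buffering
// pattern, with invented names (fetch, mma, pipelined_mainloop), is shown here;
// the multistage (cp.async) variant introduced by the comment above follows below.
#include <array>
#include <cstddef>
#include <vector>

using Fragment = std::array<float, 4>;

static Fragment fetch(const std::vector<Fragment>& tiles, std::size_t k)
{
    return tiles[k];  // stands in for a shared-memory load
}

static float mma(const Fragment& frag, float acc)
{
    for (float v : frag) acc += v;  // stands in for a warp-level MMA
    return acc;
}

float pipelined_mainloop(const std::vector<Fragment>& tiles)
{
    if (tiles.empty()) return 0.f;
    Fragment frag[2];               // double buffer, like warp_frag_B[2] above
    frag[0] = fetch(tiles, 0);      // prologue: fill buffer 0
    float acc = 0.f;
    for (std::size_t k = 0; k < tiles.size(); ++k) {
        if (k + 1 < tiles.size()) {
            frag[(k + 1) % 2] = fetch(tiles, k + 1);  // issue the next load first
        }
        acc = mma(frag[k % 2], acc);                  // then do the math for this step
    }
    return acc;
}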
+template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape1_, + /// Iterates over the intermediate accumulator tile in shared memory + typename WarpIteratorA1_, + /// whether or not to perform elementwise multiplication of A + // by another matrix (A_scale) that is also kept in shared memory prior + // to matmul A @ B + bool ScaleOperandA_, + // Accumulator type + typename AccumulatorSharedStorage, + /// Iterates over tiles of B operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorB1_, + /// Iterates over tiles of B operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorB1_, + /// Cache operation for operand B + cutlass::arch::CacheOperation::Kind CacheOpB1, + /// Data type of accumulator matrix + typename ElementC_, + /// Data type of accumulator matrix + typename LayoutC_, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy1_, + /// Number of stages, + int Stages_, + int kMaxK_, + /// Used for partial specialization + typename Enable = bool> +class MmaMultistageFromSharedMemory + : public MmaBaseFromSharedMemory { +public: + ///< Base class + using Base = MmaBaseFromSharedMemory; + + ///< Size of the Gemm problem - concept: gemm::GemmShape<> + using Shape1 = Shape1_; + ///< Iterates over tiles of B operand in global memory + using IteratorB1 = IteratorB1_; + using IteratorB = IteratorB1; + ///< Policy describing tuning details + using Policy1 = Policy1_; + + using SmemIteratorB1 = SmemIteratorB1_; + using WarpIteratorA1 = WarpIteratorA1_; ///< Iterates over the intermediate + ///< accumulator tile in shared memory + static constexpr bool ScaleOperandA = ScaleOperandA_; + + ///< warp level iterator over A_scale matrix tile kept in shared memory. + ///< if elementwise A scaling is disabled then everything this does is no-op. + using WarpIteratorAScale = typename cutlass::platform::conditional< + ScaleOperandA, + WarpIteratorA1, + NoOpWarpIteratorScale>::type; + ///< Data type of accumulator matrix + using ElementC = ElementC_; + ///< Layout of accumulator matrix + using LayoutC = LayoutC_; + + static cutlass::arch::CacheOperation::Kind const kCacheOpB1 = CacheOpB1; + static constexpr bool kSmemContainsEntireB = Base::kSmemContainsEntireB; + + // + // Dependent types + // + + /// Fragment of accumulator tile + using FragmentC1 = typename Policy1::Operator::FragmentC; + using FragmentC = FragmentC1; + + /// Warp-level Mma + using Operator1 = typename Policy1::Operator; + + /// Minimum architecture is Sm80 to support cp.async + using ArchTag = arch::Sm80; + + /// Complex transform on B operand + static ComplexTransform const kTransformB1 = Operator1::kTransformB; + + /// Internal structure exposed for introspection. + struct Detail { + static_assert(Base::kWarpGemmIterations1 > 1, + "The pipelined structure requires at least two warp-level " + "GEMM operations."); + + /// Number of cp.async instructions to load one stage of operand B + static int const TBLoadIterationsB1 = IteratorB1::ThreadMap::Iterations::kCount; + + /// Number of cp.async instructions to load on group of operand B + static int const kAccessesPerGroupB1 = + (TBLoadIterationsB1 + Base::kWarpGemmIterations1 - 1) / Base::kWarpGemmIterations1; + }; + + static constexpr int kNumStagesConcurrentLoad = kSmemContainsEntireB ? 
Base::kStages + : Base::kStages - 1; + +private: + using WarpLoadedFragmentA1 = typename Operator1::FragmentA; + /// fragment of OperandA scale matrix. if operand A scaling is disabled this + /// is (almost) empty. + using WarpLoadedFragmentA1Scale = typename WarpIteratorAScale::Fragment; + using WarpLoadedFragmentB1 = typename Operator1::FragmentB; + using WarpTransformedFragmentA1 = typename Operator1::TransformedFragmentA; + using WarpTransformedFragmentB1 = typename Operator1::TransformedFragmentB; + + /// applies elementwise scaling to fragment of A. if operand A scaling is + /// disabled this is a no-op. + using FragmentAScaler = + FragmentElementwiseScaler; + +private: + // + // Data members + // + + /// Iterator to load a warp-scoped tile of A1 operand from intermediate + /// accumulator tile + WarpIteratorA1 warp_tile_iterator_A1_; + + /// Iterator to load a warp-scoped tile of A1_scale operand from shared memory + /// if operand A scaling is disabled everything this does is a no-op. + WarpIteratorAScale warp_tile_iterator_A1_scale_; + + /// Iterator to write threadblock-scoped tile of B operand to shared memory + SmemIteratorB1 smem_iterator_B1_; + + bool prologue_done_; + +public: + /// constructor for MMA with operand A scaling enabled. + CUTLASS_DEVICE + MmaMultistageFromSharedMemory( + // shared storage needed for internal use by threadblock-scoped GEMM + typename Base::SharedStorage& shared_storage, + // warp level iterator over operand A tile kept in shared memory + WarpIteratorA1 warp_tile_iterator_A1, + // warp level iterator over operand A elementwise scale tile kept in + // shared memory. + WarpIteratorAScale warp_tile_iterator_A1_scale, + int thread_idx, + int warp_idx, + int lane_idx) + : Base(shared_storage, thread_idx, warp_idx, lane_idx), + warp_tile_iterator_A1_(warp_tile_iterator_A1), + warp_tile_iterator_A1_scale_(warp_tile_iterator_A1_scale), + smem_iterator_B1_(shared_storage.operand_B_ref(), thread_idx), + prologue_done_(false) + { + // Compute warp location within threadblock tile by mapping the warp_id to + // three coordinates: + // _m: the warp's position within the threadblock along the M dimension + // _n: the warp's position within the threadblock along the N dimension + // _k: the warp's position within the threadblock along the K dimension + int warp_idx_mn_1 = warp_idx % (Base::WarpCount1::kM * Base::WarpCount1::kN); + int warp_idx_k_1 = warp_idx / (Base::WarpCount1::kM * Base::WarpCount1::kN); + int warp_idx_m_1 = warp_idx_mn_1 % Base::WarpCount1::kM; + int warp_idx_n_1 = warp_idx_mn_1 / Base::WarpCount1::kM; + + // Add per-warp offsets in units of warp-level tiles + warp_tile_iterator_A1_.add_tile_offset( + {warp_idx_m_1, Base::kWarpGemmIterations1 * warp_idx_k_1}); + warp_tile_iterator_A1_scale_.add_tile_offset( + {warp_idx_m_1, Base::kWarpGemmIterations1 * warp_idx_k_1}); + this->warp_tile_iterator_B_.add_tile_offset( + {Base::kWarpGemmIterations1 * warp_idx_k_1, warp_idx_n_1}); + } + + /// Construct from tensor references + CUTLASS_DEVICE + MmaMultistageFromSharedMemory( + typename Base::SharedStorage& shared_storage, ///< Shared storage needed for internal use + ///< by threadblock-scoped GEMM + AccumulatorSharedStorage& accumulator_shared_storage, + ///< ID within the threadblock + int thread_idx, + ///< ID of warp + int warp_idx, + ///< ID of each thread within a warp + int lane_idx, + ///< GEMM0 N is used for accumulator extent + int problem_size_0_n) + : Base(shared_storage, thread_idx, warp_idx, lane_idx), + 
warp_tile_iterator_A1_(accumulator_shared_storage.accum_ref(), lane_idx), + smem_iterator_B1_(shared_storage.operand_B_ref(), thread_idx), + prologue_done_(false) + { + // Compute warp location within threadblock tile by mapping the warp_id to + // three coordinates: + // _m: the warp's position within the threadblock along the M dimension + // _n: the warp's position within the threadblock along the N dimension + // _k: the warp's position within the threadblock along the K dimension + + int warp_idx_mn_1 = warp_idx % (Base::WarpCount1::kM * Base::WarpCount1::kN); + int warp_idx_k_1 = warp_idx / (Base::WarpCount1::kM * Base::WarpCount1::kN); + + int warp_idx_m_1 = warp_idx_mn_1 % Base::WarpCount1::kM; + int warp_idx_n_1 = warp_idx_mn_1 / Base::WarpCount1::kM; + + // Add per-warp offsets in units of warp-level tiles + warp_tile_iterator_A1_.add_tile_offset( + {warp_idx_m_1, Base::kWarpGemmIterations1 * warp_idx_k_1}); + this->warp_tile_iterator_B_.add_tile_offset( + {Base::kWarpGemmIterations1 * warp_idx_k_1, warp_idx_n_1}); + } + + CUTLASS_DEVICE + void set_prologue_done(bool value) { prologue_done_ = value; } + + CUTLASS_DEVICE + static void prologue(typename Base::SharedStorage& shared_storage, + IteratorB iterator_B1, + int thread_idx, + int problem_size_0_n) + { + SmemIteratorB1 smem_iterator_B1(shared_storage.operand_B_ref(), thread_idx); + _prologue(iterator_B1, + (problem_size_0_n + Base::Shape::kK - 1) / Base::Shape::kK, + smem_iterator_B1); + } + + CUTLASS_DEVICE + static void drain_cp_asyncs() + { + // commit and drain all pending and predicated cp.async pnz from the GEMM + // mainloop + cutlass::arch::cp_async_fence(); + cutlass::arch::cp_async_wait<0>(); + __syncthreads(); + } + + CUTLASS_DEVICE + void copy_tiles_and_advance_1(IteratorB1& iterator_B1, int group_start_B1 = 0) + { + iterator_B1.set_iteration_index(group_start_B1 * IteratorB1::kAccessesPerVector); + this->smem_iterator_B1_.set_iteration_index(group_start_B1); + + // Load for operand B + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::kAccessesPerGroupB1; ++j) { + if (group_start_B1 + j < Detail::TBLoadIterationsB1) { + typename IteratorB1::AccessType* dst_ptr = + reinterpret_cast( + this->smem_iterator_B1_.get()); + + int const kSrcBytes = sizeof_bits::value * + IteratorB1::ThreadMap::kElementsPerAccess / + IteratorB1::kAccessesPerVector / 8; + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) { + auto gmem_ptr = iterator_B1.get(); + + cutlass::arch::cp_async_zfill( + dst_ptr + v, gmem_ptr, iterator_B1.valid()); + + ++iterator_B1; + } + ++this->smem_iterator_B1_; + } + } + } + + CUTLASS_DEVICE + static void _prologue(IteratorB& iterator_B1, + int32_t gemm_k_iterations_1, + SmemIteratorB1& smem_iterator_B1_) + { + // Issue several complete stages + CUTLASS_PRAGMA_UNROLL + for (int stage = 0; stage < kNumStagesConcurrentLoad; ++stage, --gemm_k_iterations_1) { + iterator_B1.set_residual_tile(gemm_k_iterations_1 == 1); + iterator_B1.clear_mask(gemm_k_iterations_1 == 0); + + iterator_B1.set_iteration_index(0); + smem_iterator_B1_.set_iteration_index(0); + + // Load for operand B + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::TBLoadIterationsB1; ++j) { + typename IteratorB1::AccessType* dst_ptr = + reinterpret_cast(smem_iterator_B1_.get()); + + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) { + int const kSrcBytes = sizeof_bits::value * + IteratorB1::ThreadMap::kElementsPerAccess / + IteratorB1::kAccessesPerVector / 8; + + 
cutlass::arch::cp_async_zfill( + dst_ptr + v, iterator_B1.get(), iterator_B1.valid()); + + ++iterator_B1; + } + + ++smem_iterator_B1_; + } + + // Move to the next stage + iterator_B1.add_tile_offset({1, 0}); + + smem_iterator_B1_.add_tile_offset({1, 0}); + + // Defines the boundary of a stage of cp.async. + cutlass::arch::cp_async_fence(); + } + iterator_B1.set_residual_tile(gemm_k_iterations_1 == 1); + iterator_B1.clear_mask(gemm_k_iterations_1 == 0); + } + + /// Perform a threadblock-scoped matrix multiply-accumulate + CUTLASS_DEVICE + void operator()( + ///< problem size of GEMM + int gemm_k_iterations_1_, + ///< destination accumulator tile + FragmentC1& accum, + ///< iterator over B1 operand in global memory + IteratorB1 iterator_B1, + ///< initial value of accumulator + FragmentC1 const& src_accum) + { + // 2nd Gemm + + // + // Prologue + // + // Perform accumulation in the 'd' output operand + accum = src_accum; + + if (!prologue_done_) { + _prologue(iterator_B1, gemm_k_iterations_1_, smem_iterator_B1_); + } else if (!kSmemContainsEntireB) { + // Restore the iterators increments + + int gemm_k_iterations_1 = gemm_k_iterations_1_; + // Issue several complete stages + CUTLASS_PRAGMA_UNROLL + for (int stage = 0; stage < kNumStagesConcurrentLoad; ++stage, --gemm_k_iterations_1) { + iterator_B1.set_iteration_index(0); + this->smem_iterator_B1_.set_iteration_index(0); + + // Load for operand B + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < Detail::TBLoadIterationsB1; ++j) { + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < IteratorB1::kAccessesPerVector; ++v) { ++iterator_B1; } + ++this->smem_iterator_B1_; + } + iterator_B1.add_tile_offset({1, 0}); + this->smem_iterator_B1_.add_tile_offset({1, 0}); + } + iterator_B1.set_residual_tile(gemm_k_iterations_1 <= 1); + iterator_B1.clear_mask(gemm_k_iterations_1 <= 0); + } + + // DEPBAR+SYNC + cutlass::arch::cp_async_wait(); + __syncthreads(); + + // remember that WarpFragmentAScale and WarpIteratorAScale are no-op/empty + // if scaling is disabled. + + // Pair of fragments used to overlap shared memory loads and math + // instructions + WarpLoadedFragmentA1 warp_loaded_frag_A1[2]; + WarpLoadedFragmentA1Scale warp_loaded_frag_A1_scale[2]; + WarpLoadedFragmentB1 warp_loaded_frag_B1[2]; + WarpTransformedFragmentA1 warp_transformed_frag_A1[2]; + WarpTransformedFragmentB1 warp_transformed_frag_B1[2]; + + Operator1 warp_mma1; + + warp_tile_iterator_A1_.load(warp_loaded_frag_A1[0]); + ++warp_tile_iterator_A1_; + + warp_tile_iterator_A1_scale_.load(warp_loaded_frag_A1_scale[0]); + ++warp_tile_iterator_A1_scale_; + + this->warp_tile_iterator_B_.set_kgroup_index(0); + this->warp_tile_iterator_B_.load(warp_loaded_frag_B1[0]); + ++this->warp_tile_iterator_B_; + + int smem_write_stage_idx = Base::kStages - 1; + int smem_read_stage_idx = 0; + + warp_mma1.transform( + warp_transformed_frag_A1[0], + warp_transformed_frag_B1[0], + FragmentAScaler::apply(warp_loaded_frag_A1[0], warp_loaded_frag_A1_scale[0]), + warp_loaded_frag_B1[0]); + + // tf32x3 kernels use staging accumulation. warp_mma uses a temporary + // accumulator and this temporary accumulator is added to the final + // accumulator once in every mainloop iteration. 
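// [Editorial sketch - not part of the DeepSpeed/CUTLASS sources in this patch.]
// The comment above describes the tf32x3 "staging accumulation" scheme: partial
// products go into a small temporary accumulator that is folded into the running
// total once per mainloop iteration, rather than being added directly to the
// large accumulator each time. A scalar illustration of that idea (invented name
// staged_accumulate) is shown here; the actual tmp_accum / plus_accum code
// continues below.
#include <vector>

float staged_accumulate(const std::vector<std::vector<float>>& stages)
{
    float accum = 0.f;                  // final accumulator (plays the role of `accum`)
    for (const auto& stage : stages) {  // one mainloop iteration per stage
        float tmp_accum = 0.f;          // temporary accumulator (plays the role of `tmp_accum`)
        for (float partial : stage) {
            tmp_accum += partial;       // warp-level MMAs accumulate into the temporary
        }
        accum += tmp_accum;             // fold in once per iteration, then reset
    }
    return accum;
}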
+ plus plus_accum; + + FragmentC1 tmp_accum; + + if (platform::is_same::value || + platform::is_same::value) { + tmp_accum.clear(); + } + + // + // Mainloop + // + + CUTLASS_PRAGMA_UNROLL + for (int gemm_k_iterations_1 = gemm_k_iterations_1_ - (Base::kStages - 1); + gemm_k_iterations_1 > (-Base::kStages + 1); + gemm_k_iterations_1--) { + // + // Loop over GEMM K dimension + // + + // Computes a warp-level GEMM on data held in shared memory + // Each "warp_mma_k" refers to a warp-level matrix multiply-accumulate + CUTLASS_PRAGMA_UNROLL + for (int warp_mma_k = 0; warp_mma_k < Base::kWarpGemmIterations1; ++warp_mma_k) { + // Load warp-level tile from accumulator fragment (A) + // or shared memory (operand B) + this->warp_tile_iterator_B_.set_kgroup_index((warp_mma_k + 1) % + Base::kWarpGemmIterations1); + // skip warp tile loading for the last kgroup (we are out of the buf) + if (gemm_k_iterations_1 > (-Base::kStages + 2) || + warp_mma_k < Base::kWarpGemmIterations1 - 1) { + warp_tile_iterator_A1_.load(warp_loaded_frag_A1[(warp_mma_k + 1) % 2]); + warp_tile_iterator_A1_scale_.load( + warp_loaded_frag_A1_scale[(warp_mma_k + 1) % 2]); + this->warp_tile_iterator_B_.load(warp_loaded_frag_B1[(warp_mma_k + 1) % 2]); + } + ++warp_tile_iterator_A1_; + ++warp_tile_iterator_A1_scale_; + ++this->warp_tile_iterator_B_; + + if (warp_mma_k > 0) + warp_mma1.transform( + warp_transformed_frag_A1[warp_mma_k % 2], + warp_transformed_frag_B1[warp_mma_k % 2], + FragmentAScaler::apply(warp_loaded_frag_A1[warp_mma_k % 2], + warp_loaded_frag_A1_scale[warp_mma_k % 2]), + warp_loaded_frag_B1[warp_mma_k % 2]); + + if (platform::is_same::value || + platform::is_same::value) { + warp_mma1(tmp_accum, + warp_transformed_frag_A1[warp_mma_k % 2], + warp_transformed_frag_B1[warp_mma_k % 2], + tmp_accum); + + if (warp_mma_k == 0) { + accum = plus_accum(accum, tmp_accum); + tmp_accum.clear(); + } + } else { + warp_mma1(accum, + warp_transformed_frag_A1[warp_mma_k % 2], + warp_transformed_frag_B1[warp_mma_k % 2], + accum); + } + + // Issue global->shared copies for the this stage + if (warp_mma_k < Base::kWarpGemmIterations1 - 1) { + int group_start_iteration_B1; + + group_start_iteration_B1 = warp_mma_k * Detail::kAccessesPerGroupB1; + + if (!kSmemContainsEntireB) { + copy_tiles_and_advance_1(iterator_B1, group_start_iteration_B1); + } + } + + if (warp_mma_k + 2 == Base::kWarpGemmIterations1) { + int group_start_iteration_B1; + group_start_iteration_B1 = (warp_mma_k + 1) * Detail::kAccessesPerGroupB1; + + if (!kSmemContainsEntireB) { + copy_tiles_and_advance_1(iterator_B1, group_start_iteration_B1); + } + + // Inserts a memory fence between stages of cp.async instructions. + cutlass::arch::cp_async_fence(); + + // Waits until kStages-2 stages have committed. 
+ arch::cp_async_wait(); + __syncthreads(); + + // Move to the next stage + iterator_B1.add_tile_offset({1, 0}); + + this->smem_iterator_B1_.add_tile_offset({1, 0}); + + // Add negative offsets to return iterators to the 'start' of the + // circular buffer in shared memory + if (!kSmemContainsEntireB) { + if (smem_write_stage_idx == (Base::kStages - 1)) { + this->smem_iterator_B1_.add_tile_offset({-Base::kStages, 0}); + smem_write_stage_idx = 0; + } else { + ++smem_write_stage_idx; + } + + if (smem_read_stage_idx == (Base::kStages - 1)) { + this->warp_tile_iterator_B_.add_tile_offset( + {-Base::kStages * Policy1::kPartitionsK * + Base::kWarpGemmIterations1, + 0}); + smem_read_stage_idx = 0; + } else { + ++smem_read_stage_idx; + } + } + + iterator_B1.set_residual_tile(gemm_k_iterations_1 == 2); + iterator_B1.clear_mask(gemm_k_iterations_1 == 1); + } + + // Do any conversions feeding the first stage at the end of the loop so + // we can start right away on mma instructions + if (warp_mma_k + 1 == Base::kWarpGemmIterations1) + warp_mma1.transform( + warp_transformed_frag_A1[(warp_mma_k + 1) % 2], + warp_transformed_frag_B1[(warp_mma_k + 1) % 2], + FragmentAScaler::apply(warp_loaded_frag_A1[(warp_mma_k + 1) % 2], + warp_loaded_frag_A1_scale[(warp_mma_k + 1) % 2]), + warp_loaded_frag_B1[(warp_mma_k + 1) % 2]); + } + } + + if (platform::is_same::value || + platform::is_same::value) { + accum = plus_accum(accum, tmp_accum); + } + } +}; + +template +struct DefaultWarpIteratorAFromSharedMemory {}; + +// TensorOp - Ampere half +template +struct DefaultWarpIteratorAFromSharedMemory< + cutlass::gemm::GemmShape<32, 32, 32>, + cutlass::gemm::GemmShape<16, 8, 8>, + RegularWarpIterator, + Policy, + typename platform::enable_if<(sizeof_bits::value == 16 && + Policy::Operator::Policy::OpDelta::kRow == 1)>::type> { + static constexpr auto kWarpSize = 32; + using OpDelta = typename Policy::Operator::Policy::OpDelta; + using WarpShape = cutlass::MatrixShape<32, 32>; + + using WarpIterator = + cutlass::gemm::warp::WarpIteratorFromSmem; +}; + +// TensorOp - Ampere f32 +template +struct DefaultWarpIteratorAFromSharedMemory< + WarpShape, + cutlass::gemm::GemmShape<16, 8, 8>, + RegularWarpIterator, + Policy, + typename platform::enable_if<(sizeof_bits::value != 16 || + Policy::Operator::Policy::OpDelta::kRow != 1)>::type> { + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + static constexpr auto kWarpSize = 32; + using OpDelta = typename Policy::Operator::Policy::OpDelta; + + using WarpIterator = cutlass::gemm::warp::MmaTensorOpMultiplicandTileAccessIterator< + cutlass::MatrixShape, + cutlass::gemm::Operand::kA, + typename RegularWarpIterator::Element, + cutlass::layout::RowMajor, + cutlass::MatrixShape, + OpDelta::kRow, + kWarpSize>; +}; + +// TensorOp - Volta +template +struct DefaultWarpIteratorAFromSharedMemory, + RegularWarpIterator, + Policy> { + using InstructionShape = cutlass::gemm::GemmShape<16, 16, 4>; + static constexpr auto kWarpSize = 32; + using OpDelta = typename Policy::Operator::Policy::OpDelta; + + using WarpIterator = cutlass::gemm::warp::MmaVoltaTensorOpMultiplicandTileIterator< + cutlass::MatrixShape<32, 32>, // MatrixShape, + cutlass::gemm::Operand::kA, + typename RegularWarpIterator::Element, + cutlass::layout::RowMajorVoltaTensorOpMultiplicandCrosswise<16, 32>, + cutlass::MatrixShape<16, 4>, + OpDelta::kRow, + kWarpSize>; +}; + +// Simt +template +struct DefaultWarpIteratorAFromSharedMemory, + RegularWarpIterator, + Policy> { + using InstructionShape = 
cutlass::gemm::GemmShape<1, 1, 1>; + static constexpr auto kWarpSize = 32; + + // We just use the same iterator, as we reproduced the same shared-memory + // schema. Just modify it to handle non-complete tiles. + using WarpIterator = RegularWarpIterator; +}; + +// Converts a "regular" Mma into their counterpart from shared memory +template +struct DefaultMmaFromSharedMemory; + +// Mma pipelined +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Iterates over tiles of A operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorA_, + /// Iterates over tiles of A operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorA_, + /// Iterates over tiles of B operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorB_, + /// Iterates over tiles of B operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorB_, + /// Data type of accumulator matrix + typename ElementC_, + /// Data type of accumulator matrix + typename LayoutC_, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy_, + /// Transformation applied to A operand + typename TransformA_, + /// Transformation applied to B operand + typename TransformB_, + typename AccumulatorSharedStorage_, + /// whether or not to apply elementwise multiplication of operand A by + /// another matrix in shared memory before usage in A @ B + bool kScaleOperandA, + bool kTransposeA> +struct DefaultMmaFromSharedMemory, + AccumulatorSharedStorage_, + kScaleOperandA, + kTransposeA> { + static constexpr int kWarpSize = 32; + using SmemAccumulatorLayout = cutlass::layout::RowMajor; + + using RegularMma = MmaPipelined; + + using WarpShape = typename Policy_::Operator::Shape; + using InstructionShape = typename Policy_::Operator::InstructionShape; + using ArchMmaOperator = typename Policy_::Operator; + + static constexpr bool kIsTransposedA = false; + using WarpIteratorA = + typename DefaultWarpIteratorAFromSharedMemory::WarpIterator; + using IteratorB = + typename cutlass::transform::threadblock::MakeIteratorResidualLast::Iterator; + + using Mma = + typename cutlass::gemm::threadblock::MmaPipelinedFromSharedMemory; +}; + +template < + /// Size of the Gemm problem - concept: gemm::GemmShape<> + typename Shape_, + /// Iterates over tiles of A operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorA_, + /// Iterates over tiles of A operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorA_, + /// Cache operation for operand A + cutlass::arch::CacheOperation::Kind CacheOpA, + /// Iterates over tiles of B operand in global memory + // (concept: ReadableTileIterator | ForwardTileIterator | + // MaskedTileIterator) + typename IteratorB_, + /// Iterates over tiles of B operand in shared memory + /// (concept: WriteableTileIterator | RandomAccessTileIterator) + typename SmemIteratorB_, + /// Cache operation for operand B + cutlass::arch::CacheOperation::Kind CacheOpB, + /// Data type of accumulator matrix + typename ElementC_, + /// Data type of accumulator matrix + typename LayoutC_, + /// Policy describing tuning details (concept: MmaPolicy) + typename Policy_, + /// Number of stages, + int Stages, + /// Use zfill or 
predicate for out-of-bound cp.async + SharedMemoryClearOption SharedMemoryClear, + typename AccumulatorSharedStorage_, + /// whether or not to apply elementwise multiplication of operand A by + /// another matrix in shared memory before usage in A @ B + bool kScaleOperandA, + bool kTransposeA> +struct DefaultMmaFromSharedMemory, + AccumulatorSharedStorage_, + kScaleOperandA, + kTransposeA> { + static constexpr int kWarpSize = 32; + + using RegularMma = MmaMultistage; + + using WarpShape = typename Policy_::Operator::Shape; + using InstructionShape = typename Policy_::Operator::InstructionShape; + using WarpIteratorA_ = + typename DefaultWarpIteratorAFromSharedMemory::WarpIterator; + using WarpIteratorTranspose = TransposeWarpIterator; + static constexpr bool kIsTransposedA = WarpIteratorTranspose::kSupportsTranspose && kTransposeA; + using WarpIteratorA = typename platform:: + conditional::type; + + static int constexpr kMaxK = kIsTransposedA ? AccumulatorSharedStorage_::Shape::kM + : AccumulatorSharedStorage_::Shape::kN; + // Reduce the number of stages if we don't need that many + static int constexpr kStagesMax = (kMaxK + int(Shape_::kK) - 1) / int(Shape_::kK); + static int constexpr kStages = cutlass::const_min(Stages, kStagesMax); + + using IteratorB = + typename cutlass::transform::threadblock::MakeIteratorResidualLast::Iterator; + using Mma = typename cutlass::gemm::threadblock::MmaMultistageFromSharedMemory< + Shape_, + WarpIteratorA, + kScaleOperandA, + AccumulatorSharedStorage_, + IteratorB, + SmemIteratorB_, + RegularMma::kCacheOpB, + ElementC_, + LayoutC_, + Policy_, + kStages, + kMaxK>; +}; + +///////////////////////////////////////////////////////////////////////////////////////////////// + +template +struct B2bGemm; + +// Tensor Cores >= Sm75 specialization (Ampere ...) +template < /// Size of the matrix to load (concept: MatrixShape) + typename Shape_, + /// Element type + typename Element_, + /// Layout of operand in memory + typename Layout_, + /// Shape of one matrix product operation (concept: MatrixShape) + typename InstructionShape_, + /// Interval between adjacent *MMA instructions (in units of MMA + /// instructions, concept: MatrixShape) + typename OpDelta_, + typename Operator, + typename scalar_t, + typename WarpShape_, + typename ThreadblockShape_> +struct B2bGemm< + cutlass::gemm::warp:: + MmaTensorOpAccumulatorTileIterator, + Operator, + scalar_t, + WarpShape_, + ThreadblockShape_> { + using IteratorC = typename cutlass::gemm::warp:: + MmaTensorOpAccumulatorTileIterator; + using FragmentC = typename IteratorC::Fragment; + using InstructionShape = InstructionShape_; + using WarpShape = WarpShape_; + using ThreadblockShape = ThreadblockShape_; + using accum_t = Element_; + using lse_scalar_t = float; + + using SmemAccumulatorLayout = cutlass::layout::RowMajor; + + // Iterator to load accumulators (results of matmul in registers) + using FragmentIteratorAccumulator = cutlass::epilogue::warp::FragmentIteratorTensorOp< + WarpShape, + InstructionShape, + accum_t, + typename Operator::Policy::Operator::FragmentC, + cutlass::layout::RowMajor>; + + // Iterator to store to shared-memory + using SmemIteratorD0 = + typename cutlass::epilogue::warp::TileIteratorTensorOp; + using AccumulatorSharedStorage = + cutlass::gemm::threadblock::AccumulatorSharedStorage; + // We need to provide an operation for the epilogue. 
Let's create an + // operation that does nothing (ScaleType::Nothing), just converts + // from accum_t (float) -> scalar_t (can be half) + using OutputOpNoOp = cutlass::epilogue::thread::LinearCombination< + typename SmemIteratorD0::Element, // ElementOutput + FragmentIteratorAccumulator::Fragment::kElements, + accum_t, // ElementAccumulator + typename SmemIteratorD0::Element, // ElementCompute + cutlass::epilogue::thread::ScaleType::Nothing>; + using Epilogue = cutlass::epilogue::threadblock::EpilogueSmemAccumulator< + SmemIteratorD0, + FragmentIteratorAccumulator, + SmemIteratorD0, // ScaleBiasIterator + // - not used + OutputOpNoOp>; + + // Epilogue 2: with LSE (for backwards pass) + static int const kElementsPerAccess = 2; // TODO: Why 2? + using IteratorAccumulatorLSE = cutlass::transform::threadblock::VectorIterator< + cutlass::transform::threadblock::PredicatedVectorAccessIterator< + // Shape + cutlass::MatrixShape, + // WarpShape + cutlass::MatrixShape, + lse_scalar_t, + cutlass::layout::RowMajor, + kElementsPerAccess>>; + using EpilogueOpApplyLSE = cutlass::epilogue::thread::ApplyLogSumExp< + scalar_t, // ElementOutput_ + lse_scalar_t, // ElementLSE_ + accum_t, // ElementAccumulator_ + accum_t, // ElementCompute_ + 128 / cutlass::sizeof_bits::value + // FragmentIteratorAccumulator::Fragment::kElements + // InstructionShape::kM * InstructionShape::kN / 32 + >; + using EpilogueWithLSE = + cutlass::epilogue::threadblock::EpilogueSmemAccumulator; + + static void CUTLASS_DEVICE accumToSmem(AccumulatorSharedStorage& shared_storage, + FragmentC const& accum, + int lane_id, + cutlass::MatrixCoord const& tile_coords) + { + SmemIteratorD0 smem_iterator_attn(shared_storage.accum_ref(), lane_id); + smem_iterator_attn.add_tile_offset( + tile_coords * cutlass::MatrixCoord{SmemIteratorD0::TileIterations::kRow, + SmemIteratorD0::TileIterations::kColumn}); + Epilogue epilogue; + epilogue(OutputOpNoOp({}), smem_iterator_attn, accum); + } + + static void CUTLASS_DEVICE accumApplyLSEToSmem(AccumulatorSharedStorage& shared_storage, + FragmentC& accum, + lse_scalar_t const* lse, + int32_t lse_extents, + int thread_id, + int warp_id, + int lane_id, + cutlass::MatrixCoord const& tile_coords) + { + constexpr int32_t kAlignLSE = 32; + IteratorAccumulatorLSE iterator_lse( + lse, + {(int32_t)0, (int32_t)ceil_div(lse_extents, kAlignLSE) * kAlignLSE}, + thread_id, + warp_id, + cutlass::MatrixCoord{0, 0} // offset + ); + + SmemIteratorD0 smem_iterator_attn(shared_storage.accum_ref(), lane_id); + smem_iterator_attn.add_tile_offset( + tile_coords * cutlass::MatrixCoord{SmemIteratorD0::TileIterations::kRow, + SmemIteratorD0::TileIterations::kColumn}); + EpilogueWithLSE epilogue; + EpilogueOpApplyLSE minus_lse_exp({}); + epilogue(minus_lse_exp, + smem_iterator_attn, + accum, + // scale - unused + iterator_lse, + // bias + iterator_lse); + } +}; + +// Volta Specialization +// only supported for f16 +template +struct B2bGemm, + float, + cutlass::layout::RowMajor, + cutlass::gemm::GemmShape<16, 16, 4>, + cutlass::MatrixShape<1, 1>>, + Operator, + cutlass::half_t, + WarpShape_, + ThreadblockShape_> { + using IteratorC = cutlass::gemm::warp::MmaVoltaTensorOpAccumulatorTileIterator< + cutlass::MatrixShape<32, 32>, + float, + cutlass::layout::RowMajor, + cutlass::gemm::GemmShape<16, 16, 4>, + cutlass::MatrixShape<1, 1>>; + using scalar_t = cutlass::half_t; + using accum_t = IteratorC::Element; + using WarpShape = WarpShape_; + using ThreadblockShape = ThreadblockShape_; + using FragmentC = IteratorC::Fragment; + using 
lse_scalar_t = float; + + using SmemAccumulatorLayout = cutlass::layout::RowMajor; + using SmemIteratorD0 = + cutlass::epilogue::warp::TileIteratorVoltaTensorOp, + scalar_t, + SmemAccumulatorLayout>; + + // // Storage in shared-memory for Q.Kt + using AccumulatorSharedStorage = cutlass::gemm::threadblock::AccumulatorSharedStorage< + ThreadblockShape, + scalar_t, + cutlass::layout::RowMajorVoltaTensorOpMultiplicandCrosswise< + 16, + 32>, // typename SmemIteratorD0::TensorLayout, + cutlass::MatrixShape<0, 0> // Padding + >; + + using OutputLayout = cutlass::layout::RowMajorVoltaTensorOpMultiplicandCrosswise<16, 32>; + using TensorRef = cutlass::TensorRef; + using Policy = typename IteratorC::Policy; + using Element = accum_t; + // Those are MmaVoltaTensorOpAccumulatorTileIterator private fields + // Let's copy their values + static int const kElementsPerPartial = 4; + using EleShapePerPatial = + typename cutlass::platform::conditional::value, + cutlass::MatrixShape<2, 2>, + cutlass::MatrixShape<1, 4>>::type; + static int const kElementsPerMma = 8; + static int const kAccumulatorPatials = 2; + using QuadShapePerPatialMma = cutlass::MatrixShape<4, 4>; + + static void CUTLASS_DEVICE accumToSmem(AccumulatorSharedStorage& shared_storage, + FragmentC const& accum, + int lane_id, + cutlass::MatrixCoord const& tile_coords) + { + // ctor - from MmaVoltaTensorOpAccumulatorTileIterator + TensorRef ref_(shared_storage.accum_ref()); + int quad = (lane_id >> 2); + int lane_in_quad = (lane_id & 3); + int accum_m, accum_n; + + if (cutlass::platform::is_same::value) { + // (quad[2],quad[0])+lane_in_quad[0] + accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + (lane_in_quad & 1); + // (quad[1])+lane_in_quad[1] + accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials + + (lane_in_quad & 2); + } else { + accum_m = (((quad & 0x4) >> 1) + (quad & 0x1)) * 8 + lane_in_quad; // (quad[2],quad[0]) + accum_n = ((quad >> 1) & 0x1) * kElementsPerPartial * kAccumulatorPatials; + } + cutlass::MatrixCoord lane_offset(accum_m, accum_n); + + // Tile offset + ref_.add_coord_offset(tile_coords * cutlass::MatrixCoord({IteratorC::Shape::kRow, + IteratorC::Shape::kColumn})); + + using AccessType = cutlass::Array; + + // store - from MmaVoltaTensorOpAccumulatorTileIterator + CUTLASS_PRAGMA_UNROLL + for (int tile_n = 0; tile_n < Policy::TileIterations::kColumn; ++tile_n) { + CUTLASS_PRAGMA_UNROLL + for (int tile_m = 0; tile_m < Policy::TileIterations::kRow; ++tile_m) { + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Policy::MmaIterations::kColumn; ++mma_n) { + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Policy::MmaIterations::kRow; ++mma_m) { + int mma_accum_start = (((tile_n * Policy::TileIterations::kRow + tile_m) * + Policy::MmaIterations::kColumn + + mma_n) * + Policy::MmaIterations::kRow + + mma_m) * + kElementsPerMma; + + CUTLASS_PRAGMA_UNROLL + for (int p = 0; p < kAccumulatorPatials; ++p) { + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < EleShapePerPatial::kRow; ++m) { + int accum_m = tile_m * Policy::InterleavedTile::kRow + + mma_m * QuadShapePerPatialMma::kRow + m * 2; + int accum_n = tile_n * Policy::InterleavedTile::kColumn + + mma_n * QuadShapePerPatialMma::kColumn + + p * Policy::InterleavedTile::kColumn / 2; + int r = (accum_m + lane_offset.row()); + AccessType to_store; + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < EleShapePerPatial::kColumn; ++n) { + int idx = mma_accum_start + p * kElementsPerPartial + + m * EleShapePerPatial::kColumn + n; + int c = (accum_n + n + 
lane_offset.column()); + to_store[n] = scalar_t(accum[idx]); + } + int c = (accum_n + lane_offset.column()); + assert(r < 32); + assert(c < 32); + *reinterpret_cast(ref_.data() + ref_.offset({r, c})) = + to_store; + } + } + } + } + } + } + } + + static void CUTLASS_DEVICE accumApplyLSEToSmem(AccumulatorSharedStorage& shared_storage, + typename IteratorC::Fragment& accum, + lse_scalar_t const* lse, + int lse_extent, + int thread_id, + int warp_id, + int lane_id, + cutlass::MatrixCoord const& tile_coords) + { + // Non-optimized way to apply LSE to registers + // NOTE: accum is attn.T + // TODO: Optimize for each architecture + static constexpr int WarpSize = 32; + using AccumLambdaIterator = + typename DefaultMmaAccumLambdaIterator::Iterator; + auto lane_offset = AccumLambdaIterator::get_lane_offset(lane_id, warp_id, tile_coords); + + cutlass::Array lse_prefetched; + lse_prefetched.clear(); + int rowIdx = 0; + int colIdx = 0; + AccumLambdaIterator::iterateRows( + lane_offset, + [&](int accum_m) { + ++rowIdx; + colIdx = 0; + }, + [&](int accum_m, int accum_n, int idx) { + if (rowIdx == 1) { + lse_prefetched[colIdx] = accum_n < lse_extent + ? lse[accum_n] + : platform::numeric_limits::infinity(); + } + accum[idx] = expf(accum[idx] - lse_prefetched[colIdx]); + ++colIdx; + }, + [&](int accum_m) {}); + accumToSmem(shared_storage, accum, lane_id, tile_coords); + } +}; + +// Simt Specialization +// for f32 on Sm70-Sm75 and f16/f32 below + +template +struct B2bGemm, + cutlass::gemm::Operand::kC, + float, + cutlass::layout::RowMajor, + OperatorPolicy, + 1, + 1>, + Operator, + scalar_t, + WarpShape_, + ThreadblockShape_> { + using IteratorC = cutlass::gemm::warp::MmaSimtTileIterator, + cutlass::gemm::Operand::kC, + float, + cutlass::layout::RowMajor, + OperatorPolicy, + 1, + 1>; + using accum_t = typename IteratorC::Element; + using WarpShape = WarpShape_; + using ThreadblockShape = ThreadblockShape_; + using FragmentC = typename IteratorC::Fragment; + using lse_scalar_t = float; + + // Storage in shared-memory for Q.Kt + using AccumulatorSharedStorage = + cutlass::gemm::threadblock::AccumulatorSharedStorage // Padding + >; + + static void CUTLASS_DEVICE accumToSmem(AccumulatorSharedStorage& shared_storage, + FragmentC const& accum, + int lane_id, + cutlass::MatrixCoord const& tile_coords) + { + using Policy = typename IteratorC::Policy; + using Element = typename IteratorC::Element; + using Iterations = typename IteratorC::Iterations; + using Delta = typename IteratorC::Delta; + + auto ref_ = shared_storage.accum_ref(); + // ctor - MmaSimtTileIterator + // compute offset based on thread ID and lane layout + typename Policy::LaneLayout lane_layout = Policy::get_lane_layout(); + + MatrixCoord lane_offset = lane_layout.inverse(lane_id) * + MatrixCoord(Policy::LaneMmaShape::kM, Policy::LaneMmaShape::kN); + + ref_.add_coord_offset(lane_offset); + + // Tile offset + ref_.add_coord_offset(tile_coords * cutlass::MatrixCoord({IteratorC::Shape::kRow, + IteratorC::Shape::kColumn})); + + // store - MmaSimtTileIterator + CUTLASS_PRAGMA_UNROLL + for (int mma_n = 0; mma_n < Iterations::kColumn; ++mma_n) { + CUTLASS_PRAGMA_UNROLL + for (int n = 0; n < Policy::LaneMmaShape::kN; ++n) { + CUTLASS_PRAGMA_UNROLL + for (int mma_m = 0; mma_m < Iterations::kRow; ++mma_m) { + CUTLASS_PRAGMA_UNROLL + for (int m = 0; m < Policy::LaneMmaShape::kM; ++m) { + int r = Policy::LaneMmaShape::kM * (mma_m * Policy::WarpShape::kRow) + m; + int c = mma_n * Delta::kColumn + n; + int idx = n + Policy::LaneMmaShape::kN * + (mma_n + 
Iterations::kColumn * + (m + mma_m * Policy::LaneMmaShape::kM)); + ref_.at({r, c}) = scalar_t(accum[idx]); + } + } + } + } + } + + static void CUTLASS_DEVICE accumApplyLSEToSmem(AccumulatorSharedStorage& shared_storage, + typename IteratorC::Fragment& accum, + lse_scalar_t const* lse, + int lse_extent, + int thread_id, + int warp_id, + int lane_id, + cutlass::MatrixCoord const& tile_coords) + { + // Non-optimized way to apply LSE to registers + // NOTE: accum is attn.T + // TODO: Optimize for each architecture + static constexpr int WarpSize = 32; + using AccumLambdaIterator = + typename DefaultMmaAccumLambdaIterator::Iterator; + auto lane_offset = AccumLambdaIterator::get_lane_offset(lane_id, warp_id, tile_coords); + + cutlass::Array lse_prefetched; + lse_prefetched.clear(); + int rowIdx = 0; + int colIdx = 0; + AccumLambdaIterator::iterateRows( + lane_offset, + [&](int accum_m) { + ++rowIdx; + colIdx = 0; + }, + [&](int accum_m, int accum_n, int idx) { + if (rowIdx == 1) { + lse_prefetched[colIdx] = accum_n < lse_extent + ? lse[accum_n] + : platform::numeric_limits::infinity(); + } + accum[idx] = expf(accum[idx] - lse_prefetched[colIdx]); + ++colIdx; + }, + [&](int accum_m) {}); + accumToSmem(shared_storage, accum, lane_id, tile_coords); + } +}; + +} // namespace threadblock +} // namespace gemm +} // namespace cutlass + +///////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/epilogue_predicated_tile_iterator.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/epilogue_predicated_tile_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..667f1982d30d295498b952f0a681af29796a1521 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/epilogue_predicated_tile_iterator.h @@ -0,0 +1,691 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights + *reserved. SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + *this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + *ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + *LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + *CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + *SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + *INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + *CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + *ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + *POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/*! \file + \brief Epilogue iterator that supports prefetching + + Mostly copied from "cutlass/epilogue/threadblock/predicated_tile_iterator.h" +*/ + +#pragma once + +#include "cutlass/arch/arch.h" +#include "cutlass/arch/memory.h" +#include "cutlass/array.h" +#include "cutlass/cutlass.h" +#include "cutlass/epilogue/threadblock/output_tile_thread_map.h" +#include "cutlass/epilogue/threadblock/predicated_tile_iterator_params.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/tensor.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/transform/pitch_linear_thread_map.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { + +//////////////////////////////////////////////////////////////////////////////// + +namespace epilogue { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// Tile iterator used to load and store output tile from global memory in +/// epilogue. 
+/// +/// Satisfies: ReadableTileIterator | PredicatedTileIterator | +/// ForwardTileIterator +/// +template +class PredicatedTileIteratorPrefetch { +public: + using ThreadMap = ThreadMap_; + using Shape = typename ThreadMap::Shape; + + using Element = Element_; + + using Layout = layout::RowMajor; + using TensorRef = TensorRef; + using ConstTensorRef = typename TensorRef::ConstTensorRef; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + using TensorCoord = MatrixCoord; + + static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; + static int const kThreads = ThreadMap::kThreads; + static int const kIterations = ThreadMap::Count::kTile; + + static_assert(ThreadMap::Iterations::kRow > 0, "ThreadMap::Iterations::kRow must be > 0"); + static_assert(ThreadMap::Iterations::kGroup > 0, "ThreadMap::Iterations::kGroup must be > 0"); + static_assert(ThreadMap::Iterations::kCluster > 0, + "ThreadMap::Iterations::kCluster must be > 0"); + static_assert(ThreadMap::Iterations::kColumn > 0, "ThreadMap::Iterations::kColumn must be > 0"); + + /// Fragment object + using Fragment = Array; + + /// Memory access size + using AccessType = AlignedArray; + + // + // Parameters struct + // + + /// Uses a non-template class + struct Params : PredicatedTileIteratorParams { + using Base = PredicatedTileIteratorParams; + + CUTLASS_HOST_DEVICE + Params() {} + + CUTLASS_HOST_DEVICE + Params(Layout const& layout) + : PredicatedTileIteratorParams( + layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess, + make_OutputTileThreadMapDesc()) + { + } + + CUTLASS_HOST_DEVICE + Params(Base const& base) : Base(base) {} + }; + + /// Mask object + struct Mask { + static int const kCount = ThreadMap::Iterations::kColumn; + + /// Predicate state + bool predicates[kCount]; + + // + // Mask + // + CUTLASS_HOST_DEVICE + Mask() { enable(); } + + ///< Efficiently disables all accesses guarded by mask + CUTLASS_HOST_DEVICE void clear() + { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kCount; ++i) { predicates[i] = false; } + } + + ///< CUTLASS_HOST_DEVICE enables all accesses guarded by mask + CUTLASS_DEVICE void enable() + { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kCount; ++i) { predicates[i] = true; } + } + }; + +private: + // + // Data members + // + + /// Parameters structure containing reference and precomputed state. 
+ PredicatedTileIteratorParams params_; + + /// Byte-level pointer + uint8_t* byte_pointer_; + + /// Array of boolean values to contain steady-state predicates + Mask mask_; + + /// Extent of the matrix tile in rows + Index extent_row_; + + /// Extent of the matrix tile in rows + Index extent_column_; + + /// A thread's starting row position (assuming steady-state predicates have + /// been computed) + Index thread_start_row_; + + /// A thread's starting column + Index thread_start_column_; + + /// Internal state counter + int state_[3]; + + /// Scatter indices + int const* indices_; + + // + // Static asserts about internal strides + // + + static_assert(sizeof(extent_row_) == 4, "Expected 32b extents"); + static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents"); + static_assert(sizeof(PredicatedTileIteratorParams::stride) == 8, "Expected 64b strides"); + +private: + // + // Methods + // + +public: + // + // Methods + // + + /// Constructor + CUTLASS_DEVICE + PredicatedTileIteratorPrefetch(PredicatedTileIteratorParams const& params, + Element* pointer, + TensorCoord extent, + int thread_idx, + TensorCoord threadblock_offset = TensorCoord(), + int const* indices = nullptr) + : params_(params), indices_(indices) + { + TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset; + + extent_row_ = extent.row(); + extent_column_ = extent.column(); + + thread_start_row_ = thread_offset.row(); + thread_start_column_ = thread_offset.column(); + + // Initialize predicates + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) { + mask_.predicates[c] = + ((thread_offset.column() + ThreadMap::Delta::kColumn * c) < extent.column()); + } + + // Null pointer performs no accesses + if (!pointer) { mask_.clear(); } + + if (ScatterD && !indices) { mask_.clear(); } + + // Initialize pointer + byte_pointer_ = reinterpret_cast(pointer) + + LongIndex(thread_offset.row()) * LongIndex(params_.stride) + + LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess; + + if (ScatterD) { + byte_pointer_ = + reinterpret_cast(pointer) + + LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess; + } + + // Initialize internal state counter + state_[0] = state_[1] = state_[2] = 0; + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) + { + byte_pointer_ += pointer_offset * sizeof_bits::value / 8; + } + + CUTLASS_DEVICE + void prefetch_all() + { + CUTLASS_PRAGMA_UNROLL + for (int iter = 0; iter < kIterations; ++iter) { + prefetch(); + ++(*this); + } + } + + CUTLASS_DEVICE + void prefetch() + { + uint8_t* byte_pointer = byte_pointer_; + + CUTLASS_PRAGMA_UNROLL + for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { + CUTLASS_PRAGMA_UNROLL + for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { + CUTLASS_PRAGMA_UNROLL + for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { + int row_offset = row * ThreadMap::Delta::kRow + + group * ThreadMap::Delta::kGroup + + cluster * ThreadMap::Delta::kCluster; + + AccessType* memory_pointer = reinterpret_cast(byte_pointer); + + CUTLASS_PRAGMA_UNROLL + for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { + // on windows using unsigned long here gives the error + // error: asm operand type size(4) does not match + // type/size implied by constraint 'l' + uint64_t addr = + (uint64_t)((void*)&memory_pointer[column * ThreadMap::Delta::kColumn / + 
kElementsPerAccess]); + asm volatile("prefetch.global.L1 [ %1 ];" : "=l"(addr) : "l"(addr)); + } + + if (row + 1 < ThreadMap::Iterations::kRow) { + if (!ScatterD) { byte_pointer += params_.increment_row; } + } + } + + if (group + 1 < ThreadMap::Iterations::kGroup) { + byte_pointer += params_.increment_group; + } + } + + if (cluster + 1 < ThreadMap::Iterations::kCluster) { + byte_pointer += params_.increment_cluster; + } + } + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_byte_offset(Fragment& frag, int64_t byte_offset) const + { + uint8_t* byte_pointer = byte_pointer_; + AccessType* frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { + CUTLASS_PRAGMA_UNROLL + for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { + CUTLASS_PRAGMA_UNROLL + for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { + int frag_row_idx = + (row + ThreadMap::Iterations::kRow * + (group + ThreadMap::Iterations::kGroup * cluster)); + + int row_offset = row * ThreadMap::Delta::kRow + + group * ThreadMap::Delta::kGroup + + cluster * ThreadMap::Delta::kCluster; + + bool row_guard = ((row_offset + thread_start_row_) < extent_row_); + + AccessType* memory_pointer = + reinterpret_cast(byte_pointer + byte_offset); + + if (ScatterD && row_guard) { + assert(indices_); + + memory_pointer = reinterpret_cast( + byte_pointer + byte_offset + + LongIndex(indices_[row_offset + thread_start_row_]) * + LongIndex(params_.stride)); + } + + CUTLASS_PRAGMA_UNROLL + for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { + bool guard = row_guard && mask_.predicates[column]; + + cutlass::arch::global_load( + frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column], + (void*)&memory_pointer[column * ThreadMap::Delta::kColumn / + kElementsPerAccess], + guard); + } + + if (row + 1 < ThreadMap::Iterations::kRow) { + if (!ScatterD) { byte_pointer += params_.increment_row; } + } + } + + if (group + 1 < ThreadMap::Iterations::kGroup) { + byte_pointer += params_.increment_group; + } + } + + if (cluster + 1 < ThreadMap::Iterations::kCluster) { + byte_pointer += params_.increment_cluster; + } + } + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment& frag) const { load_with_byte_offset(frag, 0); } + + /// Stores a fragment to memory + CUTLASS_DEVICE + void store_with_byte_offset(Fragment const& frag, int64_t byte_offset) const + { + uint8_t* byte_pointer = byte_pointer_; + AccessType const* frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { + CUTLASS_PRAGMA_UNROLL + for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { + CUTLASS_PRAGMA_UNROLL + for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { + int frag_row_idx = + (row + ThreadMap::Iterations::kRow * + (group + ThreadMap::Iterations::kGroup * cluster)); + + int row_offset = row * ThreadMap::Delta::kRow + + group * ThreadMap::Delta::kGroup + + cluster * ThreadMap::Delta::kCluster; + + bool row_guard = ((row_offset + thread_start_row_) < extent_row_); + + AccessType* memory_pointer = + reinterpret_cast(byte_pointer + byte_offset); + + if (ScatterD && row_guard) { + assert(indices_); + + memory_pointer = reinterpret_cast( + byte_pointer + byte_offset + + LongIndex(indices_[row_offset + thread_start_row_]) * + LongIndex(params_.stride)); + } + + CUTLASS_PRAGMA_UNROLL + for (int column = 0; column < 
ThreadMap::Iterations::kColumn; ++column) { + bool guard = row_guard && mask_.predicates[column]; + + if (UseCUDAStore) { + if (guard) { + memory_pointer[column * ThreadMap::Delta::kColumn / + kElementsPerAccess] = + frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + + column]; + } + } else { + cutlass::arch::global_store( + frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column], + (void*)&memory_pointer[column * ThreadMap::Delta::kColumn / + kElementsPerAccess], + guard); + } + } + + if (row + 1 < ThreadMap::Iterations::kRow) { + if (!ScatterD) { byte_pointer += params_.increment_row; } + } + } + + if (group + 1 < ThreadMap::Iterations::kGroup) { + byte_pointer += params_.increment_group; + } + } + + if (cluster + 1 < ThreadMap::Iterations::kCluster) { + byte_pointer += params_.increment_cluster; + } + } + } + + /// Stores a fragment to memory + CUTLASS_DEVICE + void store(Fragment const& frag) const { store_with_byte_offset(frag, 0); } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void downsample_load_with_byte_offset(Fragment& frag, + int64_t byte_offset, + int convolution_P, + int convolution_Q, + int add_P, + int add_Q, + int problem_N) const + { + uint8_t* byte_pointer = byte_pointer_; + AccessType* frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { + CUTLASS_PRAGMA_UNROLL + for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { + CUTLASS_PRAGMA_UNROLL + for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { + int frag_row_idx = + (row + ThreadMap::Iterations::kRow * + (group + ThreadMap::Iterations::kGroup * cluster)); + + int row_offset = row * ThreadMap::Delta::kRow + + group * ThreadMap::Delta::kGroup + + cluster * ThreadMap::Delta::kCluster; + + bool row_guard = ((row_offset + thread_start_row_) < extent_row_); + + int output_row = row_offset + thread_start_row_; + int output_N = output_row / (convolution_P * convolution_Q); + int output_PQ = output_row % (convolution_P * convolution_Q); + int output_P = output_PQ / convolution_Q; + int output_Q = output_PQ % convolution_Q; + + int input_row = output_N * 2 * convolution_P * 2 * convolution_Q + + (2 * output_P + add_P) * 2 * convolution_Q + 2 * output_Q + + add_Q; + + int64_t byte_offset = (input_row - output_row) * problem_N * sizeof(float); + + AccessType* memory_pointer = + reinterpret_cast(byte_pointer + byte_offset); + + CUTLASS_PRAGMA_UNROLL + for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { + bool guard = row_guard && mask_.predicates[column]; + + cutlass::arch::global_load( + frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column], + (void*)&memory_pointer[column * ThreadMap::Delta::kColumn / + kElementsPerAccess], + guard); + } + + if (row + 1 < ThreadMap::Iterations::kRow) { + byte_pointer += params_.increment_row; + } + } + + if (group + 1 < ThreadMap::Iterations::kGroup) { + byte_pointer += params_.increment_group; + } + } + + if (cluster + 1 < ThreadMap::Iterations::kCluster) { + byte_pointer += params_.increment_cluster; + } + } + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void upsample_load_with_byte_offset(Fragment& frag, + int64_t byte_offset, + int convolution_P, + int convolution_Q, + int add_P, + int add_Q, + int problem_N) const + { + uint8_t* byte_pointer = byte_pointer_; + AccessType* frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) 
{ + CUTLASS_PRAGMA_UNROLL + for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { + CUTLASS_PRAGMA_UNROLL + for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { + int frag_row_idx = + (row + ThreadMap::Iterations::kRow * + (group + ThreadMap::Iterations::kGroup * cluster)); + + int row_offset = row * ThreadMap::Delta::kRow + + group * ThreadMap::Delta::kGroup + + cluster * ThreadMap::Delta::kCluster; + + bool row_guard = ((row_offset + thread_start_row_) < extent_row_); + + int output_row = row_offset + thread_start_row_; + int output_N = output_row / (convolution_P * convolution_Q); + int output_PQ = output_row % (convolution_P * convolution_Q); + int output_P = output_PQ / convolution_Q; + int output_Q = output_PQ % convolution_Q; + int row_add_P = add_P; + int row_add_Q = add_Q; + if (output_P > convolution_P - 2) row_add_P = 0; + if (output_Q > convolution_Q - 2) row_add_Q = 0; + + int input_row = output_N * (convolution_P / 2) * (convolution_Q / 2) + + ((output_P + row_add_P) / 2) * (convolution_Q / 2) + + (output_Q + row_add_Q) / 2; + + int64_t byte_offset = (input_row - output_row) * problem_N * sizeof(float); + + AccessType* memory_pointer = + reinterpret_cast(byte_pointer + byte_offset); + + CUTLASS_PRAGMA_UNROLL + for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { + bool guard = row_guard && mask_.predicates[column]; + + cutlass::arch::global_load( + frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column], + (void*)&memory_pointer[column * ThreadMap::Delta::kColumn / + kElementsPerAccess], + guard); + } + + if (row + 1 < ThreadMap::Iterations::kRow) { + byte_pointer += params_.increment_row; + } + } + + if (group + 1 < ThreadMap::Iterations::kGroup) { + byte_pointer += params_.increment_group; + } + } + + if (cluster + 1 < ThreadMap::Iterations::kCluster) { + byte_pointer += params_.increment_cluster; + } + } + } + + CUTLASS_DEVICE + MatrixCoord thread_start() const + { + return MatrixCoord(thread_start_row_, thread_start_column_); + } + + /// Need to get the thread start row from the tile iterator + CUTLASS_DEVICE + int32_t thread_start_row() const { return thread_start_row_; } + + /// Need to get the thread start row from the tile iterator + CUTLASS_DEVICE + int32_t thread_start_column() const { return thread_start_column_; } + + /// Extent of the matrix in rows + CUTLASS_DEVICE + Index extent_row() const { return extent_row_; } + + /// Extent of the matrix in columns + CUTLASS_DEVICE + Index extent_column() const { return extent_column_; } + + /// Advances to the next position to load or store + CUTLASS_HOST_DEVICE + PredicatedTileIteratorPrefetch& operator++() + { + ++state_[0]; + + if (!ScatterD) { byte_pointer_ += params_.advance_row; } + + thread_start_row_ += ThreadMap::Shape::kRow; + + if (state_[0] == ThreadMap::Count::kRow) { + state_[0] = 0; + ++state_[1]; + byte_pointer_ += params_.advance_group; + + thread_start_row_ += + (ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * ThreadMap::Count::kRow; + + if (state_[1] == ThreadMap::Count::kGroup) { + state_[1] = 0; + ++state_[2]; + byte_pointer_ += params_.advance_cluster; + + thread_start_row_ += ThreadMap::Count::kGroup * ThreadMap::Shape::kGroup * + ThreadMap::Count::kRow * ThreadMap::Shape::kRow; + + if (state_[2] == ThreadMap::Count::kCluster) { + state_[2] = 0; + byte_pointer_ += params_.advance_tile; + } + } + } + + return *this; + } + + ///< Efficiently disables all accesses guarded by mask + CUTLASS_DEVICE void clear_mask() { mask_.clear(); } + + 
+    ///< Efficiently enables all accesses guarded by mask
+    CUTLASS_DEVICE void enable_mask() { mask_.enable(); }
+
+    ///< Gets the mask
+    CUTLASS_DEVICE void get_mask(Mask& mask) const { mask = mask_; }
+
+    ///< Sets the mask
+    CUTLASS_DEVICE void set_mask(Mask const& mask) { mask_ = mask; }
+};
+
+template
+struct MakePrefetchableIterator {
+    using Iterator = PredicatedTileIteratorPrefetch;
+};
+
+///////////////////////////////////////////////////////////////////////////////
+
+} // namespace threadblock
+} // namespace epilogue
+} // namespace cutlass
+
+////////////////////////////////////////////////////////////////////////////////
diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/make_residual_last.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/make_residual_last.h
new file mode 100644
index 0000000000000000000000000000000000000000..ff0e324c3a6c0899d8439d92658515fb255f2dd4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/make_residual_last.h
@@ -0,0 +1,91 @@
+/***************************************************************************************************
+ * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the copyright holder nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ **************************************************************************************************/
+
+// Copyright (c) Microsoft Corporation.
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include "predicated_tile_access_iterator_residual_last.h" +#include "predicated_tile_iterator_residual_last.h" + +namespace cutlass { +namespace transform { +namespace threadblock { + +template +struct MakeIteratorResidualLast; + +template +struct MakeIteratorResidualLast< + PredicatedTileIterator> { + using Iterator = PredicatedTileIteratorResidualLast; +}; + +template +struct MakeIteratorResidualLast> { + using Iterator = PredicatedTileAccessIteratorResidualLast; +}; +} // namespace threadblock +} // namespace transform +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/predicated_tile_access_iterator_residual_last.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/predicated_tile_access_iterator_residual_last.h new file mode 100644 index 0000000000000000000000000000000000000000..7f6a2430845a16aac5891257c5b0600c5226e9a9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/predicated_tile_access_iterator_residual_last.h @@ -0,0 +1,1964 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights + *reserved. SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + *this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + *ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + *LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + *CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + *SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + *INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + *CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + *ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + *POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/*! \file + \brief Templates calculating the address and predicates to the load of tiles + from pitch-linear rank=2 tensors. + + This iterator uses masks to guard out-of-bounds accesses. The first tile + this iterator visits maybe partial, then the remaining tiles are complete. 
+ So, we only need to compute the predicates twice, once before the first tile + and once for the remaining full tiles which can share the same predicates. + + A precomputed "Params" object minimizes the amount of state that must be + stored in registers, and integer addition is used to advance the pointer + through memory. +*/ + +#pragma once + +#include "cutlass/array.h" +#include "cutlass/coord.h" +#include "cutlass/cutlass.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/predicate_vector.h" +#include "cutlass/tensor_ref.h" +#include "cutlass/tensor_view.h" +#include "cutlass/transform/threadblock/predicated_tile_access_iterator_params.h" + +//////////////////////////////////////////////////////////////////////////////// + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// PredicatedTileAccessIteratorResidualLast +/// +template +class PredicatedTileAccessIteratorResidualLast; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIteratorResidualLast for pitch-linear +/// data. +/// +template +class PredicatedTileAccessIteratorResidualLast { +public: + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::PitchLinear; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element*; + using NonConstPointer = typename platform::remove_const::type*; + + using UnderlyingPredicates = PredicatedTileAccessIteratorPredicates; + + static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements; + + static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements), + "Vectors implied by the thread map must be divisible by the access type."); + + using Mask = typename UnderlyingPredicates::Mask; + + /// Uses a non-template class + struct Params : PredicatedTileAccessIteratorParams { + using Base = PredicatedTileAccessIteratorParams; + + // Default ctor + CUTLASS_HOST_DEVICE + Params() {} + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const& layout) + : Base(layout.stride(0), + MakePredicatedTileAccessIteratorDesc()()) + { + } + + CUTLASS_HOST_DEVICE + Params(Base const& base) : Base(base) {} + }; + +private: + /// Internal pointer type permits fast address arithmetic + using BytePointer = char*; + +private: + // + // Data members + // + + UnderlyingPredicates the_predicates; + Mask residual_tile_mask; + + /// Parameters object with precomputed internal state + Params params_; + + /// Internal pointer to first access of tile + BytePointer pointer_; + + /// Below is used when Gather is turned on. 
We need to record strided_offset + /// and contiguous_offset separated to compute the offset by using + /// + /// offset = contiguous_offset + indices[strided_offset] + /// + + /// Gather indices + int const* indices_; + + Index gather_offset_strided; + +private: + /// Computes predicates based on internally tracked per-thread offset. + CUTLASS_DEVICE + void compute_predicates_( + /// Extent of the matrix window + TensorCoord extent, + /// optionally, simplify predicate calculation during 'steady state' phase + bool is_steady_state = false) + { + the_predicates.compute_predicates_(extent, is_steady_state); + } + +public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast( + /// Precomputed parameters object + Params const& params, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const& threadblock_offset, + /// Gather indices + int const* indices = nullptr) + : params_(params), + pointer_(reinterpret_cast(const_cast(pointer))), + the_predicates(extent), + indices_(indices) + { + the_predicates.set_predicates(thread_id, threadblock_offset); + the_predicates.get_mask(residual_tile_mask); + + // Working around a weird compiler bug happening on P100 for the backward. + // I've seen together: the_predicates.predicates_[0] = 14 (instead of 15) + // residual_tile_mask[0] = 15 (correct) + // + // Adding prints when the value is calculated (in `compute_predicates_`) + // sometimes removes the bug. The consequence is that we skip some + // element of a tensor, leading to wrong results + // Setting `compute_predicates_`'s second argument (`is_steady_state`) to + // true also seems to get rid of the bug - at the cost of twice as many + // comparisons. 
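+        // Note: the preprocessor guard below enables the workaround only when
+        // compiling device code for architectures older than sm_70 (the P100
+        // mentioned above is sm_60); host passes and sm_70+ device passes leave
+        // kWorkAroundCompilerBug set to false.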
+#if !defined(__CUDA_ARCH__) || (__CUDA_ARCH__ >= 700) + constexpr bool kWorkAroundCompilerBug = false; +#else + constexpr bool kWorkAroundCompilerBug = true; +#endif + the_predicates.compute_predicates_(extent, true && !kWorkAroundCompilerBug); + + // update internal pointers + Layout layout(params_.stride_); + + if (!Gather) { + add_pointer_offset(layout(the_predicates.thread_offset_)); + } else { + gather_offset_strided = the_predicates.thread_offset_.strided(); + add_pointer_offset(layout(make_Coord(the_predicates.thread_offset_.contiguous(), 0))); + } + } + + /// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock + /// offset + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast( + /// Precomputed parameters object + Params const& params, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + ///< ID of each participating thread + int thread_id) + : PredicatedTileAccessIteratorResidualLast(params, + pointer, + extent, + thread_id, + make_Coord(0, 0)) + { + } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { the_predicates.set_iteration_index(index); } + + CUTLASS_HOST_DEVICE + void set_residual_tile(bool is_residual_tile) + { + if (is_residual_tile) { the_predicates.set_mask(residual_tile_mask); } + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) + { + pointer_ += sizeof_bits::value * pointer_offset / 8; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_DEVICE + void add_tile_offset(TensorCoord const& tile_offset) + { + if (!Gather) { + if (kAdvanceRank) { + pointer_ += params_.inc_advance_ * LongIndex(tile_offset.strided()); + pointer_ += Shape::kContiguous * tile_offset.contiguous(); + } else { + pointer_ += params_.inc_advance_ * LongIndex(tile_offset.contiguous()); + pointer_ += Shape::kStrided * tile_offset.strided(); + } + } else { + add_pointer_offset(Shape::kContiguous * tile_offset.contiguous()); + gather_offset_strided += Shape::kStrided * tile_offset.strided(); + } + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType* get() const + { + if (Gather) { + assert(indices_); + + if (!valid()) { return nullptr; } + + LongIndex contiguous_offset = + the_predicates.iteration_contiguous_ * + (ThreadMap::Delta::kContiguous * sizeof_bits::value / 8) + + the_predicates.iteration_vector_; + int strided_index = gather_offset_strided + + the_predicates.iteration_strided_ * ThreadMap::Delta::kStrided; + + LongIndex strided_offset = indices_[strided_index] * LongIndex(params_.stride_) * + sizeof_bits::value / 8; + + return reinterpret_cast(pointer_ + contiguous_offset + strided_offset); + } + + return reinterpret_cast( + pointer_ + the_predicates.iteration_contiguous_ * + (ThreadMap::Delta::kContiguous * sizeof_bits::value) / + 8) + + the_predicates.iteration_vector_; + } + + /// Increment and return an instance to self. 
+ CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast& operator++() + { + the_predicates.operator++(); + + ++the_predicates.iteration_vector_; + if (the_predicates.iteration_vector_ < kAccessesPerVector) { return *this; } + + the_predicates.iteration_vector_ = 0; + ++the_predicates.iteration_contiguous_; + + if (the_predicates.iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { + return *this; + } + + // Enter here only if (iteration_contiguous_ == + // ThreadMap::Iteration::kContiguous) + the_predicates.iteration_contiguous_ = 0; + ++the_predicates.iteration_strided_; + + if (the_predicates.iteration_strided_ < ThreadMap::Iterations::kStrided) { + if (!Gather) { pointer_ += params_.inc_strided_; } + + return *this; + } + + // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) + // which means we enter the next tile. + the_predicates.iteration_strided_ = 0; + + if (!Gather) { + // advance to next tile + pointer_ += params_.inc_next_; + + // now return to start tile - if the iterator is subsequently advanced, + // this subtraction as well as the subsequent integer addition are both + // elided by the compiler. + pointer_ -= params_.inc_advance_; + } + + return *this; + } + + /// Increment and return an instance to self. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast operator++(int) + { + PredicatedTileAccessIteratorResidualLast self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { the_predicates.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { the_predicates.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const& mask) { the_predicates.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask& mask) { the_predicates.get_mask(mask); } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() const { return the_predicates.valid(); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIteratorResidualLast for column-major +/// data. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileAccessIteratorResidualLast { +public: + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element*; + using NonConstPointer = typename platform::remove_const::type*; + + using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast< + layout::PitchLinearShape, + Element, + layout::PitchLinear, + (kAdvanceRank == 0 ? 
0 : 1), + ThreadMap, + AccessType, + Gather>; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend PredicatedTileAccessIteratorResidualLast; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + /// Default ctor + CUTLASS_HOST_DEVICE + Params() {} + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))){}; + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {} + }; + +private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + +public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast( + ///< Precomputed parameters object + Params const& params, + ///< Pointer to start of tensor + Pointer pointer, + ///< Extent of tensor + TensorCoord extent, + ///< ID of each participating thread + int thread_id, + ///< Initial offset of threadblock + TensorCoord const& threadblock_offset, + int const* indices = nullptr ///< gather/scatter indices, note no support for + ///< gather/scatter at this specialization + ) + : iterator_(params.params_, + pointer, + layout::PitchLinearCoord(extent.row(), extent.column()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column()), + indices) + { + } + + /// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock + /// offset + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast( + Params const& params, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileAccessIteratorResidualLast(params, + pointer, + extent, + thread_id, + make_Coord(0, 0)) + { + } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + CUTLASS_HOST_DEVICE + void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) + { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const& tile_offset) + { + iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType* get() const { return reinterpret_cast(iterator_.get()); } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. 
+ CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast& operator++() + { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast operator++(int) + { + PredicatedTileAccessIteratorResidualLast self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const& mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask& mask) { iterator_.get_mask(mask); } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { return iterator_.valid(); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIteratorResidualLast for row-major +/// data. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileAccessIteratorResidualLast { +public: + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element*; + using NonConstPointer = typename platform::remove_const::type*; + + using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast< + layout::PitchLinearShape, + Element, + layout::PitchLinear, + (kAdvanceRank == 0 ? 
1 : 0), + ThreadMap, + AccessType, + Gather>; + + static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend PredicatedTileAccessIteratorResidualLast; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + /// Default ctor + CUTLASS_HOST_DEVICE + Params() {} + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))){}; + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {} + }; + +private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + +public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast( + ///< Precomputed parameters object + Params const& params, + ///< Pointer to start of tensor + Pointer pointer, + ///< Extent of tensor + TensorCoord extent, + ///< ID of each participating thread + int thread_id, + ///< Initial offset of threadblock + TensorCoord const& threadblock_offset, + /// Gather indices + int const* indices = nullptr) + : iterator_(params.params_, + pointer, + layout::PitchLinearCoord(extent.column(), extent.row()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row()), + indices) + { + } + + /// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock + /// offset + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast( + Params const& params, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileAccessIteratorResidualLast(params, + pointer, + extent, + thread_id, + make_Coord(0, 0)) + { + } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + CUTLASS_HOST_DEVICE + void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) + { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const& tile_offset) + { + iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType* get() const { return reinterpret_cast(iterator_.get()); } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast& operator++() + { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. 
+ /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast operator++(int) + { + PredicatedTileAccessIteratorResidualLast self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const& mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask& mask) { iterator_.get_mask(mask); } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { return iterator_.valid(); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIteratorResidualLast for affine rank 2 +/// data. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileAccessIteratorResidualLast, + AdvanceRank, + ThreadMap_, + AccessType_, + false> { +public: + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::AffineRankN<2>; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element*; + using NonConstPointer = typename platform::remove_const::type*; + + using UnderlyingPredicates = PredicatedTileAccessIteratorPredicates; + + static int const kAccessesPerVector = ThreadMap::kElementsPerAccess / AccessType::kElements; + + static_assert(!(ThreadMap::kElementsPerAccess % AccessType::kElements), + "Vectors implied by the thread map must be divisible by the access type."); + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingPredicates::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + public: + friend PredicatedTileAccessIteratorResidualLast; + + private: + /// stride of pitch-linear layout (units of Element) + Coord stride_; + /// amount (in byte) to increment pointer to move to next access along + /// contiguous dimension + LongIndex inc_contiguous_; + /// amount (in byte) to increment pointer from first access of current + /// contiguous dimension to first access of next one. + LongIndex inc_strided_; + /// amount (in byte) to increment pointer from last access of current + /// contiguous dimension to first access of next one. 
+ LongIndex inc_next_strided_; + /// amount (in byte) to increment pointer from last access to first access + /// of next tile + LongIndex inc_next_; + /// amount (in byte) to increment pointer from first access of current tile + /// to first access of next tile + LongIndex inc_advance_; + + public: + // Default ctor + CUTLASS_HOST_DEVICE + Params() : stride_(0), inc_contiguous_(0), inc_strided_(0), inc_next_(0), inc_advance_(0) {} + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const& layout) : stride_({layout.stride(0), layout.stride(1)}) + { + inc_contiguous_ = (LongIndex(stride_[0]) * ThreadMap::Delta::kContiguous) * + sizeof_bits::value / 8; + + inc_strided_ = (LongIndex(stride_[1]) * ThreadMap::Delta::kStrided) * + sizeof_bits::value / 8; + + inc_next_strided_ = + inc_strided_ - LongIndex(ThreadMap::Iterations::kContiguous - 1) * inc_contiguous_; + + if (kAdvanceRank) { + // advance along strided dimension + inc_advance_ = + Shape::kStrided * LongIndex(stride_[1]) * sizeof_bits::value / 8; + } else { + // advance along contiguous dimension + inc_advance_ = Shape::kContiguous * stride_[0] * sizeof_bits::value / 8; + } + + inc_next_ = inc_advance_ - + LongIndex(ThreadMap::Iterations::kContiguous - 1) * inc_contiguous_ - + LongIndex(ThreadMap::Iterations::kStrided - 1) * inc_strided_; + }; + }; + +private: + /// Internal pointer type permits fast address arithmetic + using BytePointer = char*; + + // + // Data members + // + + /// Parameters object with precomputed internal state + Params params_; + + /// Internal pointer to first access of tile + BytePointer pointer_; + + UnderlyingPredicates the_predicates; + Mask residual_tile_mask; + +private: + /// Computes predicates based on internally tracked per-thread offset. 
+ CUTLASS_DEVICE + void compute_predicates_( + /// Extent of the matrix window + TensorCoord extent, + /// optionally, simplify predicate calculation during 'steady state' phase + bool is_steady_state = false) + { + the_predicates.compute_predicates_(extent, is_steady_state); + } + +public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast( + ///< Precomputed parameters object + Params const& params, + ///< Pointer to start of tensor + Pointer pointer, + ///< Extent of tensor + TensorCoord extent, + ///< ID of each participating thread + int thread_id, + ///< Initial offset of threadblock + TensorCoord const& threadblock_offset, + int const* indices = nullptr ///< gather/scatter indices, note no support for + ///< gather/scatter at this specialization + ) + : params_(params), + pointer_(reinterpret_cast(const_cast(pointer))), + the_predicates(extent) + { + the_predicates.set_predicates(thread_id, threadblock_offset); + + // update internal pointers + Layout layout(params_.stride_); + add_pointer_offset(layout(the_predicates.thread_offset_)); + } + + /// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock + /// offset + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast( + Params const& params, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileAccessIteratorResidualLast(params, + pointer, + extent, + thread_id, + make_Coord(0, 0)) + { + } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { the_predicates.set_iteration_index(index); } + + CUTLASS_HOST_DEVICE + void set_residual_tile(bool is_residual_tile) + { + if (is_residual_tile) { the_predicates.set_mask(residual_tile_mask); } + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) + { + pointer_ += sizeof_bits::value * pointer_offset / 8; + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const& tile_offset) + { + if (kAdvanceRank) { + pointer_ += params_.inc_advance_ * LongIndex(tile_offset[1]); + pointer_ += Shape::kContiguous * tile_offset[0]; + } else { + pointer_ += params_.inc_advance_ * LongIndex(tile_offset[0]); + pointer_ += Shape::kStrided * tile_offset[1]; + } + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType* get() const + { + return reinterpret_cast(pointer_) + the_predicates.iteration_vector_; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. 
+ CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast& operator++() + { + the_predicates.operator++(); + ++the_predicates.iteration_vector_; + if (the_predicates.iteration_vector_ < kAccessesPerVector) { return *this; } + + the_predicates.iteration_vector_ = 0; + ++the_predicates.iteration_contiguous_; + + if (the_predicates.iteration_contiguous_ < ThreadMap::Iterations::kContiguous) { + pointer_ += params_.inc_contiguous_; + return *this; + } + + // Enter here only if (iteration_contiguous_ == + // ThreadMap::Iteration::kContiguous) + the_predicates.iteration_contiguous_ = 0; + ++the_predicates.iteration_strided_; + + if (the_predicates.iteration_strided_ < ThreadMap::Iterations::kStrided) { + pointer_ += params_.inc_next_strided_; + return *this; + } + + // Enter here only if (iteration_stride_ == ThreadMap::Iteration::kStrided) + // which means we enter the next tile. + the_predicates.iteration_strided_ = 0; + + // advance to next tile + pointer_ += params_.inc_next_; + + // now return to start tile - if the iterator is subsequently advanced, this + // subtraction as well as the subsequent integer addition are both elided by + // the compiler. + pointer_ -= params_.inc_advance_; + + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast operator++(int) + { + PredicatedTileAccessIteratorResidualLast self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { the_predicates.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { the_predicates.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const& mask) { the_predicates.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask& mask) { the_predicates.get_mask(mask); } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { return the_predicates.valid(); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIteratorResidualLast for affine rank 2 +/// column-major data. 
+/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileAccessIteratorResidualLast { +public: + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::AffineRank2ColumnMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element*; + using NonConstPointer = typename platform::remove_const::type*; + + // Map to the underlying AffineRankN<2> layout + using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast< + layout::PitchLinearShape, + Element, + layout::AffineRankN<2>, + (kAdvanceRank == 0 ? 0 : 1), + ThreadMap, + AccessType>; + + static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend PredicatedTileAccessIteratorResidualLast; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + /// Default ctor + CUTLASS_HOST_DEVICE + Params() {} + + /// Construct the Params object given an AffineRankN<2> tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const& layout) + : params_(layout::AffineRankN<2>(layout.stride(0), layout.stride(1))){}; + }; + +private: + // + // Data members + // + + /// Underlying AffineRankN<2> tile iterator + UnderlyingIterator iterator_; + +public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast( + ///< Precomputed parameters object + Params const& params, + ///< Pointer to start of tensor + Pointer pointer, + ///< Extent of tensor + TensorCoord extent, + ///< ID of each participating thread + int thread_id, + ///< Initial offset of threadblock + TensorCoord const& threadblock_offset, + int const* indices = nullptr ///< gather/scatter indices, note no support for + ///< gather/scatter at this specialization + ) + : iterator_(params.params_, + pointer, + layout::PitchLinearCoord(extent.row(), extent.column()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column())) + { + } + + /// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock + /// offset + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast( + Params const& params, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileAccessIteratorResidualLast(params, + pointer, + extent, + thread_id, + make_Coord(0, 0)) + { + } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + CUTLASS_HOST_DEVICE + void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); } + + /// 
Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) + { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const& tile_offset) + { + iterator_.add_tile_offset(make_Coord(tile_offset.row(), tile_offset.column())); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType* get() const { return reinterpret_cast(iterator_.get()); } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast& operator++() + { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast operator++(int) + { + PredicatedTileAccessIteratorResidualLast self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const& mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask& mask) { iterator_.get_mask(mask); } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { return iterator_.valid(); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIteratorResidualLast for affine rank-2 +/// row-major data. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileAccessIteratorResidualLast { +public: + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::AffineRank2RowMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element*; + using NonConstPointer = typename platform::remove_const::type*; + + // Map to the underlying AffineRankN<2> layout + using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast< + layout::PitchLinearShape, + Element, + layout::AffineRankN<2>, + (kAdvanceRank == 0 ? 
1 : 0), + ThreadMap, + AccessType>; + + static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend PredicatedTileAccessIteratorResidualLast; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + /// Default ctor + CUTLASS_HOST_DEVICE + Params() {} + + /// Construct the Params object given an AffineRankN<2> tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const& layout) + : params_(layout::AffineRankN<2>(layout.stride(1), layout.stride(0))){}; + }; + +private: + // + // Data members + // + + /// Underlying AffineRankN<2> tile iterator + UnderlyingIterator iterator_; + +public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast( + ///< Precomputed parameters object + Params const& params, + ///< Pointer to start of tensor + Pointer pointer, + ///< Extent of tensor + TensorCoord extent, + ///< ID of each participating thread + int thread_id, + ///< Initial offset of threadblock + TensorCoord const& threadblock_offset, + int const* indices = nullptr ///< gather/scatter indices, note no support for + ///< gather/scatter at this specialization + ) + : iterator_(params.params_, + pointer, + layout::PitchLinearCoord(extent.column(), extent.row()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row())) + { + } + + /// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock + /// offset + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast( + Params const& params, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileAccessIteratorResidualLast(params, + pointer, + extent, + thread_id, + make_Coord(0, 0)) + { + } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + CUTLASS_HOST_DEVICE + void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) + { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const& tile_offset) + { + iterator_.add_tile_offset(make_Coord(tile_offset.column(), tile_offset.row())); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType* get() const { return reinterpret_cast(iterator_.get()); } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast& operator++() + { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. 
+ /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast operator++(int) + { + PredicatedTileAccessIteratorResidualLast self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const& mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask& mask) { iterator_.get_mask(mask); } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { return iterator_.valid(); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIteratorResidualLast for column-major +/// interleaved data. It is mapped to the congruous layout. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// + +template +class PredicatedTileAccessIteratorResidualLast, + AdvanceRank, + ThreadMap_, + AccessType_, + false> { +public: + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + static int const kInterleavedK = InterleavedK; + using Layout = layout::ColumnMajorInterleaved; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element*; + using NonConstPointer = typename platform::remove_const::type*; + + using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast< + layout::PitchLinearShape, + Element, + layout::PitchLinear, + (kAdvanceRank == 0 ? 
0 : 1), + ThreadMap, + AccessType>; + + static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend PredicatedTileAccessIteratorResidualLast; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + CUTLASS_HOST_DEVICE + Params() {} + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))) {} + + CUTLASS_HOST_DEVICE + Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {} + }; + +private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + +public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast( + /// Precomputed parameters object + Params const& params, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const& threadblock_offset, + int const* indices = nullptr ///< gather/scatter indices, note no support for + ///< gather/scatter at this specialization + ) + : iterator_(params.params_, + pointer, + layout::PitchLinearCoord(extent.row() * kInterleavedK, + extent.column() / kInterleavedK), + thread_id, + layout::PitchLinearCoord(threadblock_offset.row() * kInterleavedK, + threadblock_offset.column() / kInterleavedK)) + { + } + + /// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock + /// offset + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast( + Params const& params, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileAccessIteratorResidualLast(params, + pointer, + extent, + thread_id, + make_Coord(0, 0)) + { + } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + CUTLASS_HOST_DEVICE + void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) + { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const& tile_offset) + { + iterator_.add_tile_offset({tile_offset.row(), tile_offset.column()}); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType* get() const { return reinterpret_cast(iterator_.get()); } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast& operator++() + { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. 
+ /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast operator++(int) + { + PredicatedTileAccessIteratorResidualLast self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const& mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask& mask) { iterator_.get_mask(mask); } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { return iterator_.valid(); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileAccessIteratorResidualLast for row-major +/// interleaved data. +// It is mapped to the congruous layout. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileAccessIteratorResidualLast, + AdvanceRank, + ThreadMap_, + AccessType_, + false> { +public: + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + static int const kInterleavedK = InterleavedK; + using Layout = layout::RowMajorInterleaved; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + using AccessType = AccessType_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element*; + using NonConstPointer = typename platform::remove_const::type*; + + using UnderlyingIterator = PredicatedTileAccessIteratorResidualLast< + layout::PitchLinearShape, + Element, + layout::PitchLinear, + (kAdvanceRank == 0 ? 
1 : 0), + ThreadMap, + AccessType>; + + static int const kAccessesPerVector = UnderlyingIterator::kAccessesPerVector; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend PredicatedTileAccessIteratorResidualLast; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + CUTLASS_HOST_DEVICE + Params() {} + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))) {} + + CUTLASS_HOST_DEVICE + Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {} + }; + +private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + +public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast( + /// Precomputed parameters object + Params const& params, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const& threadblock_offset, + int const* indices = nullptr ///< gather/scatter indices, note no support for + ///< gather/scatter at this specialization + ) + : iterator_(params.params_, + pointer, + layout::PitchLinearCoord(extent.column() * kInterleavedK, + extent.row() / kInterleavedK), + thread_id, + layout::PitchLinearCoord(threadblock_offset.column() * kInterleavedK, + threadblock_offset.row() / kInterleavedK)) + { + } + + /// Construct a PredicatedTileAccessIteratorResidualLast with zero threadblock + /// offset + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast( + Params const& params, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileAccessIteratorResidualLast(params, + pointer, + extent, + thread_id, + make_Coord(0, 0)) + { + } + + /// Overrides the internal iteration index + CUTLASS_HOST_DEVICE + void set_iteration_index(int index) { iterator_.set_iteration_index(index); } + + CUTLASS_HOST_DEVICE + void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) + { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + void add_tile_offset(TensorCoord const& tile_offset) + { + iterator_.add_tile_offset({tile_offset.column(), tile_offset.row()}); + } + + /// Returns a pointer + CUTLASS_HOST_DEVICE + AccessType* get() const { return reinterpret_cast(iterator_.get()); } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast& operator++() + { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. 
+ /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileAccessIteratorResidualLast operator++(int) + { + PredicatedTileAccessIteratorResidualLast self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const& mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask& mask) { iterator_.get_mask(mask); } + + /// Returns whether access is valid or not + CUTLASS_HOST_DEVICE + bool valid() { return iterator_.valid(); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/predicated_tile_iterator_atomic.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/predicated_tile_iterator_atomic.h new file mode 100644 index 0000000000000000000000000000000000000000..8d4173f1a6a2e5a073d7da4c129f471d4f394632 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/predicated_tile_iterator_atomic.h @@ -0,0 +1,886 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once +#include +#include +#include +namespace cutlass { +namespace epilogue { +namespace threadblock { + +template +struct atomic_store {}; + +template +struct atomic_store::value>::type> { + using Element = typename AccessType::Element; + static const int kCount = AccessType::kElements; + + CUTLASS_DEVICE + atomic_store(AccessType const& D, void* ptr, bool pred_guard) + { + static_assert(!(kCount % 2), "kCount must be even"); + half2* p = reinterpret_cast(ptr); + uint const* data = reinterpret_cast(&D); + asm volatile( + "{\n" + " .reg .pred p;\n" + " setp.ne.b32 p, %0, 0;\n" + : + : "r"((int)pred_guard)); + for (int i = 0; i < kCount / 2; i++) { + asm volatile(" @p red.relaxed.global.add.noftz.f16x2 [%0], %1;\n" + : + : "l"(p + i), "r"(data[i])); + } + asm volatile("}\n" ::); + } +}; + +template +struct atomic_store::value>::type> { + using Element = typename AccessType::Element; + static const int kCount = AccessType::kElements; + + CUTLASS_DEVICE + atomic_store(AccessType const& D, void* ptr, bool pred_guard) + { + Element* p = reinterpret_cast(ptr); + uint const* data = reinterpret_cast(&D); + asm volatile( + "{\n" + " .reg .pred p;\n" + " setp.ne.b32 p, %0, 0;\n" + : + : "r"((int)pred_guard)); + for (int i = 0; i < kCount; i++) { + asm volatile(" @p red.relaxed.global.add.f32 [%0], %1;\n" + : + : "l"(p + i), "r"(data[i])); + } + asm volatile("}\n" ::); + } +}; + +template +class PredicatedTileIteratorAffineRankNAtomic { +public: + using ThreadMap = ThreadMap_; + using Shape = typename ThreadMap::Shape; + + using Element = Element_; + + using Layout = layout::AffineRankN; + using TensorRef = TensorRef; + using TensorView = TensorView; + using ConstTensorRef = typename TensorRef::ConstTensorRef; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + using TensorCoord = typename Layout::TensorCoord; + + static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; + static int const kThreads = ThreadMap::kThreads; + static int const kIterations = ThreadMap::Count::kTile; + + static_assert(ThreadMap::Iterations::kRow > 0, "ThreadMap::Iterations::kRow must be > 0"); + static_assert(ThreadMap::Iterations::kGroup > 0, "ThreadMap::Iterations::kGroup must be > 0"); + static_assert(ThreadMap::Iterations::kCluster > 0, + "ThreadMap::Iterations::kCluster must be > 0"); + static_assert(ThreadMap::Iterations::kColumn > 0, "ThreadMap::Iterations::kColumn must be > 0"); + static_assert(!(Layout::kRank % 2), + "Layout rank must be even. This assumes the first half of the " + "modes correspond to the 'row' " + "and the second half of the modes correspond to the 'column'"); + + static bool const kBigEndian = false; + + /// Fragment object + using Fragment = Array; + + /// Memory access size + using AccessType = AlignedArray; + + // + // Parameters struct + // + + /// Parameters structure + struct Params { + // + // Data members + // + + Layout layout; + + /// Stride in units of bytes along M modes + Coord stride_m; + + /// Stride in units of bytes along N modes + Coord stride_n; + + /// Fast divmod objects divided by tensor extents + FastDivmod divmod_m[(Layout::kRank == 2) ? 1 : (Layout::kRank / 2 - 1)]; + + /// Fast divmod objects divided by tensor extents + FastDivmod divmod_n[(Layout::kRank == 2) ? 
1 : (Layout::kRank / 2 - 1)]; + + int64_t rank2_inc_col; + int64_t rank2_inc_row; + + // + // Methods + // + CUTLASS_HOST_DEVICE + Params() {} + + CUTLASS_HOST_DEVICE + Params(TensorCoord const& extent, Layout const& layout_) : layout(layout_) + { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < Layout::kRank / 2; ++i) { + stride_m[i] = OffsetBytes(layout_.stride()[i]); + stride_n[i] = OffsetBytes(layout_.stride()[i + Layout::kRank / 2]); + } + + if (kBigEndian) { + // "Big Endian" scheme + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < Layout::kRank / 2 - 1; ++i) { + divmod_m[i] = FastDivmod(extent[i + 1]); + divmod_n[i] = FastDivmod(extent[i + Layout::kRank / 2 + 1]); + } + } else { + // "Little Endian" scheme + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < Layout::kRank / 2 - 1; ++i) { + divmod_m[i] = FastDivmod(extent[i]); + divmod_n[i] = FastDivmod(extent[i + Layout::kRank / 2]); + } + } + } + + CUTLASS_HOST_DEVICE + Params(Layout const& layout_) : layout(layout_) + { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < Layout::kRank / 2; ++i) { + stride_m[i] = OffsetBytes(layout_.stride()[i]); + stride_n[i] = OffsetBytes(layout_.stride()[i + Layout::kRank / 2]); + } + + rank2_inc_col = ThreadMap::Delta::kColumn * stride_n[0]; + rank2_inc_row = ThreadMap::Delta::kRow * stride_m[0]; + } + }; + + /// Mask object + struct Mask { + static int const kCount = ThreadMap::Iterations::kColumn; + + /// Predicate state + bool predicates[kCount]; + + // + // Mask + // + CUTLASS_HOST_DEVICE + Mask() { enable(); } + + ///< Efficiently disables all accesses guarded by mask + CUTLASS_HOST_DEVICE void clear() + { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kCount; ++i) { predicates[i] = false; } + } + + ///< CUTLASS_HOST_DEVICE enables all accesses guarded by mask + CUTLASS_DEVICE void enable() + { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kCount; ++i) { predicates[i] = true; } + } + }; + +private: + // + // Data members + // + + /// Parameters structure containing reference and precomputed state. 
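+    //
+    // params_ carries the per-mode byte strides (stride_m / stride_n) and the
+    // FastDivmod helpers used by store_with_byte_offset(). For Layout::kRank > 2
+    // the byte offset of an access at logical coordinate (m, n) is
+    //
+    //   dot(modes_m, stride_m) + dot(modes_n, stride_n)
+    //
+    // where modes_m / modes_n come from CoordinateDecomposition of m and n.
+    // For the rank-2 case the decomposition is skipped and the precomputed
+    // rank2_inc_row / rank2_inc_col byte increments are used instead.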
+ Params params_; + + /// Byte-level pointer + uint8_t* byte_pointer_; + + /// Array of boolean values to contain steady-state predicates + Mask mask_; + + /// Extent of the matrix tile in rows + Index extent_row_; + + /// Extent of the matrix tile in columns + Index extent_col_; + + /// A thread's starting row position (assuming steady-state predicates have + /// been computed) + Index thread_start_row_; + + /// A thread's starting column position (assuming steady-state predicates have + /// been computed) + Index thread_start_column_; + + /// Internal state counter + int state_[3]; + + /// Offsets in columns, cached for performance + int64_t offset_modes_n_[ThreadMap::Iterations::kColumn]; + + // + // Static asserts about internal strides + // + + static_assert(sizeof(extent_row_) == 4, "Expected 32b extents"); + static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents"); + +private: + // + // Methods + // + +public: + // + // Methods + // + + /// Constructor + CUTLASS_DEVICE + PredicatedTileIteratorAffineRankNAtomic( + Params const& params, + Element* pointer, + MatrixCoord extent, + int thread_idx, + MatrixCoord threadblock_offset = MatrixCoord(), + int const* indices = nullptr ///< gather/scatter indices, note no support for + ///< gather/scatter at this specialization + ) + : params_(params) + { + MatrixCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset; + + extent_row_ = extent.row(); + extent_col_ = extent.column(); + + thread_start_row_ = thread_offset.row(); + thread_start_column_ = thread_offset.column(); + + if (Layout::kRank > 2) { + // Initialize predicates + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) { + // + // Compute coordinate and decompose into N modes + // + + int coord_n = thread_start_column_ + c * ThreadMap::Delta::kColumn; + + mask_.predicates[c] = coord_n < extent.column(); + + Coord modes_n; + + int64_t offset_modes_n = 0; + + if (kBigEndian) { + modes_n = CoordinateDecomposition(coord_n, params_.divmod_n); + + offset_modes_n = dot(modes_n, params_.stride_n); + } else { + modes_n = CoordinateDecompositionLittleEndian( + coord_n, params_.divmod_n); + + offset_modes_n = dot(modes_n, params_.stride_n); + } + + offset_modes_n_[c] = offset_modes_n; + } + + if (!pointer) { mask_.clear(); } + } + + // Initialize pointer + byte_pointer_ = reinterpret_cast(pointer); + + // Initialize internal state counter + state_[0] = state_[1] = state_[2] = 0; + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) + { + byte_pointer_ += pointer_offset * sizeof_bits::value / 8; + } + + /// Stores a fragment to memory + CUTLASS_DEVICE + void store_with_byte_offset(Fragment const& frag, int64_t byte_offset) + { + uint8_t* byte_pointer = byte_pointer_; + AccessType const* frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { + CUTLASS_PRAGMA_UNROLL + for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { + int row_begin = thread_start_row_ + group * ThreadMap::Delta::kGroup + + cluster * ThreadMap::Delta::kCluster; + int64_t offset_modes_m = row_begin * params_.stride_m[0]; + + CUTLASS_PRAGMA_UNROLL + for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { + int frag_row_idx = + (row + ThreadMap::Iterations::kRow * + (group + ThreadMap::Iterations::kGroup * cluster)); + + // + // Compute coordinate and decompose into M modes + // + + 
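+                    // For Layout::kRank == 2 the running offset_modes_m already points at
+                    // this row (it is advanced by rank2_inc_row at the bottom of the loop);
+                    // for higher ranks coord_m is decomposed into per-mode indices below
+                    // and dotted with stride_m to rebuild the offset from scratch.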
int coord_m = row * ThreadMap::Delta::kRow + row_begin; + + Coord modes_m; + + if (Layout::kRank > 2) { + if (kBigEndian) { + modes_m = CoordinateDecomposition(coord_m, + params_.divmod_m); + } else { + modes_m = CoordinateDecompositionLittleEndian( + coord_m, params_.divmod_m); + } + + offset_modes_m = dot(modes_m, params_.stride_m); + } + + // + // Compute the offset due to modes M + // + + bool row_guard = (coord_m < extent_row_); + int64_t offset_modes_n = thread_start_column_ * params_.stride_n[0]; + + CUTLASS_PRAGMA_UNROLL + for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { + // + // Compute coordinate and decompose into N modes + // + + if (Layout::kRank > 2) { offset_modes_n = offset_modes_n_[column]; } + + // + // Compute the pointer and access + // + bool guard; + if (Layout::kRank > 2) { + guard = row_guard && mask_.predicates[column]; + } else { + guard = (coord_m < extent_row_) && + ((thread_start_column_ + ThreadMap::Delta::kColumn * column) < + extent_col_); + } + + atomic_store( + frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column], + (void*)(byte_pointer + offset_modes_m + offset_modes_n + byte_offset), + guard); + + if (Layout::kRank == 2) { offset_modes_n += params_.rank2_inc_col; } + } + + if (Layout::kRank == 2) { offset_modes_m += params_.rank2_inc_row; } + } + } + } + } + + /// Stores a fragment to memory + CUTLASS_DEVICE + void store(Fragment const& frag) { store_with_byte_offset(frag, 0); } + + CUTLASS_DEVICE + void load(Fragment& frag) {} + + /// Advances to the next position to load or store + CUTLASS_HOST_DEVICE + PredicatedTileIteratorAffineRankNAtomic& operator++() + { + ++state_[0]; + thread_start_row_ += ThreadMap::Shape::kRow; + + if (state_[0] == ThreadMap::Count::kRow) { + state_[0] = 0; + ++state_[1]; + + thread_start_row_ += + (ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * ThreadMap::Count::kRow; + + if (state_[1] == ThreadMap::Count::kGroup) { + state_[1] = 0; + ++state_[2]; + + thread_start_row_ += ThreadMap::Count::kGroup * ThreadMap::Shape::kGroup * + ThreadMap::Count::kRow * ThreadMap::Shape::kRow; + + if (state_[2] == ThreadMap::Count::kCluster) { state_[2] = 0; } + } + } + + return *this; + } + + ///< Efficiently disables all accesses guarded by mask + CUTLASS_DEVICE void clear_mask() { mask_.clear(); } + + ///< Efficiently enables all accesses guarded by mask + CUTLASS_DEVICE void enable_mask() { mask_.enable(); } + + ///< Sets the mask + CUTLASS_DEVICE void get_mask(Mask& mask) { mask = mask_; } + + ///< Sets the mask + CUTLASS_DEVICE void set_mask(Mask const& mask) { mask_ = mask; } +}; + +template +class PredicatedTileIteratorAtomic { +public: + using ThreadMap = ThreadMap_; + using Shape = typename ThreadMap::Shape; + + using Element = Element_; + + using Layout = layout::RowMajor; + using TensorRef = TensorRef; + using ConstTensorRef = typename TensorRef::ConstTensorRef; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + using TensorCoord = MatrixCoord; + + static int const kElementsPerAccess = ThreadMap::kElementsPerAccess; + static int const kThreads = ThreadMap::kThreads; + static int const kIterations = ThreadMap::Count::kTile; + + static bool constexpr PermuteD = !layout::is_trivial_permute; + + static_assert(ThreadMap::Iterations::kRow > 0, "ThreadMap::Iterations::kRow must be > 0"); + static_assert(ThreadMap::Iterations::kGroup > 0, "ThreadMap::Iterations::kGroup must be > 0"); + static_assert(ThreadMap::Iterations::kCluster > 0, + 
"ThreadMap::Iterations::kCluster must be > 0"); + static_assert(ThreadMap::Iterations::kColumn > 0, "ThreadMap::Iterations::kColumn must be > 0"); + + /// Fragment object + using Fragment = Array; + + /// Memory access size + using AccessType = AlignedArray; + + // + // Parameters struct + // + + /// Uses a non-template class + struct Params : PredicatedTileIteratorParams { + using Base = PredicatedTileIteratorParams; + + CUTLASS_HOST_DEVICE + Params() {} + + CUTLASS_HOST_DEVICE + Params(Layout const& layout) + : PredicatedTileIteratorParams( + layout.stride(0) * int(sizeof(AccessType)) / kElementsPerAccess, + make_OutputTileThreadMapDesc()) + { + } + + CUTLASS_HOST_DEVICE + Params(Base const& base) : Base(base) {} + }; + + /// Mask object + struct Mask { + static int const kCount = ThreadMap::Iterations::kColumn; + + /// Predicate state + bool predicates[kCount]; + + // + // Mask + // + CUTLASS_HOST_DEVICE + Mask() { enable(); } + + ///< Efficiently disables all accesses guarded by mask + CUTLASS_HOST_DEVICE void clear() + { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kCount; ++i) { predicates[i] = false; } + } + + ///< CUTLASS_HOST_DEVICE enables all accesses guarded by mask + CUTLASS_DEVICE void enable() + { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kCount; ++i) { predicates[i] = true; } + } + }; + +private: + // + // Data members + // + + /// Parameters structure containing reference and precomputed state. + PredicatedTileIteratorParams params_; + + /// Byte-level pointer. This pointer is usually for both load() and store(), + /// unless PermuteD is performed. When having PermuteD, byte_pointer_ is only + /// for load(). + uint8_t* byte_pointer_; + + /// Byte-level pointer for store(). Due to PermuteD Op, store_byte_pointer_ + /// may be with different address computation compared to byte_pointer_. 
+ uint8_t* store_byte_pointer_; + + /// Array of boolean values to contain steady-state predicates + Mask mask_; + + /// Extent of the matrix tile in rows + Index extent_row_; + + /// Extent of the matrix tile in rows + Index extent_column_; + + /// A thread's starting row position (assuming steady-state predicates have + /// been computed) + Index thread_start_row_; + + /// A thread's starting column + Index thread_start_column_; + + /// Internal state counter + int state_[3]; + + /// Scatter indices + int const* indices_; + + /// PermuteDLayout + PermuteDLayout permute_layout_; + + // + // Static asserts about internal strides + // + + static_assert(sizeof(extent_row_) == 4, "Expected 32b extents"); + static_assert(sizeof(thread_start_row_) == 4, "Expected 32b extents"); + static_assert(sizeof(PredicatedTileIteratorParams::stride) == 8, "Expected 64b strides"); + +private: + // + // Methods + // + +public: + // + // Methods + // + + /// Constructor + CUTLASS_DEVICE + PredicatedTileIteratorAtomic(PredicatedTileIteratorParams const& params, + Element* pointer, + TensorCoord extent, + int thread_idx, + TensorCoord threadblock_offset = TensorCoord(), + int const* indices = nullptr) + : params_(params), + indices_(indices), + permute_layout_(PitchLinearCoord(extent.column(), extent.row()), + params_.stride * kElementsPerAccess / sizeof(AccessType)) + { + TensorCoord thread_offset = ThreadMap::initial_offset(thread_idx) + threadblock_offset; + + extent_row_ = extent.row(); + extent_column_ = extent.column(); + + thread_start_row_ = thread_offset.row(); + thread_start_column_ = thread_offset.column(); + + // Initialize predicates + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kColumn; ++c) { + mask_.predicates[c] = + ((thread_offset.column() + ThreadMap::Delta::kColumn * c) < extent.column()); + } + + // Null pointer performs no accesses + if (!pointer) { mask_.clear(); } + + if (ScatterD && !indices) { mask_.clear(); } + + // Initialize byte_pointer_ + byte_pointer_ = reinterpret_cast(pointer) + + LongIndex(thread_offset.row()) * LongIndex(params_.stride) + + LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess; + + if (ScatterD) { + byte_pointer_ = + reinterpret_cast(pointer) + + LongIndex(thread_offset.column()) * sizeof(AccessType) / kElementsPerAccess; + } + + // store_byte_pointer_ is set to be the same with byte_pointer_ unless + // PermuteD is used. + store_byte_pointer_ = PermuteD ? 
reinterpret_cast(pointer) : byte_pointer_; + + // Initialize internal state counter + state_[0] = state_[1] = state_[2] = 0; + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) + { + store_byte_pointer_ += pointer_offset * sizeof_bits::value / 8; + byte_pointer_ += pointer_offset * sizeof_bits::value / 8; + } + + /// Stores a fragment to memory + CUTLASS_DEVICE + void store_with_byte_offset(Fragment const& frag, int64_t byte_offset) const + { + uint8_t* byte_pointer = store_byte_pointer_; + AccessType const* frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int cluster = 0; cluster < ThreadMap::Iterations::kCluster; ++cluster) { + CUTLASS_PRAGMA_UNROLL + for (int group = 0; group < ThreadMap::Iterations::kGroup; ++group) { + CUTLASS_PRAGMA_UNROLL + for (int row = 0; row < ThreadMap::Iterations::kRow; ++row) { + int frag_row_idx = + (row + ThreadMap::Iterations::kRow * + (group + ThreadMap::Iterations::kGroup * cluster)); + + int row_offset = row * ThreadMap::Delta::kRow + + group * ThreadMap::Delta::kGroup + + cluster * ThreadMap::Delta::kCluster; + + bool row_guard = ((row_offset + thread_start_row_) < extent_row_); + + AccessType* memory_pointer = + reinterpret_cast(byte_pointer + byte_offset); + + if (ScatterD && row_guard) { + assert(indices_); + + memory_pointer = reinterpret_cast( + byte_pointer + byte_offset + + LongIndex(indices_[row_offset + thread_start_row_]) * + LongIndex(params_.stride)); + } + + CUTLASS_PRAGMA_UNROLL + for (int column = 0; column < ThreadMap::Iterations::kColumn; ++column) { + bool guard = row_guard && mask_.predicates[column]; + + if (PermuteD) { + int col_offset = column * ThreadMap::Delta::kColumn; + + int col = col_offset + thread_start_column_; + int row = row_offset + thread_start_row_; + + // Locate memory_pointer + memory_pointer = reinterpret_cast( + byte_pointer + byte_offset + + permute_layout_(PitchLinearCoord(col, row)) * sizeof(AccessType) / + kElementsPerAccess); + } + atomic_store( + frag_ptr[frag_row_idx * ThreadMap::Iterations::kColumn + column], + (void*)&memory_pointer[0], + guard); + + if (!PermuteD) { + memory_pointer += (ThreadMap::Delta::kColumn / kElementsPerAccess); + } + } + + if (row + 1 < ThreadMap::Iterations::kRow) { + if (!ScatterD && !PermuteD) { byte_pointer += params_.increment_row; } + } + } + + if (group + 1 < ThreadMap::Iterations::kGroup) { + byte_pointer += params_.increment_group; + } + } + + if (cluster + 1 < ThreadMap::Iterations::kCluster) { + byte_pointer += params_.increment_cluster; + } + } + } + + /// Stores a fragment to memory + CUTLASS_DEVICE + void store(Fragment const& frag) const { store_with_byte_offset(frag, 0); } + + CUTLASS_DEVICE + void load(Fragment& frag) {} + + CUTLASS_DEVICE + MatrixCoord thread_start() const + { + return MatrixCoord(thread_start_row_, thread_start_column_); + } + + /// Need to get the thread start row from the tile iterator + CUTLASS_DEVICE + int32_t thread_start_row() const { return thread_start_row_; } + + /// Need to get the thread start row from the tile iterator + CUTLASS_DEVICE + int32_t thread_start_column() const { return thread_start_column_; } + + /// Extent of the matrix in rows + CUTLASS_DEVICE + Index extent_row() const { return extent_row_; } + + /// Extent of the matrix in columns + CUTLASS_DEVICE + Index extent_column() const { return extent_column_; } + + /// Advances to the next position to load or store + CUTLASS_HOST_DEVICE + PredicatedTileIteratorAtomic& 
operator++() + { + ++state_[0]; + + if (!ScatterD && !PermuteD) { store_byte_pointer_ += params_.advance_row; } + + if (!ScatterD) { byte_pointer_ += params_.advance_row; } + + thread_start_row_ += ThreadMap::Shape::kRow; + + if (state_[0] == ThreadMap::Count::kRow) { + state_[0] = 0; + ++state_[1]; + byte_pointer_ += params_.advance_group; + store_byte_pointer_ += params_.advance_group; + + thread_start_row_ += + (ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * ThreadMap::Count::kRow; + + if (state_[1] == ThreadMap::Count::kGroup) { + state_[1] = 0; + ++state_[2]; + byte_pointer_ += params_.advance_cluster; + store_byte_pointer_ += params_.advance_cluster; + + thread_start_row_ += ThreadMap::Count::kGroup * ThreadMap::Shape::kGroup * + ThreadMap::Count::kRow * ThreadMap::Shape::kRow; + + if (state_[2] == ThreadMap::Count::kCluster) { + state_[2] = 0; + byte_pointer_ += params_.advance_tile; + store_byte_pointer_ += params_.advance_tile; + + thread_start_row_ += ThreadMap::Shape::kGroup * ThreadMap::Shape::kRow * + ThreadMap::Shape::kCluster * ThreadMap::Shape::kTile; + } + } + } + + return *this; + } + + /// Advances a number of positions to load or store + CUTLASS_HOST_DEVICE + PredicatedTileIteratorAtomic& operator+=(int increment) + { + // Row + state_[0] += increment; + int increment_row = state_[0] / ThreadMap::Count::kRow; + state_[0] = state_[0] % ThreadMap::Count::kRow; + + byte_pointer_ += (params_.advance_row * increment); + store_byte_pointer_ += (params_.advance_row * increment); + thread_start_row_ += (ThreadMap::Shape::kRow * increment); + + // Group + state_[1] += increment_row; + int increment_group = state_[1] / ThreadMap::Count::kGroup; + state_[1] = state_[1] % ThreadMap::Count::kGroup; + + byte_pointer_ += (params_.advance_group * increment_row); + store_byte_pointer_ += (params_.advance_group * increment_row); + thread_start_row_ += (ThreadMap::Shape::kGroup - 1) * ThreadMap::Shape::kRow * + ThreadMap::Count::kRow * increment_row; + + // Cluster + state_[2] += increment_group; + int increment_cluster = state_[2] / ThreadMap::Count::kCluster; + state_[2] = state_[2] % ThreadMap::Count::kCluster; + + byte_pointer_ += (params_.advance_cluster * increment_group); + store_byte_pointer_ += (params_.advance_cluster * increment_group); + thread_start_row_ += ThreadMap::Count::kGroup * ThreadMap::Shape::kGroup * + ThreadMap::Count::kRow * ThreadMap::Shape::kRow * increment_group; + + // Tile + byte_pointer_ += (params_.advance_tile * increment_cluster); + store_byte_pointer_ += (params_.advance_tile * increment_cluster); + thread_start_row_ += ThreadMap::Shape::kGroup * ThreadMap::Shape::kRow * + ThreadMap::Shape::kCluster * ThreadMap::Shape::kTile * + increment_cluster; + + return *this; + } + + ///< Efficiently disables all accesses guarded by mask + CUTLASS_DEVICE void clear_mask() { mask_.clear(); } + + ///< Efficiently enables all accesses guarded by mask + CUTLASS_DEVICE void enable_mask() { mask_.enable(); } + + ///< Sets the mask + CUTLASS_DEVICE void get_mask(Mask& mask) const { mask = mask_; } + + ///< Sets the mask + CUTLASS_DEVICE void set_mask(Mask const& mask) { mask_ = mask; } +}; + +} // namespace threadblock +} // namespace epilogue +} // namespace cutlass diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/predicated_tile_iterator_residual_last.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/predicated_tile_iterator_residual_last.h new file 
mode 100644 index 0000000000000000000000000000000000000000..629047dbb057384ac8f0d7fa5d557e0c070cd830 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/predicated_tile_iterator_residual_last.h @@ -0,0 +1,1938 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights + *reserved. SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + *this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + *ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + *LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + *CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + *SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + *INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + *CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + *ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + *POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/*! \file + \brief Templates implementing loading of tiles from pitch-linear rank=2 + tensors. + + This iterator uses masks to guard out-of-bounds accesses. The first tile + this iterator visits maybe partial, then the remaining tiles are complete. + So, we only need to compute the predicates twice, once before the first tile + and once for the remaining full tiles which can share the same predicates. + + A precomputed "Params" object minimizes the amount of state that must be + stored in registers, and integer addition is used to advance the pointer + through memory. +*/ + +#pragma once + +#include "cutlass/arch/memory.h" +#include "cutlass/transform/threadblock/predicated_tile_access_iterator.h" + +//////////////////////////////////////////////////////////////////////////////// + +namespace cutlass { +namespace transform { +namespace threadblock { + +//////////////////////////////////////////////////////////////////////////////// + +/// PredicatedTileIteratorResidualLast +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +/// Regular tile iterator using a precomputed control structure to minimize +/// register liveness and integer arithmetic. 
+/// +/// Layout is assumed to be invariant at the time the precomputed "Params" +/// object is constructed. +/// +/// Base pointer and tensor extents may be specified at the time the iterator is +/// constructed. Subsequently, they are assumed to be immutable. +/// +/// Adding a logical coordinate offset may be performed at the time the iterator +/// is constructed. Subsequent additions to logical coordinate offset may be +/// performed but are relatively expensive. +/// +/// Visitation order is intended to first visit a "residual" tile that may be +/// partially full in both the advance dimension and the steady-state dimension. +/// This is assumed to be the last tile in the iteration sequence. Advancing an +/// iterator that has just been constructed moves to the first tile that is full +/// in the advance dimension and recomputes predicates. Subsequent accesses may +/// be performed without updating internal predicates and are efficient in terms +/// of live register state and pointer arithmetic instructions. +/// +/// To be efficient, this assumes the iterator will be dereferenced and advanced +/// at least once outside any looping structure to minimize integer arithmetic. +/// +/// Accesses out of bounds are safe so long as `clear_mask()` is called prior to +/// dereferencing the iterator. +/// +/// +/// Example: +/// +/// An efficient pipeline structure may be constructed as follows: +/// +// template +// __global__ void kernel( +// typename Iterator::Params params, +// typename Iterator::Element *ptr, +// TensorCoord extent) { +// +// typename Iterator::Fragment fragment; +// +// TensorCoord threadblock_offset(0, 0); +// +// Iterator iter(params, ptr, extent, threadIdx.x, threadblock_offsets); +// +// +// fragment = *iter; // load "residue" tile first +// ++iter; // advance to first "steady state" tile and update +// internal masks +// +// +// #pragma unroll +// for (int i = Remaining - 1; i >= 0; --i) { +// +// f(fragment); +// +// if (!i) { +// iter.clear_mask(); // light-weight operation to clear masks - +// subsequent loads become NO-OPs. +// } +// +// fragment = *iter; // load tile during "steady state" phase +// ++iter; // advance to next tile - lightweight due to +// steady-state masks +// } +// } +// +// void host(TensorView view) { +// +// using Iterator = +// transform::threadblock::PredicatedTileIteratorResidualLast; +// +// typename Iterator::Params params(view.layout()); +// +// kernel(params, view.data()); +// } +/// +/// +template +class PredicatedTileIteratorResidualLast; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileIteratorResidualLast for pitch-linear data. 
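+///
+/// This base specialization owns a PredicatedTileAccessIteratorResidualLast
+/// (address_iterator_) that produces predicated addresses; load_with_byte_offset()
+/// layers vectorized cutlass::arch::global_load on top of those addresses and
+/// store_with_byte_offset() writes through them when valid(). The layout-specific
+/// specializations that follow simply remap their coordinates onto this
+/// pitch-linear form.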
+/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileIteratorResidualLast { +public: + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::PitchLinear; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element*; + using NonConstPointer = typename platform::remove_const::type*; + + /// Type used for internal memory accesses + using AccessType = + AlignedArray::value / 8)>; + + /// Underlying iterator to compute the addresses + using TileAccessIterator = PredicatedTileAccessIteratorResidualLast; + + static int const kAccessesPerVector = TileAccessIterator::kAccessesPerVector; + + /// Fragment object to be loaded or stored + using Fragment = + cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename TileAccessIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + public: + using Base = typename TileAccessIterator::Params::Base; + + friend PredicatedTileIteratorResidualLast; + + private: + /// Parameters object + typename TileAccessIterator::Params params_; + + public: + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const& layout) : params_(layout) {} + + CUTLASS_HOST_DEVICE + Params() {} + + CUTLASS_HOST_DEVICE + Params(Base const& base) : params_(base) {} + }; + +private: + /// Internal pointer type permits fast address arithmetic + using BytePointer = char*; + +private: + // + // Data members + // + + /// Data member to the tile access iterator + TileAccessIterator address_iterator_; + +public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast( + /// Precomputed parameters object + Params const& params, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const& threadblock_offset, + /// Gather indices + int const* indices = nullptr) + : address_iterator_(params.params_, pointer, extent, thread_id, threadblock_offset, indices) + { + } + + /// Construct a PredicatedTileIteratorResidualLast with zero threadblock + /// offset + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast(Params const& params, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileIteratorResidualLast(params, pointer, extent, thread_id, make_Coord(0, 0)) + { + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) + { + address_iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. 
+ /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast& operator++() + { + if (kAdvanceRank) + address_iterator_.add_tile_offset({0, 1}); + else + address_iterator_.add_tile_offset({1, 0}); + + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast operator++(int) + { + PredicatedTileIteratorResidualLast self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { address_iterator_.clear_mask(enable); } + + CUTLASS_HOST_DEVICE + void set_residual_tile(bool enable) { address_iterator_.set_residual_tile(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { address_iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const& mask) { address_iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask& mask) { address_iterator_.get_mask(mask); } + + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment& frag, Index pointer_offset) + { + load_with_byte_offset(frag, pointer_offset * sizeof_bits::value / 8); + } + + CUTLASS_DEVICE + void load_with_byte_offset(Fragment& frag, LongIndex byte_offset) + { + AccessType* frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < kAccessesPerVector; ++v) { + int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous); + + address_iterator_.set_iteration_index(idx); + char const* byte_ptr = + reinterpret_cast(address_iterator_.get()) + byte_offset; + + AccessType const* access_ptr = reinterpret_cast(byte_ptr); + + cutlass::arch::global_load( + frag_ptr[idx], access_ptr, address_iterator_.valid()); + + ++address_iterator_; + } + } + } + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment& frag) { load_with_byte_offset(frag, 0); } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const& frag, Index pointer_offset) + { + store_with_byte_offset(frag, pointer_offset * sizeof_bits::value / 8); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_byte_offset(Fragment const& frag, LongIndex byte_offset) + { + address_iterator_.set_iteration_index(0); + AccessType const* frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < kAccessesPerVector; ++v) { + int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous); + + char* byte_ptr = reinterpret_cast(address_iterator_.get()) + byte_offset; + AccessType* access_ptr 
= reinterpret_cast(byte_ptr); + + if (address_iterator_.valid()) { *access_ptr = frag_ptr[idx]; } + ++address_iterator_; + } + } + } + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const& frag) { store_with_byte_offset(frag, 0); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileIteratorResidualLast for pitch-linear data. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileIteratorResidualLast { +public: + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::ColumnMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element*; + using NonConstPointer = typename platform::remove_const::type*; + + using UnderlyingIterator = + PredicatedTileIteratorResidualLast, + Element, + layout::PitchLinear, + (kAdvanceRank == 0 ? 0 : 1), + ThreadMap, + AccessSize, + Gather>; + + using AccessType = typename UnderlyingIterator::AccessType; + + /// Fragment object to be loaded or stored + using Fragment = + cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend PredicatedTileIteratorResidualLast; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + CUTLASS_HOST_DEVICE + Params() {} + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))) {} + + CUTLASS_HOST_DEVICE + Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {} + }; + +private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + +public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast( + Params const& params, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id, ///< ID of each participating thread + TensorCoord const& threadblock_offset, ///< Initial offset of threadblock + int const* indices = nullptr ///< gather/scatter indices, note no support for + ///< gather/scatter at this specialization + ) + : iterator_(params.params_, + pointer, + layout::PitchLinearCoord(extent.row(), extent.column()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column()), + indices) + { + } + + /// Construct a PredicatedTileIteratorResidualLast with zero threadblock + /// offset + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast(Params const& params, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID 
of each participating thread + ) + : PredicatedTileIteratorResidualLast(params, pointer, extent, thread_id, make_Coord(0, 0)) + { + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) + { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast& operator++() + { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast operator++(int) + { + PredicatedTileIteratorResidualLast self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + CUTLASS_HOST_DEVICE + void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const& mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask& mask) { iterator_.get_mask(mask); } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment& frag, Index pointer_offset) + { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_byte_offset(Fragment& frag, LongIndex byte_offset) + { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment& frag) { load_with_pointer_offset(frag, 0); } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const& frag, Index pointer_offset) + { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_byte_offset(Fragment const& frag, LongIndex byte_offset) + { + iterator_.store_with_byte_offset(frag, byte_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const& frag) { store_with_pointer_offset(frag, 0); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileIteratorResidualLast for pitch-linear data. 
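+/// (Editorial note, inferred from the code below: this specialization wraps
+/// layout::RowMajor coordinates; its constructor swaps (row, column) into the
+/// pitch-linear (contiguous, strided) order of the underlying iterator and flips
+/// the advance rank to match.)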
+/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileIteratorResidualLast { +public: + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::RowMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element*; + using NonConstPointer = typename platform::remove_const::type*; + + using UnderlyingIterator = + PredicatedTileIteratorResidualLast, + Element, + layout::PitchLinear, + (kAdvanceRank == 0 ? 1 : 0), + ThreadMap, + AccessSize, + Gather>; + + using AccessType = typename UnderlyingIterator::AccessType; + + /// Fragment object to be loaded or stored + using Fragment = + cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend PredicatedTileIteratorResidualLast; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + CUTLASS_HOST_DEVICE + Params() {} + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))) {} + + CUTLASS_HOST_DEVICE + Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {} + }; + +private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + +public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast( + Params const& params, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id, ///< ID of each participating thread + TensorCoord const& threadblock_offset, ///< Initial offset of threadblock + int const* indices = nullptr ///< Gather indices + ) + : iterator_(params.params_, + pointer, + layout::PitchLinearCoord(extent.column(), extent.row()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row()), + indices) + { + } + + /// Construct a PredicatedTileIteratorResidualLast with zero threadblock + /// offset + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast(Params const& params, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileIteratorResidualLast(params, pointer, extent, thread_id, make_Coord(0, 0)) + { + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) + { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. 
+ /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast& operator++() + { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast operator++(int) + { + PredicatedTileIteratorResidualLast self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + CUTLASS_HOST_DEVICE + void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const& mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask& mask) { iterator_.get_mask(mask); } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment& frag, Index pointer_offset) + { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_byte_offset(Fragment& frag, LongIndex byte_offset) + { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment& frag) { load_with_pointer_offset(frag, 0); } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const& frag, Index pointer_offset) + { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_byte_offset(Fragment const& frag, LongIndex byte_offset) + { + iterator_.store_with_byte_offset(frag, byte_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const& frag) { store_with_pointer_offset(frag, 0); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileIteratorResidualLast for affine rank-2 data. 
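+/// (Editorial note: unlike the pitch-linear-backed specializations above, this one
+/// drives a PredicatedTileAccessIteratorResidualLast directly and issues per-access
+/// predicated loads and stores, since both ranks of an AffineRankN<2> layout may
+/// carry arbitrary strides.)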
+/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileIteratorResidualLast, + AdvanceRank, + ThreadMap_, + AccessSize, + false> { +public: + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::AffineRankN<2>; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element*; + using NonConstPointer = typename platform::remove_const::type*; + + /// Type used for internal memory accesses + using AccessType = + AlignedArray::value / 8)>; + + /// Underlying iterator to compute the addresses + using TileAccessIterator = PredicatedTileAccessIteratorResidualLast; + + static int const kAccessesPerVector = TileAccessIterator::kAccessesPerVector; + + /// Fragment object to be loaded or stored + using Fragment = + cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename TileAccessIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + public: + friend PredicatedTileIteratorResidualLast; + + private: + /// Parameters object + typename TileAccessIterator::Params params_; + + public: + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const& layout) : params_(layout) {} + + CUTLASS_HOST_DEVICE + Params() {} + }; + +private: + /// Internal pointer type permits fast address arithmetic + using BytePointer = char*; + +private: + // + // Data members + // + + /// Data member to the tile access iterator + TileAccessIterator address_iterator_; + +public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast( + /// Precomputed parameters object + Params const& params, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const& threadblock_offset, + int const* indices = nullptr ///< gather/scatter indices, note no support for + ///< gather/scatter at this specialization + ) + : address_iterator_(params.params_, pointer, extent, thread_id, threadblock_offset) + { + } + + /// Construct a PredicatedTileIteratorResidualLast with zero threadblock + /// offset + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast(Params const& params, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileIteratorResidualLast(params, pointer, extent, thread_id, make_Coord(0, 0)) + { + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) + { + address_iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. 
+ /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast& operator++() + { + if (kAdvanceRank) + address_iterator_.add_tile_offset(make_Coord(0, 1)); + else + address_iterator_.add_tile_offset(make_Coord(1, 0)); + + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast operator++(int) + { + PredicatedTileIteratorResidualLast self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { address_iterator_.clear_mask(enable); } + + CUTLASS_HOST_DEVICE + void set_residual_tile(bool enable) { address_iterator_.set_residual_tile(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { address_iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const& mask) { address_iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask& mask) { address_iterator_.get_mask(mask); } + + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment& frag, Index pointer_offset) + { + load_with_byte_offset(frag, pointer_offset * sizeof_bits::value / 8); + } + + CUTLASS_DEVICE + void load_with_byte_offset(Fragment& frag, LongIndex byte_offset) + { + AccessType* frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < kAccessesPerVector; ++v) { + int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous); + + address_iterator_.set_iteration_index(idx); + char const* byte_ptr = + reinterpret_cast(address_iterator_.get()) + byte_offset; + + AccessType const* access_ptr = reinterpret_cast(byte_ptr); + + cutlass::arch::global_load( + frag_ptr[idx], access_ptr, address_iterator_.valid()); + + ++address_iterator_; + } + } + } + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment& frag) { load_with_byte_offset(frag, 0); } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const& frag, Index pointer_offset) + { + store_with_byte_offset(frag, pointer_offset * sizeof_bits::value / 8); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_byte_offset(Fragment const& frag, LongIndex byte_offset) + { + address_iterator_.set_iteration_index(0); + AccessType const* frag_ptr = reinterpret_cast(&frag); + + CUTLASS_PRAGMA_UNROLL + for (int s = 0; s < ThreadMap::Iterations::kStrided; ++s) { + CUTLASS_PRAGMA_UNROLL + for (int c = 0; c < ThreadMap::Iterations::kContiguous; ++c) { + CUTLASS_PRAGMA_UNROLL + for (int v = 0; v < kAccessesPerVector; ++v) { + int idx = v + kAccessesPerVector * (c + s * ThreadMap::Iterations::kContiguous); + + char* byte_ptr = reinterpret_cast(address_iterator_.get()) + byte_offset; + 
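+                    // The store path mirrors the predicated load above, but applies the
+                    // predicate with an explicit branch on valid() rather than a predicated
+                    // global_load intrinsic: invalid accesses simply skip the write.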
AccessType* access_ptr = reinterpret_cast(byte_ptr); + + if (address_iterator_.valid()) { *access_ptr = frag_ptr[idx]; } + ++address_iterator_; + } + } + } + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const& frag) { store_with_byte_offset(frag, 0); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileIteratorResidualLast for affine rank 2 +/// column-major data. +/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileIteratorResidualLast { +public: + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::AffineRank2ColumnMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element*; + using NonConstPointer = typename platform::remove_const::type*; + + // Map to the underlying AffineRankN<2> layout + using UnderlyingIterator = + PredicatedTileIteratorResidualLast, + Element, + layout::AffineRankN<2>, + (kAdvanceRank == 0 ? 0 : 1), + ThreadMap, + AccessSize>; + + using AccessType = typename UnderlyingIterator::AccessType; + + /// Fragment object to be loaded or stored + using Fragment = + cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend PredicatedTileIteratorResidualLast; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + CUTLASS_HOST_DEVICE + Params() {} + + /// Construct the Params object given an AffineRankN<2> tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const& layout) + : params_(layout::AffineRankN<2>(layout.stride(0), layout.stride(1))) + { + } + }; + +private: + // + // Data members + // + + /// Underlying AffineRankN<2> tile iterator + UnderlyingIterator iterator_; + +public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast( + Params const& params, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id, ///< ID of each participating thread + TensorCoord const& threadblock_offset, ///< Initial offset of threadblock + int const* indices = nullptr ///< gather/scatter indices, note no support for + ///< gather/scatter at this specialization + ) + : iterator_(params.params_, + pointer, + layout::PitchLinearCoord(extent.row(), extent.column()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.row(), threadblock_offset.column())) + { + } + + /// Construct a PredicatedTileIteratorResidualLast with zero threadblock + /// offset + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast(Params const& params, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int 
thread_id ///< ID of each participating thread + ) + : PredicatedTileIteratorResidualLast(params, pointer, extent, thread_id, make_Coord(0, 0)) + { + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) + { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast& operator++() + { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast operator++(int) + { + PredicatedTileIteratorResidualLast self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + CUTLASS_HOST_DEVICE + void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const& mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask& mask) { iterator_.get_mask(mask); } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment& frag, Index pointer_offset) + { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_byte_offset(Fragment& frag, LongIndex byte_offset) + { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment& frag) { load_with_pointer_offset(frag, 0); } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const& frag, Index pointer_offset) + { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_byte_offset(Fragment const& frag, LongIndex byte_offset) + { + iterator_.store_with_byte_offset(frag, byte_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const& frag) { store_with_pointer_offset(frag, 0); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileIteratorResidualLast for affine rank 2 +/// row-major data. 
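+/// (Editorial note: this mirrors the affine rank 2 column-major case above; Params
+/// swaps the two strides and the constructor exchanges (row, column) for
+/// (column, row) before delegating to the AffineRankN<2> specialization.)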
+/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileIteratorResidualLast { +public: + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + using Layout = layout::AffineRank2RowMajor; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element*; + using NonConstPointer = typename platform::remove_const::type*; + + // Map to the underlying AffineRankN<2> layout + using UnderlyingIterator = + PredicatedTileIteratorResidualLast, + Element, + layout::AffineRankN<2>, + (kAdvanceRank == 0 ? 1 : 0), + ThreadMap, + AccessSize>; + + using AccessType = typename UnderlyingIterator::AccessType; + + /// Fragment object to be loaded or stored + using Fragment = + cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend PredicatedTileIteratorResidualLast; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + CUTLASS_HOST_DEVICE + Params() {} + + /// Construct the Params object given an AffineRankN<2> tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const& layout) + : params_(layout::AffineRankN<2>(layout.stride(1), layout.stride(0))) + { + } + }; + +private: + // + // Data members + // + + /// Underlying AffineRankN<2> tile iterator + UnderlyingIterator iterator_; + +public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast( + Params const& params, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id, ///< ID of each participating thread + TensorCoord const& threadblock_offset, ///< Initial offset of threadblock + int const* indices = nullptr ///< gather/scatter indices, note no support for + ///< gather/scatter at this specialization + ) + : iterator_(params.params_, + pointer, + layout::PitchLinearCoord(extent.column(), extent.row()), + thread_id, + layout::PitchLinearCoord(threadblock_offset.column(), threadblock_offset.row())) + { + } + + /// Construct a PredicatedTileIteratorResidualLast with zero threadblock + /// offset + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast(Params const& params, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileIteratorResidualLast(params, pointer, extent, thread_id, make_Coord(0, 0)) + { + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex pointer_offset) + { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. 
+ /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast& operator++() + { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast operator++(int) + { + PredicatedTileIteratorResidualLast self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + CUTLASS_HOST_DEVICE + void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const& mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask& mask) { iterator_.get_mask(mask); } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment& frag, Index pointer_offset) + { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_byte_offset(Fragment& frag, LongIndex byte_offset) + { + iterator_.load_with_byte_offset(frag, byte_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment& frag) { load_with_pointer_offset(frag, 0); } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const& frag, Index pointer_offset) + { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_byte_offset(Fragment const& frag, LongIndex byte_offset) + { + iterator_.store_with_byte_offset(frag, byte_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const& frag) { store_with_pointer_offset(frag, 0); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileIteratorResidualLast for interleaved data. +/// It is mapped to the congruous layout. 
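+/// (Editorial note: for ColumnMajorInterleaved<kInterleavedK>, the constructor below
+/// scales the contiguous extent by kInterleavedK and divides the strided extent by
+/// the same factor before handing the coordinates to the pitch-linear iterator.)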
+/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// + +template +class PredicatedTileIteratorResidualLast, + AdvanceRank, + ThreadMap_, + AccessSize, + false> { +public: + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + static int const kInterleavedK = InterleavedK; + using Layout = layout::ColumnMajorInterleaved; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element*; + using NonConstPointer = typename platform::remove_const::type*; + + using UnderlyingIterator = PredicatedTileIteratorResidualLast< + layout::PitchLinearShape, + Element, + layout::PitchLinear, + (kAdvanceRank == 0 ? 0 : 1), + ThreadMap, + AccessSize>; + + using AccessType = typename UnderlyingIterator::AccessType; + + /// Fragment object to be loaded or stored + using Fragment = + cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend PredicatedTileIteratorResidualLast; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + CUTLASS_HOST_DEVICE + Params() {} + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))) {} + + CUTLASS_HOST_DEVICE + Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {} + }; + +private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + +public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast( + /// Precomputed parameters object + Params const& params, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const& threadblock_offset, + int const* indices = nullptr ///< gather/scatter indices, note no support for + ///< gather/scatter at this specialization + ) + : iterator_(params.params_, + pointer, + layout::PitchLinearCoord(extent.row() * kInterleavedK, + extent.column() / kInterleavedK), + thread_id, + layout::PitchLinearCoord(threadblock_offset.row() * kInterleavedK, + threadblock_offset.column() / kInterleavedK)) + { + } + + /// Construct a PredicatedTileIteratorResidualLast with zero threadblock + /// offset + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast(Params const& params, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileIteratorResidualLast(params, pointer, extent, thread_id, make_Coord(0, 0)) + { + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void 
add_pointer_offset(LongIndex pointer_offset) + { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast& operator++() + { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast operator++(int) + { + PredicatedTileIteratorResidualLast self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + CUTLASS_HOST_DEVICE + void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const& mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask& mask) { iterator_.get_mask(mask); } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment& frag, Index pointer_offset) + { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment& frag) { load_with_pointer_offset(frag, 0); } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const& frag, Index pointer_offset) + { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const& frag) { store_with_pointer_offset(frag, 0); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +/// Specialization of PredicatedTileIteratorResidualLast for interleaved-32 +/// data. It is mapped to the congruous layout. 
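+/// (Editorial note: mirror image of the interleaved column-major case above; here the
+/// column extent is scaled by kInterleavedK and the row extent divided by it.)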
+/// +/// Satisfies: ForwardTileIteratorConcept | +/// ReadableContiguousTileIteratorConcept | +/// WriteableContiguousTileIteratorConcept | +/// MaskedTileIteratorConcept +/// +template +class PredicatedTileIteratorResidualLast, + AdvanceRank, + ThreadMap_, + AccessSize, + false> { +public: + static_assert(AdvanceRank == 0 || AdvanceRank == 1, + "Specialization for pitch-linear iterator may along advance along the " + "contiguous(rank=0) or strided(rank=1) dimension."); + + using Shape = Shape_; + using Element = Element_; + static int const kInterleavedK = InterleavedK; + using Layout = layout::RowMajorInterleaved; + static int const kAdvanceRank = AdvanceRank; + using ThreadMap = ThreadMap_; + + using Index = typename Layout::Index; + using LongIndex = typename Layout::LongIndex; + + using TensorRef = TensorRef; + using TensorView = TensorView; + using TensorCoord = typename Layout::TensorCoord; + + using Pointer = Element*; + using NonConstPointer = typename platform::remove_const::type*; + + using UnderlyingIterator = PredicatedTileIteratorResidualLast< + layout::PitchLinearShape, + Element, + layout::PitchLinear, + (kAdvanceRank == 0 ? 1 : 0), + ThreadMap, + AccessSize>; + + using AccessType = typename UnderlyingIterator::AccessType; + + /// Fragment object to be loaded or stored + using Fragment = + cutlass::Array; + + /// Predicate vector stores mask to guard accesses + using Mask = typename UnderlyingIterator::Mask; + + /// Parameters object is precomputed state and is host-constructible + class Params { + private: + friend PredicatedTileIteratorResidualLast; + + /// Parameters object + typename UnderlyingIterator::Params params_; + + public: + CUTLASS_HOST_DEVICE + Params() {} + + /// Construct the Params object given a pitch-linear tensor's layout + CUTLASS_HOST_DEVICE + Params(Layout const& layout) : params_(layout::PitchLinear(layout.stride(0))) {} + + CUTLASS_HOST_DEVICE + Params(typename UnderlyingIterator::Params::Base const& base) : params_(base) {} + }; + +private: + // + // Data members + // + + /// Underlying pitch-linear tile iterator + UnderlyingIterator iterator_; + +public: + /// Constructs a TileIterator from its precomputed state, threadblock offset, + /// and thread ID + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast( + /// Precomputed parameters object + Params const& params, + /// Pointer to start of tensor + Pointer pointer, + /// Extent of tensor + TensorCoord extent, + /// ID of each participating thread + int thread_id, + /// Initial offset of threadblock + TensorCoord const& threadblock_offset, + int const* indices = nullptr ///< gather/scatter indices, note no support for + ///< gather/scatter at this specialization + ) + : iterator_(params.params_, + pointer, + layout::PitchLinearCoord(extent.column() * kInterleavedK, + extent.row() / kInterleavedK), + thread_id, + layout::PitchLinearCoord(threadblock_offset.column() * kInterleavedK, + threadblock_offset.row() / kInterleavedK)) + { + } + + /// Construct a PredicatedTileIteratorResidualLast with zero threadblock + /// offset + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast(Params const& params, ///< Precomputed parameters object + Pointer pointer, ///< Pointer to start of tensor + TensorCoord extent, ///< Extent of tensor + int thread_id ///< ID of each participating thread + ) + : PredicatedTileIteratorResidualLast(params, pointer, extent, thread_id, make_Coord(0, 0)) + { + } + + /// Adds a pointer offset in units of Element + CUTLASS_HOST_DEVICE + void add_pointer_offset(LongIndex 
pointer_offset) + { + iterator_.add_pointer_offset(pointer_offset); + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast& operator++() + { + ++iterator_; + return *this; + } + + /// Advances to the next tile in memory. + /// + /// The first time this method is called, predicates are updated, and the + /// iterator's internal pointer is reverted to the first "steady state" tile. + /// Subsequent calls are lightweight and must only update the internal + /// pointer. + CUTLASS_HOST_DEVICE + PredicatedTileIteratorResidualLast operator++(int) + { + PredicatedTileIteratorResidualLast self(*this); + operator++(); + return self; + } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void clear_mask(bool enable = true) { iterator_.clear_mask(enable); } + + CUTLASS_HOST_DEVICE + void set_residual_tile(bool enable) { iterator_.set_residual_tile(enable); } + + /// Clears the predicate set efficiently + CUTLASS_HOST_DEVICE + void enable_mask() { iterator_.enable_mask(); } + + /// Sets the predicate mask, overriding value stored in predicate iterator + CUTLASS_HOST_DEVICE + void set_mask(Mask const& mask) { iterator_.set_mask(mask); } + + /// Gets the mask + CUTLASS_HOST_DEVICE + void get_mask(Mask& mask) { iterator_.get_mask(mask); } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load_with_pointer_offset(Fragment& frag, Index pointer_offset) + { + iterator_.load_with_pointer_offset(frag, pointer_offset); + } + + /// Loads a fragment from memory + CUTLASS_DEVICE + void load(Fragment& frag) { load_with_pointer_offset(frag, 0); } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store_with_pointer_offset(Fragment const& frag, Index pointer_offset) + { + iterator_.store_with_pointer_offset(frag, pointer_offset); + } + + /// Store a fragment to memory + CUTLASS_DEVICE + void store(Fragment const& frag) { store_with_pointer_offset(frag, 0); } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace threadblock +} // namespace transform +} // namespace cutlass + +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/transpose_warp_iterator.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/transpose_warp_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..2435c07f8989dcd6ad9c99a2fd530fe6092b1e08 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/transpose_warp_iterator.h @@ -0,0 +1,57 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holdvr nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include "warp_iterator_from_smem.h" + +template +struct TransposeWarpIterator { + using Iterator = char; + static bool constexpr kSupportsTranspose = false; +}; + +template < + /// Operand identity + cutlass::gemm::Operand Operand, + /// Data type of A elements + typename Element, + bool kTranspose> +struct TransposeWarpIterator< + cutlass::gemm::warp::WarpIteratorFromSmem> { + using Iterator = cutlass::gemm::warp::WarpIteratorFromSmem; + static bool constexpr kSupportsTranspose = true; +}; diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/warp_iterator_from_smem.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/warp_iterator_from_smem.h new file mode 100644 index 0000000000000000000000000000000000000000..7dd59832b4b0f278408552de38849b676d6b668a --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/iterators/warp_iterator_from_smem.h @@ -0,0 +1,269 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights + *reserved. SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + *this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holder nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + *ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + *LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + *CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + *SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + *INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + *CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + *ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + *POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/*! \file + \brief Inspired from + "cutlass/gemm/warp/mma_tensor_op_tile_access_iterator.h" Loads tiles of GEMM + operands from a RowMajor shared-memory layout into registers to use by A100 + TensorCores. + + The difference with "mma_tensor_op_tile_access_iterator.h" is that: + (1) We use "ldmatrix" to load tiles, rather than manual loads (slightly + faster) (2) We support to transpose the operand (eg read `A.transpose()` when + the shared memory holds `A`) + + This is only implemented for the specific shapes. +*/ +#pragma once + +#include + +//////////////////////////////////////////////////////////////////////////////// +namespace cutlass { +namespace gemm { +namespace warp { + +template < + /// Operand identity + Operand Operand_, + /// Data type of A elements + typename Element_, + bool kTranspose = false> +class WarpIteratorFromSmem { +public: + /// Shape of tile to load (concept: MatrixShape) + using Shape = cutlass::MatrixShape<32, 32>; + + /// Operand tag + static Operand const kOperand = Operand_; + + /// Basic check + static_assert( + kOperand == Operand::kA || kOperand == Operand::kB, + "WarpIteratorFromSmem may only be instantiated for A or B operands to warp-level Mma."); + + /// Element type + using Element = Element_; + static_assert(sizeof_bits::value == 16, "Only supported for half"); + + /// Layout of source tile + using Layout = cutlass::layout::RowMajor; + + /// Shape of one matrix product operation (concept: MatrixShape) + using InstructionShape = cutlass::MatrixShape<16, 8>; + + /// Delta between *MMA operations (in units of *MMA operations, concept: + /// MatrixShape) + static int const kOpDelta = 1; + + /// Number of participating threads + static int const kThreads = 32; + + /// TensorRef type for loading element from a tensor + using TensorRef = TensorRef; + + /// Index type + using Index = typename TensorRef::Index; + + /// Long Index type + using LongIndex = typename TensorRef::LongIndex; + + /// Coordinate for an element in the tensor + using TensorCoord = typename TensorRef::TensorCoord; + + /// Number of elements accessed per Shared Memory load + static int const kElementsPerAccess = + (sizeof_bits::value >= 32 ? 1 : 32 / sizeof_bits::value); + + using InstructionCount = MatrixShape; + + static int const kIterations = (kOperand == Operand::kA) ? 
InstructionCount::kColumn + : InstructionCount::kRow; + +public: + // + // Derived quantities + // + + /// Fragment object holding a thread's part of a tile + using Fragment = + Array; + + /// Memory access type + // using AccessType = AlignedArray; + using AccessType = Array; + + static int constexpr kWarpShapeDivisibleInner = + (kOperand == Operand::kA ? InstructionShape::kColumn : InstructionShape::kRow); + static int constexpr kAccessesInner = (kWarpShapeDivisibleInner / kElementsPerAccess) / 4; + static int const kTilesPerInstruction = InstructionShape::kRow / 8; + +private: + /// Underlying tensor reference + TensorRef ref_; + + /// Origin + MatrixCoord origin_; + + /// Iterations in a tile + int iterations_; + +public: + /// Constructor from TensorRef + CUTLASS_HOST_DEVICE + WarpIteratorFromSmem(TensorRef const& ref, int lane_id) + : WarpIteratorFromSmem(ref, {Shape::kRow, Shape::kColumn}, lane_id) + { + } + CUTLASS_HOST_DEVICE + WarpIteratorFromSmem(TensorRef const& ref, TensorCoord extent, int lane_id) + : ref_(ref), iterations_(0) + { + int ldsm_vec_num = (lane_id >> 3); + if (kOperand == Operand::kA) { + origin_ = MatrixCoord(lane_id % 8, 0); + static_assert(InstructionCount::kRow * kAccessesInner * kTilesPerInstruction == 4, ""); + CUTLASS_PRAGMA_UNROLL + for (int inst_m_idx = 0; inst_m_idx < InstructionCount::kRow; ++inst_m_idx) { + CUTLASS_PRAGMA_UNROLL + for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) { + CUTLASS_PRAGMA_UNROLL + for (int access_m_idx = 0; access_m_idx < kTilesPerInstruction; + ++access_m_idx) { + int access_idx = + access_m_idx + + kTilesPerInstruction * (inner_idx + kAccessesInner * inst_m_idx); + + MatrixCoord offset(access_m_idx * 8 + inst_m_idx * InstructionShape::kRow, + inner_idx * 4 * kElementsPerAccess); + + if (access_idx == ldsm_vec_num) { + if (kTranspose) { offset = MatrixCoord(offset.column(), offset.row()); } + origin_ += offset; + } + } + } + } + } else { + origin_ = MatrixCoord(0, lane_id % 8); + static_assert(InstructionCount::kColumn * kAccessesInner == 4, ""); + CUTLASS_PRAGMA_UNROLL + for (int inst_n_idx = 0; inst_n_idx < InstructionCount::kColumn; ++inst_n_idx) { + CUTLASS_PRAGMA_UNROLL + for (int inner_idx = 0; inner_idx < kAccessesInner; ++inner_idx) { + int access_idx = inner_idx + kAccessesInner * inst_n_idx; + + MatrixCoord offset(inner_idx * 4 * kElementsPerAccess, inst_n_idx * 8); + + if (access_idx == ldsm_vec_num) { + if (kTranspose) { offset = MatrixCoord(offset.column(), offset.row()); } + origin_ += offset; + } + } + } + } + + ref_.add_coord_offset(origin_); + } + + /// Advances an iterator along logical dimensions of matrix in units of whole + /// tiles + CUTLASS_HOST_DEVICE + WarpIteratorFromSmem& add_tile_offset(TensorCoord const& tile_offset) + { + TensorCoord coord_offset(tile_offset.row() * Shape::kRow, + tile_offset.column() * Shape::kColumn); + if (kTranspose) { coord_offset = TensorCoord{coord_offset.column(), coord_offset.row()}; } + origin_ += coord_offset; + + ref_.add_coord_offset(coord_offset); + + return *this; + } + + /// Advances the iterator along the advance dimension + CUTLASS_DEVICE + void advance() + { + if (kOperand == Operand::kA) { + add_tile_offset({0, 1}); + } else { + add_tile_offset({1, 0}); + } + + iterations_ = 0; + } + + /// increase iterations in a tile + CUTLASS_HOST_DEVICE + WarpIteratorFromSmem& operator++() + { + iterations_++; + + if (iterations_ >= kIterations) advance(); + + return *this; + } + + /// Loads a fragment from memory at the location pointed to by the 
iterator. + CUTLASS_DEVICE + void load(Fragment& frag) const + { + AccessType* access_ptr = reinterpret_cast(&frag); + using LoadLayout = + typename platform::conditional::type; + + MatrixCoord offset; + if (kOperand == Operand::kA) { + offset = MatrixCoord(0, iterations_ * InstructionShape::kColumn); + } else { + offset = MatrixCoord(iterations_ * InstructionShape::kRow, 0); + } + if (kTranspose) { offset = MatrixCoord(offset.column(), offset.row()); } + cutlass::arch::ldsm(access_ptr[0], ref_.data() + ref_.offset(offset)); + } +}; + +//////////////////////////////////////////////////////////////////////////////// + +} // namespace warp +} // namespace gemm +} // namespace cutlass +//////////////////////////////////////////////////////////////////////////////// diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/transform/bias_broadcast.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/transform/bias_broadcast.h new file mode 100644 index 0000000000000000000000000000000000000000..0f15a43574cfe3eba123e03b7835227275e762f4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/transform/bias_broadcast.h @@ -0,0 +1,148 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +// This does nothing. +template +struct BroadcastNoLoad { + using Fragment = + cutlass::Array; + static const bool kEnable = false; + CUTLASS_DEVICE static void load(Fragment& frag, + scalar_t* ptr, + int thread_id, + const cutlass::MatrixCoord& extent, + int stride) + { + } + CUTLASS_DEVICE static scalar_t* + advance(scalar_t* ptr, int B_id, int N_id, int H_id, int strideB, int strideN, int strideH) + { + return ptr; + } +}; + +// This is to load the bias matrix from the global memory with on-the-fly +// broadcast. The shape in global memory is [B, N, 1, 1, L]. Each time we load +// the last dimension as a L row vector, and we further broadcast the L vector +// to a tile of size [L, L] by repeating the L vector L times +template +struct BroadcastA : public BroadcastNoLoad { + using Base = BroadcastNoLoad; + static const bool kEnable = true; + using layout = cutlass::layout::AffineRank2RowMajor; + + using GmemTileIterator = cutlass::transform::threadblock:: + PredicatedTileIterator; + using Fragment = typename GmemTileIterator::Fragment; + + CUTLASS_DEVICE static void load(Fragment& frag, + scalar_t* ptr, + int thread_id, + const cutlass::MatrixCoord& extent, + int stride) + { + GmemTileIterator iter({layout(0, 1)}, ptr, extent, thread_id); + iter.load(frag); + } + + CUTLASS_DEVICE static scalar_t* + advance(scalar_t* ptr, int B_id, int N_id, int H_id, int strideB, int strideN, int strideH) + { + return ptr + B_id * strideB + N_id * strideN; + } +}; + +// This is to load the bias matrix from the global memory with on-the-fly +// broadcast. The shape in global memory is [B, 1, H, L, L]. Each time we load +// a [L, L] matrix. Different N use the same bias matrix when B and H are the +// same. 
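+// In other words, advance() below offsets the base pointer by
+// B_id * strideB + H_id * strideH and deliberately ignores N_id, so e.g. the tiles
+// for (b, n = 0, h) and (b, n = 1, h) both read the same [L, L] slab.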
+template +struct BroadcastB : public BroadcastNoLoad { + using Base = BroadcastNoLoad; + static const bool kEnable = true; + using layout = cutlass::layout::RowMajor; + + using GmemTileIterator = cutlass::transform::threadblock:: + PredicatedTileIterator; + using Fragment = typename GmemTileIterator::Fragment; + + CUTLASS_DEVICE static void load(Fragment& frag, + scalar_t* ptr, + int thread_id, + const cutlass::MatrixCoord& extent, + int stride) + { + GmemTileIterator iter({layout(stride)}, ptr, extent, thread_id); + iter.load(frag); + } + + CUTLASS_DEVICE static scalar_t* + advance(scalar_t* ptr, int B_id, int N_id, int H_id, int strideB, int strideN, int strideH) + { + return ptr + B_id * strideB + H_id * strideH; + } +}; + +template + class Broadcast1_, + template + class Broadcast2_> +struct AttentionBiasEpilogue { + using ThreadMap = cutlass::transform::PitchLinearStripminedThreadMap< + cutlass::layout::PitchLinearShape, + kThreads, + 1>; + + using Broadcast1 = Broadcast1_; + using Broadcast2 = Broadcast2_; + + Broadcast1 broadcast1; + Broadcast2 broadcast2; + + using Ref = cutlass::TensorRef; + using SmemTileIterator = cutlass::transform::threadblock:: + RegularTileIterator; + + CUTLASS_DEVICE void operator()(const Ref& ref, + scalar_t* ptr1, + scalar_t* ptr2, + int thread_id, + const cutlass::MatrixCoord& extent, + int stride) + { + static_assert(Broadcast1::Fragment::kElements == Broadcast2::Fragment::kElements, + "The two broadcast fragments must have the same number of " + "elements"); + typename SmemTileIterator::Fragment frag; + frag.clear(); + float* frag_ptr = reinterpret_cast(&frag); + if (Broadcast1::kEnable) { + typename Broadcast1::Fragment frag1; + frag1.clear(); + broadcast1.load(frag1, ptr1, thread_id, extent, stride); + scalar_t* frag1_ptr = reinterpret_cast(&frag1); + for (int i = 0; i < Broadcast1::Fragment::kElements; ++i) { + frag_ptr[i] += static_cast(frag1_ptr[i]); + } + } + if (Broadcast2::kEnable) { + typename Broadcast2::Fragment frag2; + frag2.clear(); + broadcast2.load(frag2, ptr2, thread_id, extent, stride); + scalar_t* frag2_ptr = reinterpret_cast(&frag2); + for (int i = 0; i < Broadcast2::Fragment::kElements; ++i) { + frag_ptr[i] += static_cast(frag2_ptr[i]); + } + } + SmemTileIterator iter(ref, thread_id); + iter.store(frag); + __syncthreads(); + } +}; diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/transform/tile_smem_loader.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/transform/tile_smem_loader.h new file mode 100644 index 0000000000000000000000000000000000000000..5f3f0ee507e53bb0ad7a0f8fc3fcd25982c177f7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/transform/tile_smem_loader.h @@ -0,0 +1,93 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights + *reserved. SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + *this list of conditions and the following disclaimer. + * + * 2. 
Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holdvr nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + *ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE + *LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + *CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + *SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + *INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + *CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + *ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + *POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once +#include +#include "cutlass/aligned_buffer.h" +#include "cutlass/array.h" +#include "cutlass/coord.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/pitch_linear.h" +#include "cutlass/numeric_types.h" +#include "cutlass/platform/platform.h" +#include "cutlass/transform/pitch_linear_thread_map.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" +#include "cutlass/transform/threadblock/regular_tile_iterator.h" + +template // thread access width in elements +class TileSmemLoader { +public: + using Shape = ThreadblockTileShape; + using SmemTile = cutlass::AlignedBuffer; + + using ThreadMap = cutlass::transform::PitchLinearStripminedThreadMap< + cutlass::layout::PitchLinearShape, // strided + Threads, // Threads + ElementsPerAccess>; // ElementsPerAccess + + using GmemTileIterator = cutlass::transform::threadblock::PredicatedTileIterator< + ThreadblockTileShape, // Shape + scalar_t, // Element + cutlass::layout::RowMajor, // Layout + 0, // AdvanceRank + ThreadMap>; // ThreadMap + + using SmemTileIterator = + cutlass::transform::threadblock::RegularTileIterator; // ThreadMap + + using Fragment = typename GmemTileIterator::Fragment; + + /// load a tile from global memory into shared memory + CUTLASS_DEVICE + static void load(GmemTileIterator tile_load_iter, SmemTileIterator tile_store_iter) + { + Fragment tb_frag; + tb_frag.clear(); + tile_load_iter.load(tb_frag); + tile_store_iter.store(tb_frag); + + __syncthreads(); + } +}; diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/cpu_lion.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/cpu_lion.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a0562eac9c4aa4632898a858b04108ee01580f17 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/cpu_lion.cpp @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "cpu_lion.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("lion_update", &ds_lion_step, "DeepSpeed CPU Lion update (C++)"); + m.def("lion_update_copy", + &ds_lion_step_plus_copy, + "DeepSpeed CPU Lion update and param copy (C++)"); + m.def("create_lion", &create_lion_optimizer, "DeepSpeed CPU Lion (C++)"); + m.def("destroy_lion", &destroy_lion_optimizer, "DeepSpeed CPU Lion destroy (C++)"); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/cpu_lion_impl.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/cpu_lion_impl.cpp new file mode 100644 index 0000000000000000000000000000000000000000..28314cf5b6e1b4dbb97d26d2608cb2c17c432abd --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/cpu_lion_impl.cpp @@ -0,0 +1,268 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include +#include +#include +#include +#include +#include +#include "cpu_lion.h" + +#if defined(__ENABLE_CUDA__) +#include +#include "cublas_v2.h" +#include "cuda.h" +#include "curand.h" +#include "custom_cuda_layers.h" +#endif + +static std::unordered_map> s_optimizers; + +// C++ interface + +void Lion_Optimizer::Step_1(float* _params, + float* grads, + float* _exp_avg, + size_t _param_size, + ds_half_precision_t* dev_params, + bool half_precision) +{ + size_t rounded_size = 0; +#if defined(__AVX512__) or defined(__AVX256__) + Step_AVX<1>(&rounded_size, _params, grads, _exp_avg, _param_size, dev_params, half_precision); +#endif + if (_param_size > rounded_size) { + float betta1_minus1 = 1 - _betta1; + float betta2_minus1 = 1 - _betta2; + + float alpha = _alpha; + float after_decay = 1 - alpha * _weight_decay; + ds_half_precision_t* grads_cast_h; + ds_half_precision_t* params_cast_h; + if (half_precision) { + grads_cast_h = reinterpret_cast(grads); + params_cast_h = reinterpret_cast(_params); + } + + for (size_t t = rounded_size; t < _param_size; t += TILE) { + size_t copy_size = TILE; + if ((t + TILE) > _param_size) copy_size = _param_size - t; + size_t offset = copy_size + t; +#if defined(__ENABLE_CUDA__) + if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); } +#elif defined(__ENABLE_CANN__) + if ((t / TILE) >= 2) { aclrtSynchronizeStream(_streams[_buf_index].stream()); } +#endif +#pragma omp parallel for + for (size_t k = t; k < offset; k++) { + float grad = half_precision ? (float)grads_cast_h[k] : grads[k]; + float param = half_precision ? (float)params_cast_h[k] : _params[k]; + float momentum = _exp_avg[k]; + float tmp = momentum * _betta1; + tmp = grad * betta1_minus1 + tmp; + // Rely on portable C++ methods to manipulate the sign bit of a floating-point + // number. 
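+ // copysignf(alpha, tmp) returns |alpha| carrying the sign of tmp, so the line below
+ // yields -alpha when the blended momentum is positive and +alpha when it is negative.
+ // This is the branch-free form of the Lion update, param -= lr * sign(beta1 * m + (1 - beta1) * g),
+ // with decoupled weight decay folded in through after_decay = 1 - lr * weight_decay.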
+ tmp = -std::copysignf(alpha, tmp); + if (_weight_decay > 0) { + param = param * after_decay + tmp; + } else { + param = param + tmp; + } + momentum = momentum * _betta2; + momentum = grad * betta2_minus1 + momentum; +#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) + if (dev_params) _doubled_buffer[_buf_index][k - t] = param; +#endif + if (half_precision) + params_cast_h[k] = (ds_half_precision_t)param; + else + _params[k] = param; + _exp_avg[k] = momentum; + } +#if defined(__ENABLE_CUDA__) + if (dev_params) { + launch_param_update( + _doubled_buffer[_buf_index], dev_params + t, (copy_size), _streams[_buf_index]); + + _buf_index = !_buf_index; + } +#elif defined(__ENABLE_CANN__) + if (dev_params) { + size_t memcpy_size = copy_size * sizeof(_doubled_buffer[_buf_index][0]); + aclrtMemcpy(dev_params + t, + memcpy_size, + _doubled_buffer[_buf_index], + memcpy_size, + aclrtMemcpyKind::ACL_MEMCPY_HOST_TO_DEVICE); + + _buf_index = !_buf_index; + } +#endif + } + } +} + +void Lion_Optimizer::Step_4(float* _params, + float* grads, + float* _exp_avg, + size_t _param_size, + ds_half_precision_t* dev_params, + bool half_precision) +{ + size_t rounded_size = 0; +#if defined(__AVX512__) or defined(__AVX256__) + Step_AVX<4>(&rounded_size, _params, grads, _exp_avg, _param_size, dev_params, half_precision); +#endif + if (_param_size > rounded_size) + Step_1((_params + rounded_size), + (grads + rounded_size), + (_exp_avg + rounded_size), + (_param_size - rounded_size), + (dev_params != nullptr ? (dev_params + rounded_size) : dev_params), + half_precision); +} + +int create_lion_optimizer(int optimizer_id, + float alpha, + float betta1, + float betta2, + float weight_decay, + bool should_log) +{ + auto opt = std::make_shared(alpha, betta1, betta2, weight_decay); + + s_optimizers[optimizer_id] = opt; + + if (should_log) { + std::string avx_type = ""; +#if defined(__AVX512__) + avx_type = "AVX512"; +#else +#if defined(__AVX256__) + avx_type = "AVX2"; +#else + avx_type = "scalar"; +#endif +#endif + + printf("Lion Optimizer #%d is created with %s arithmetic capability.\n", + optimizer_id, + avx_type.c_str()); + printf("Config: alpha=%f, betas=(%f, %f), weight_decay=%f\n", + alpha, + betta1, + betta2, + weight_decay); + } + + return 0; +} + +void Lion_Optimizer::Step_8(float* _params, + float* grads, + float* _exp_avg, + size_t _param_size, + ds_half_precision_t* dev_params, + bool half_precision) +{ + size_t rounded_size = 0; +#if defined(__AVX512__) or defined(__AVX256__) + Step_AVX<8>(&rounded_size, _params, grads, _exp_avg, _param_size, dev_params, half_precision); +#endif + if (_param_size > rounded_size) + Step_4((_params + rounded_size), + (grads + rounded_size), + (_exp_avg + rounded_size), + (_param_size - rounded_size), + (dev_params != nullptr ? 
(dev_params + rounded_size) : dev_params), + half_precision); +} + +int ds_lion_step(int optimizer_id, + size_t step, + float lr, + float beta1, + float beta2, + float weight_decay, + torch::Tensor& params, + torch::Tensor& grads, + torch::Tensor& exp_avg) +{ + auto params_c = params.contiguous(); + auto grads_c = grads.contiguous(); + auto exp_avg_c = exp_avg.contiguous(); + + // assert(params.options().dtype() == grads.options().dtype()); + + float* params_ptr = (float*)params_c.data_ptr(); + float* grads_ptr = (float*)grads_c.data_ptr(); + float* exp_avg_ptr = (float*)exp_avg_c.data_ptr(); + + std::shared_ptr opt = + std::static_pointer_cast(s_optimizers[optimizer_id]); + opt->IncrementStep(step, beta1, beta2); + opt->update_state(lr, weight_decay); + + opt->Step_8(params_ptr, + grads_ptr, + exp_avg_ptr, + params_c.numel(), + nullptr, + (params.options().dtype() == at::kHalf)); + +#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) + opt->SynchronizeStreams(); +#endif + return 0; +} + +int ds_lion_step_plus_copy(int optimizer_id, + size_t step, + float lr, + float beta1, + float beta2, + float weight_decay, + torch::Tensor& params, + torch::Tensor& grads, + torch::Tensor& exp_avg, + torch::Tensor& gpu_params) +{ +#if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__) + auto params_c = params.contiguous(); + auto gpu_params_c = gpu_params.contiguous(); + auto exp_avg_c = exp_avg.contiguous(); + auto grads_c = grads.contiguous(); + + float* params_ptr = (float*)params_c.data_ptr(); + float* grads_ptr = (float*)grads_c.data_ptr(); + ds_half_precision_t* gpu_params_ptr = (ds_half_precision_t*)gpu_params_c.data_ptr(); + float* exp_avg_ptr = (float*)exp_avg_c.data_ptr(); + + std::shared_ptr opt = + std::static_pointer_cast(s_optimizers[optimizer_id]); + opt->IncrementStep(step, beta1, beta2); + opt->update_state(lr, weight_decay); + opt->Step_8(params_ptr, + grads_ptr, + exp_avg_ptr, + params_c.numel(), + gpu_params_ptr, + (params.options().dtype() == at::kHalf)); + + opt->SynchronizeStreams(); +#else + assert(false); +#endif + return 0; +} + +int destroy_lion_optimizer(int optimizer_id) +{ + s_optimizers.erase(optimizer_id); + + return 0; +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/fused_lion_frontend.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/fused_lion_frontend.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e523f97ca3098444a07b21394cf6a773ddcc700d --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/fused_lion_frontend.cpp @@ -0,0 +1,22 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include + +void multi_tensor_lion_cuda(int chunk_size, + at::Tensor noop_flag, + std::vector> tensor_lists, + const float lr, + const float beta1, + const float beta2, + const int step, + const float weight_decay); + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("multi_tensor_lion", + &multi_tensor_lion_cuda, + "Compute and apply gradient update to parameters for Lion optimizer"); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/multi_tensor_apply.cuh b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/multi_tensor_apply.cuh new file mode 100644 index 0000000000000000000000000000000000000000..12f41cb49c6bf505db48f1d21e312578f19da836 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/multi_tensor_apply.cuh @@ -0,0 +1,132 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Copyright NVIDIA/apex +This file is adapted from fused adam in NVIDIA/apex, commit a109f85 +*/ + +#include +#include +#include +#include +#include +#include "compat.h" + +#include + +// #include + +// This header is the one-stop shop for all your multi-tensor apply needs. + +// TODO: Kernel arg size limit may be <4KB for some other cards (ie Jetson) +constexpr int depth_to_max_tensors[5] = {110, 64, 48, 36, 30}; +constexpr int depth_to_max_blocks[5] = {320, 320, 320, 320, 320}; + +template +struct TensorListMetadata { + void* addresses[n][depth_to_max_tensors[n - 1]]; + int sizes[depth_to_max_tensors[n - 1]]; + unsigned char block_to_tensor[depth_to_max_blocks[n - 1]]; + int block_to_chunk[depth_to_max_blocks[n - 1]]; // I fear this needs to be a full int. + int start_tensor_this_launch; +}; + +template +__global__ void multi_tensor_apply_kernel(int chunk_size, + volatile int* noop_flag, + T tl, + U callable, + ArgTypes... args) +{ + // Hand the chunk information to the user-supplied functor to process however it likes. + callable(chunk_size, noop_flag, tl, args...); +} + +template +void multi_tensor_apply(int block_size, + int chunk_size, + const at::Tensor& noop_flag, + const std::vector>& tensor_lists, + T callable, + ArgTypes... args) +{ + TORCH_CHECK(tensor_lists.size() == depth, "tensor_lists.size() != depth"); + int len0 = tensor_lists[0].size(); + TORCH_CHECK(len0 > 0, "tensor_lists[0].size() is not > 0"); + auto ref_device = tensor_lists[0][0].device(); + TORCH_CHECK(ref_device.type() == at::kCUDA, "expected input to be on cuda"); + for (int l = 0; l < tensor_lists.size(); l++) // No range-based for because I need indices + { + TORCH_CHECK(tensor_lists[l].size() == len0, "Size mismatch among tensor lists"); + for (int t = 0; t < tensor_lists[l].size(); t++) { + // TODO: Print which tensor fails. 
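+            // (A low-effort way to address the TODO: TORCH_CHECK takes a variadic message,
+            //  so the failing indices can be folded in, e.g.
+            //  TORCH_CHECK(contiguous_memory, "tensor_lists[", l, "][", t, "] was not contiguous.");)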
+ bool contiguous_memory = tensor_lists[l][t].is_contiguous(); +#ifdef VERSION_GE_1_5 + contiguous_memory = (contiguous_memory || + tensor_lists[l][t].is_contiguous(at::MemoryFormat::ChannelsLast)); +#endif + TORCH_CHECK(contiguous_memory, "A tensor was not contiguous."); + TORCH_CHECK(tensor_lists[l][t].device() == ref_device, + "A tensor was not on the same device as the first tensor"); + TORCH_CHECK(tensor_lists[l][t].numel() == tensor_lists[0][t].numel(), "Size mismatch"); + } + } + + int ntensors = tensor_lists[0].size(); + + TensorListMetadata tl; + + const at::cuda::OptionalCUDAGuard device_guard(device_of(tensor_lists[0][0])); + auto stream = at::cuda::getCurrentCUDAStream(); + + tl.start_tensor_this_launch = 0; + int loc_block_info = 0; + int loc_tensor_info = 0; + for (int t = 0; t < ntensors; t++) { + tl.sizes[loc_tensor_info] = tensor_lists[0][t].numel(); + for (int d = 0; d < depth; d++) + tl.addresses[d][loc_tensor_info] = tensor_lists[d][t].data_ptr(); + loc_tensor_info++; + + int chunks_this_tensor = (tensor_lists[0][t].numel() + chunk_size - 1) / chunk_size; + + for (int chunk = 0; chunk < chunks_this_tensor; chunk++) { + // std::cout << chunks_this_tensor << std::endl; + tl.block_to_tensor[loc_block_info] = loc_tensor_info - 1; + tl.block_to_chunk[loc_block_info] = chunk; + loc_block_info++; + + bool tensors_full = (loc_tensor_info == depth_to_max_tensors[depth - 1] && + chunk == chunks_this_tensor - 1); + bool blocks_full = (loc_block_info == depth_to_max_blocks[depth - 1]); + bool last_chunk = (t == ntensors - 1 && chunk == chunks_this_tensor - 1); + if (tensors_full || blocks_full || last_chunk) { + // using accscalar_t = acc_type; + multi_tensor_apply_kernel<<>>( + chunk_size, noop_flag.DATA_PTR(), tl, callable, args...); + + AT_CUDA_CHECK(cudaGetLastError()); + + // Reset. The control flow possibilities here make my brain hurt. + loc_block_info = 0; + if (chunk == chunks_this_tensor - 1) { + // std::cout << "Hit case 1 " << cond1 << " " << cond2 << " " << cond3 << + // std::endl; + loc_tensor_info = 0; + tl.start_tensor_this_launch = t + 1; + } else { + // std::cout << "Hit case 2 " << cond1 << " " << cond2 << " " << cond3 << + // std::endl; + tl.sizes[0] = tl.sizes[loc_tensor_info - 1]; + for (int d = 0; d < depth; d++) + tl.addresses[d][0] = tl.addresses[d][loc_tensor_info - 1]; + loc_tensor_info = 1; + tl.start_tensor_this_launch = t; + } + } + } + } +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/multi_tensor_lion.cu b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/multi_tensor_lion.cu new file mode 100644 index 0000000000000000000000000000000000000000..f5fe6dfdd7d0c3fa9200da8592d0a3f56b138428 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/lion/multi_tensor_lion.cu @@ -0,0 +1,126 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Copyright NVIDIA/apex +This file is adapted from fused adam in NVIDIA/apex, commit a109f85 +*/ + +#include +#include +#include +#include +// Another possibility: +// #include + +#include + +#include "multi_tensor_apply.cuh" +#include "type_shim.h" + +#define BLOCK_SIZE 512 +#define ILP 4 + +using MATH_T = float; + +template +struct LionFunctor { + __device__ __forceinline__ void operator()(int chunk_size, + volatile int* noop_gmem, + TensorListMetadata<3>& tl, + const float beta1, + const float beta2, + const float lr, + const float decay) + { + // I'd like this kernel to propagate infs/nans. 
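+        // (The early return below is deliberately left commented out: enabling it would
+        //  skip remaining chunks once noop_gmem is set, whereas leaving it disabled lets
+        //  any non-finite values flow through to the outputs, which is the stated intent.)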
+ // if(*noop_gmem == 1) + // return; + + int tensor_loc = tl.block_to_tensor[blockIdx.x]; + + // potentially use to pass in list of scalar + // int tensor_num = tl.start_tensor_this_launch + tensor_loc; + + int chunk_idx = tl.block_to_chunk[blockIdx.x]; + int n = tl.sizes[tensor_loc]; + + T* g = (T*)tl.addresses[0][tensor_loc]; + g += chunk_idx * chunk_size; + + T* p = (T*)tl.addresses[1][tensor_loc]; + p += chunk_idx * chunk_size; + + T* m = (T*)tl.addresses[2][tensor_loc]; + m += chunk_idx * chunk_size; + + n -= chunk_idx * chunk_size; + + MATH_T after_decay = 1.0f - lr * decay; + + // see note in multi_tensor_scale_kernel.cu + for (int i_start = 0; i_start < n && i_start < chunk_size; i_start += blockDim.x * ILP) { + MATH_T r_g[ILP]; + MATH_T r_p[ILP]; + MATH_T r_m[ILP]; +#pragma unroll + for (int ii = 0; ii < ILP; ii++) { + int i = i_start + threadIdx.x + ii * blockDim.x; + if (i < n && i < chunk_size) { + r_g[ii] = g[i]; + r_p[ii] = p[i]; + r_m[ii] = m[i]; + } else { + r_g[ii] = MATH_T(0); + r_p[ii] = MATH_T(0); + r_m[ii] = MATH_T(0); + } + } +#pragma unroll + for (int ii = 0; ii < ILP; ii++) { + MATH_T c = beta1 * r_m[ii] + (1 - beta1) * r_g[ii]; + MATH_T update = c > 0 ? (-lr) : lr; + r_p[ii] = r_p[ii] * after_decay + update; + r_m[ii] = beta2 * r_m[ii] + (1 - beta2) * r_g[ii]; + } +#pragma unroll + for (int ii = 0; ii < ILP; ii++) { + int i = i_start + threadIdx.x + ii * blockDim.x; + if (i < n && i < chunk_size) { + p[i] = r_p[ii]; + m[i] = r_m[ii]; + } + } + } + } +}; + +void multi_tensor_lion_cuda(int chunk_size, + at::Tensor noop_flag, + std::vector> tensor_lists, + const float lr, + const float beta1, + const float beta2, + const int step, + const float weight_decay) +{ + using namespace at; + + // Assume single type across p,g,m1,m2 now + DISPATCH_DOUBLE_FLOAT_AND_HALF(tensor_lists[0][0].scalar_type(), + 0, + "lion", + multi_tensor_apply<3>(BLOCK_SIZE, + chunk_size, + noop_flag, + tensor_lists, + LionFunctor(), + beta1, + beta2, + lr, + weight_decay);) + + AT_CUDA_CHECK(cudaGetLastError()); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/spatial/csrc/opt_bias_add.cu b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/spatial/csrc/opt_bias_add.cu new file mode 100644 index 0000000000000000000000000000000000000000..d831b372b65f398b43c5cd343b2bf2db67562f56 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/spatial/csrc/opt_bias_add.cu @@ -0,0 +1,149 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include "memory_access_utils.h" +#include "spatial_cuda_layers.h" + +/* +Fused bias add variants +*/ + +namespace badd_opt { +constexpr int threads = 256; +constexpr int steps = 2; +constexpr int granularity = 16; +constexpr int vals_per_h = granularity / sizeof(__half); +constexpr int vals_per_h2 = granularity / sizeof(__half2); +constexpr int vals_per_block = threads * steps * vals_per_h; +constexpr int stride = vals_per_h * threads; +} // namespace badd_opt + +__global__ void opt_bias_add(__half* result, + const __half* activation, + const __half* bias, + int seq_len, + int channels) +{ + const int id = blockIdx.x * badd_opt::vals_per_block + threadIdx.x * badd_opt::vals_per_h; + const int stride = badd_opt::vals_per_h * badd_opt::threads; + + for (int i = 0; i < badd_opt::steps; i++) { + if (id + i * badd_opt::stride < seq_len * channels) { + __half2 act_buffer[badd_opt::vals_per_h2]; + __half2 bias_buffer[badd_opt::vals_per_h2]; + + mem_access::load_global(act_buffer, + activation + id + i * stride); + mem_access::load_global(bias_buffer, + bias + ((id + i * stride) % channels)); + + for (int j = 0; j < badd_opt::vals_per_h2; j++) { act_buffer[j] += bias_buffer[j]; } + + mem_access::store_global(result + id + i * stride, act_buffer); + } + } +} + +__global__ void opt_bias_add_add(__half* result, + const __half* activation, + const __half* bias, + const __half* other, + int seq_len, + int channels) +{ + const int id = blockIdx.x * badd_opt::vals_per_block + threadIdx.x * badd_opt::vals_per_h; + const int stride = badd_opt::vals_per_h * badd_opt::threads; + + for (int i = 0; i < badd_opt::steps; i++) { + if (id + i * badd_opt::stride < seq_len * channels) { + __half2 act_buffer[badd_opt::vals_per_h2]; + __half2 bias_buffer[badd_opt::vals_per_h2]; + __half2 other_buffer[badd_opt::vals_per_h2]; + + mem_access::load_global(act_buffer, + activation + id + i * stride); + mem_access::load_global(bias_buffer, + bias + ((id + i * stride) % channels)); + mem_access::load_global(other_buffer, other + id + i * stride); + + for (int j = 0; j < badd_opt::vals_per_h2; j++) { + act_buffer[j] += bias_buffer[j] + other_buffer[j]; + } + + mem_access::store_global(result + id + i * stride, act_buffer); + } + } +} + +__global__ void opt_bias_add_bias_add(__half* result, + const __half* activation, + const __half* bias, + const __half* other, + const __half* other_bias, + int seq_len, + int channels) +{ + const int id = blockIdx.x * badd_opt::vals_per_block + threadIdx.x * badd_opt::vals_per_h; + const int stride = badd_opt::vals_per_h * badd_opt::threads; + + for (int i = 0; i < badd_opt::steps; i++) { + if (id + i * badd_opt::stride < seq_len * channels) { + __half2 act_buffer[badd_opt::vals_per_h2]; + __half2 bias_buffer[badd_opt::vals_per_h2]; + __half2 other_buffer[badd_opt::vals_per_h2]; + __half2 other_bias_buffer[badd_opt::vals_per_h2]; + + mem_access::load_global(act_buffer, + activation + id + i * stride); + mem_access::load_global(bias_buffer, + bias + ((id + i * stride) % channels)); + mem_access::load_global(other_buffer, other + id + i * stride); + mem_access::load_global( + other_bias_buffer, other_bias + ((id + i * stride) % channels)); + + for (int j = 0; j < badd_opt::vals_per_h2; j++) { + act_buffer[j] = + (act_buffer[j] + bias_buffer[j]) + (other_buffer[j] + other_bias_buffer[j]); + } + + mem_access::store_global(result + id + i * stride, act_buffer); + } + } +} + +void launch_opt_bias_add(__half* result, + const 
__half* activation, + const __half* bias, + const __half* other, + const __half* other_bias, + int batch_size, + int seq_len, + int channels, + cudaStream_t stream) +{ + // Should evaluate `true` for reasonable hidden sizes + assert(channels % badd_opt::vals_per_h == 0); + + const int effective_seq_len = batch_size * seq_len; + const int vals = effective_seq_len * channels; + + dim3 block(badd_opt::threads); + dim3 grid((vals + badd_opt::vals_per_block - 1) / badd_opt::vals_per_block); + + if (!other) { + // We shouldn't have a bias if there's no activation + assert(!other_bias); + + opt_bias_add<<>>( + result, activation, bias, effective_seq_len, channels); + } else if (!other_bias) { + opt_bias_add_add<<>>( + result, activation, bias, other, effective_seq_len, channels); + } else { + opt_bias_add_bias_add<<>>( + result, activation, bias, other, other_bias, effective_seq_len, channels); + } +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/spatial/csrc/pt_binding.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/spatial/csrc/pt_binding.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cbf6636a6ee4ac5769a88b7cab7d2a222f00d5cc --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/spatial/csrc/pt_binding.cpp @@ -0,0 +1,112 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include +#include +#include +#include "spatial_cuda_layers.h" + +ChannelsLastProblem dimension_problem(at::Tensor& input) +{ + ChannelsLastProblem dims; + + if (input.dim() == 4) { + // In some sense this is unsafe (and a reflection of the assumptions made inside + // the C10 options checker). Basically, there's no great way to be sure that + // a tensor is in channels last because a 1x1 image will appear to be in channels + // last even when it isn't. 
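+        // (Concretely: with H == W == 1 the element orderings of NCHW and NHWC coincide,
+        //  so a plain contiguous tensor also satisfies the channels-last check; the assert
+        //  below can therefore only catch layouts that are unambiguously wrong.)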
+ assert(input.is_contiguous(at::MemoryFormat::ChannelsLast)); + dims.batch_size = input.size(0); + dims.seq_len = input.size(2) * input.size(3); + dims.channels = input.size(1); + } else { + assert(input.is_contiguous()); + dims.batch_size = input.size(0); + dims.seq_len = input.size(1); + dims.channels = input.size(2); + } + + return dims; +} + +at::Tensor seq_unroll_bias_add(at::Tensor& input, at::Tensor& bias) +{ + assert(input.dtype() == at::kHalf); + + // TODO(cmikeh2): Should probably refactor this into a more portable + // description, since it does generalize for channels-last + ChannelsLastProblem problem = dimension_problem(input); + + auto output = at::empty_like(input); + + launch_opt_bias_add((__half*)output.data_ptr(), + (const __half*)input.data_ptr(), + (const __half*)bias.data_ptr(), + nullptr, + nullptr, + problem.batch_size, + problem.seq_len, + problem.channels, + at::cuda::getCurrentCUDAStream()); + + return output; +} + +at::Tensor seq_bias_add_add(at::Tensor& input, at::Tensor& bias, at::Tensor& other) +{ + assert(input.dtype() == at::kHalf); + + // TODO(cmikeh2): Should probably refactor this into a more portable + // description, since it does generalize for channels-last + ChannelsLastProblem problem = dimension_problem(input); + + auto output = at::empty_like(input); + + launch_opt_bias_add((__half*)output.data_ptr(), + (const __half*)input.data_ptr(), + (const __half*)bias.data_ptr(), + (const __half*)other.data_ptr(), + nullptr, + problem.batch_size, + problem.seq_len, + problem.channels, + at::cuda::getCurrentCUDAStream()); + + return output; +} + +at::Tensor seq_bias_add_bias_add(at::Tensor& input, + at::Tensor& bias, + at::Tensor& other, + at::Tensor& other_bias) +{ + assert(input.dtype() == at::kHalf); + + // TODO(cmikeh2): Should probably refactor this into a more portable + // description, since it does generalize for channels-last + ChannelsLastProblem problem = dimension_problem(input); + + auto output = at::empty_like(input); + + launch_opt_bias_add((__half*)output.data_ptr(), + (const __half*)input.data_ptr(), + (const __half*)bias.data_ptr(), + (const __half*)other.data_ptr(), + (const __half*)other_bias.data_ptr(), + problem.batch_size, + problem.seq_len, + problem.channels, + at::cuda::getCurrentCUDAStream()); + + return output; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("nhwc_bias_add", &seq_unroll_bias_add); + m.def("nhwc_bias_add_add", &seq_bias_add_add); + m.def("nhwc_bias_add_bias_add", &seq_bias_add_bias_add); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/spatial/includes/spatial_cuda_layers.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/spatial/includes/spatial_cuda_layers.h new file mode 100644 index 0000000000000000000000000000000000000000..4f56f89f16cd1a200f057ebd651575224be23b7b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/spatial/includes/spatial_cuda_layers.h @@ -0,0 +1,37 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#if __CUDA_ARCH__ >= 530 +#define HALF_PRECISION_AVAILABLE = 1 +#endif + +#ifdef __HIP_PLATFORM_AMD__ +#include +#else +#include +#endif + +#include +#include + +/*********** Group Norm Kernels, Structs, and Helpers ************/ + +struct { + int64_t batch_size; + int64_t seq_len; + int64_t channels; +} typedef ChannelsLastProblem; + +void launch_opt_bias_add(__half* result, + const __half* activation, + const __half* bias, + const __half* other, + const __half* other_bias, + int batch_size, + int seq_len, + int channels, + cudaStream_t stream); diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/cublas_wrappers.cu b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/cublas_wrappers.cu new file mode 100644 index 0000000000000000000000000000000000000000..7821a8759ab0d70bf5491032d4d1209b586b1fcd --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/cublas_wrappers.cu @@ -0,0 +1,408 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "cublas_wrappers.h" + +#ifdef __HIP_PLATFORM_AMD__ +int cublas_gemm_ex(rocblas_handle handle, + rocblas_operation transa, + rocblas_operation transb, + int m, + int n, + int k, + const float* alpha, + const float* beta, + const float* A, + const float* B, + float* C, + rocblas_gemm_algo algo) +#else +int cublas_gemm_ex(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const float* alpha, + const float* beta, + const float* A, + const float* B, + float* C, + cublasGemmAlgo_t algo) +#endif +{ +#ifdef __HIP_PLATFORM_AMD__ + rocblas_status status = rocblas_gemm_ex(handle, + transa, + transb, + m, + n, + k, + (const void*)alpha, + (const void*)A, + rocblas_datatype_f32_r, + (transa == rocblas_operation_none) ? m : k, + (const void*)B, + rocblas_datatype_f32_r, + (transb == rocblas_operation_none) ? k : n, + (const void*)beta, + C, + rocblas_datatype_f32_r, + m, + C, + rocblas_datatype_f32_r, + m, + rocblas_datatype_f32_r, + algo, + 0, + 0); +#else + cublasStatus_t status = cublasGemmEx(handle, + transa, + transb, + m, + n, + k, + (const void*)alpha, + (const void*)A, + CUDA_R_32F, + (transa == CUBLAS_OP_N) ? m : k, + (const void*)B, + CUDA_R_32F, + (transb == CUBLAS_OP_N) ? k : n, + (const void*)beta, + C, + CUDA_R_32F, + m, + CUDA_R_32F, + algo); +#endif + +#ifdef __HIP_PLATFORM_AMD__ + if (status != rocblas_status_success) { +#else + if (status != CUBLAS_STATUS_SUCCESS) { +#endif + fprintf(stderr, + "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", + m, + n, + k, + (int)status); + return EXIT_FAILURE; + } + return 0; +} + +#ifdef __HIP_PLATFORM_AMD__ +int cublas_gemm_ex(rocblas_handle handle, + rocblas_operation transa, + rocblas_operation transb, + int m, + int n, + int k, + const float* alpha, + const float* beta, + const __half* A, + const __half* B, + __half* C, + rocblas_gemm_algo algo) +#else +int cublas_gemm_ex(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const float* alpha, + const float* beta, + const __half* A, + const __half* B, + __half* C, + cublasGemmAlgo_t algo) +#endif +{ +#ifdef __HIP_PLATFORM_AMD__ + rocblas_status status = rocblas_gemm_ex(handle, + transa, + transb, + m, + n, + k, + (const void*)alpha, + (const void*)A, + rocblas_datatype_f16_r, + (transa == rocblas_operation_none) ? 
m : k, + (const void*)B, + rocblas_datatype_f16_r, + (transb == rocblas_operation_none) ? k : n, + (const void*)beta, + (void*)C, + rocblas_datatype_f16_r, + m, + (void*)C, + rocblas_datatype_f16_r, + m, + rocblas_datatype_f32_r, + algo, + 0, + 0); +#else + cublasStatus_t status = cublasGemmEx(handle, + transa, + transb, + m, + n, + k, + (const void*)alpha, + (const void*)A, + CUDA_R_16F, + (transa == CUBLAS_OP_N) ? m : k, + (const void*)B, + CUDA_R_16F, + (transb == CUBLAS_OP_N) ? k : n, + (const void*)beta, + (void*)C, + CUDA_R_16F, + m, + CUDA_R_32F, + algo); +#endif + +#ifdef __HIP_PLATFORM_AMD__ + if (status != rocblas_status_success) { +#else + if (status != CUBLAS_STATUS_SUCCESS) { +#endif + fprintf(stderr, + "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", + m, + n, + k, + (int)status); + return EXIT_FAILURE; + } + return 0; +} + +#ifdef __HIP_PLATFORM_AMD__ +int cublas_strided_batched_gemm(rocblas_handle handle, + int m, + int n, + int k, + const float* alpha, + const float* beta, + const float* A, + const float* B, + float* C, + rocblas_operation op_A, + rocblas_operation op_B, + int stride_A, + int stride_B, + int stride_C, + int batch, + rocblas_gemm_algo algo) +#else +int cublas_strided_batched_gemm(cublasHandle_t handle, + int m, + int n, + int k, + const float* alpha, + const float* beta, + const float* A, + const float* B, + float* C, + cublasOperation_t op_A, + cublasOperation_t op_B, + int stride_A, + int stride_B, + int stride_C, + int batch, + cublasGemmAlgo_t algo) +#endif +{ +#ifdef __HIP_PLATFORM_AMD__ + rocblas_status status = + rocblas_gemm_strided_batched_ex(handle, + op_A, + op_B, + m, + n, + k, + alpha, + A, + rocblas_datatype_f32_r, + (op_A == rocblas_operation_none) ? m : k, + stride_A, + B, + rocblas_datatype_f32_r, + (op_B == rocblas_operation_none) ? k : n, + stride_B, + beta, + C, + rocblas_datatype_f32_r, + m, + stride_C, + C, + rocblas_datatype_f32_r, + m, + stride_C, + batch, + rocblas_datatype_f32_r, + algo, + 0, + 0); +#else + cublasStatus_t status = cublasGemmStridedBatchedEx(handle, + op_A, + op_B, + m, + n, + k, + alpha, + A, + CUDA_R_32F, + (op_A == CUBLAS_OP_N) ? m : k, + stride_A, + B, + CUDA_R_32F, + (op_B == CUBLAS_OP_N) ? k : n, + stride_B, + beta, + C, + CUDA_R_32F, + m, + stride_C, + batch, + CUDA_R_32F, + algo); +#endif + +#ifdef __HIP_PLATFORM_AMD__ + if (status != rocblas_status_success) { +#else + if (status != CUBLAS_STATUS_SUCCESS) { +#endif + fprintf(stderr, + "!!!! kernel execution error. (batch: %d, m: %d, n: %d, k: %d, error: %d) \n", + batch, + m, + n, + k, + (int)status); + return EXIT_FAILURE; + } + return 0; +} + +#ifdef __HIP_PLATFORM_AMD__ +int cublas_strided_batched_gemm(rocblas_handle handle, + int m, + int n, + int k, + const float* alpha, + const float* beta, + const __half* A, + const __half* B, + __half* C, + rocblas_operation op_A, + rocblas_operation op_B, + int stride_A, + int stride_B, + int stride_C, + int batch, + rocblas_gemm_algo algo) +#else +int cublas_strided_batched_gemm(cublasHandle_t handle, + int m, + int n, + int k, + const float* alpha, + const float* beta, + const __half* A, + const __half* B, + __half* C, + cublasOperation_t op_A, + cublasOperation_t op_B, + int stride_A, + int stride_B, + int stride_C, + int batch, + cublasGemmAlgo_t algo) +#endif +{ +#ifdef __HIP_PLATFORM_AMD__ + rocblas_status status = + rocblas_gemm_strided_batched_ex(handle, + op_A, + op_B, + m, + n, + k, + alpha, + A, + rocblas_datatype_f16_r, + (op_A == rocblas_operation_none) ? 
m : k, + stride_A, + B, + rocblas_datatype_f16_r, + (op_B == rocblas_operation_none) ? k : n, + stride_B, + beta, + C, + rocblas_datatype_f16_r, + m, + stride_C, + C, + rocblas_datatype_f16_r, + m, + stride_C, + batch, + rocblas_datatype_f32_r, + algo, + 0, + 0); +#else + cublasStatus_t status = cublasGemmStridedBatchedEx(handle, + op_A, + op_B, + m, + n, + k, + alpha, + A, + CUDA_R_16F, + (op_A == CUBLAS_OP_N) ? m : k, + stride_A, + B, + CUDA_R_16F, + (op_B == CUBLAS_OP_N) ? k : n, + stride_B, + beta, + C, + CUDA_R_16F, + m, + stride_C, + batch, + CUDA_R_32F, + algo); +#endif + +#ifdef __HIP_PLATFORM_AMD__ + if (status != rocblas_status_success) { +#else + if (status != CUBLAS_STATUS_SUCCESS) { +#endif + fprintf(stderr, + "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", + m, + n, + k, + (int)status); + return EXIT_FAILURE; + } + + return 0; +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/dropout_kernels.cu b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/dropout_kernels.cu new file mode 100644 index 0000000000000000000000000000000000000000..97cb3592dcdc4cf0a2dc37e34fc9b13117d9bcc5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/dropout_kernels.cu @@ -0,0 +1,873 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "custom_cuda_layers.h" + +const int unroll_factor = 4; + +__global__ void dropout_kernel(const int N, + const float ratio, + float* out, + const float* Xdata, + uint8_t* mask, + std::pair seed) +{ + const float scale = 1. / (1. - ratio); + int idx = blockIdx.x * blockDim.x + threadIdx.x; + + curandStatePhilox4_32_10_t state; + curand_init(seed.first, idx, seed.second, &state); + + CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) + { + float4 rand = curand_uniform4(&state); + uint8_t m[unroll_factor]; + + m[0] = (uint8_t)(rand.x > ratio); + m[1] = (uint8_t)(rand.y > ratio); + m[2] = (uint8_t)(rand.z > ratio); + m[3] = (uint8_t)(rand.w > ratio); + + int i = j * unroll_factor; + + mask[i] = (uint8_t)m[0]; + mask[i + 1] = (uint8_t)m[1]; + mask[i + 2] = (uint8_t)m[2]; + mask[i + 3] = (uint8_t)m[3]; + + out[i] = Xdata[i] * scale * m[0]; + out[i + 1] = Xdata[i + 1] * scale * m[1]; + out[i + 2] = Xdata[i + 2] * scale * m[2]; + out[i + 3] = Xdata[i + 3] * scale * m[3]; + } + int high_index = + ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; + if (N > high_index) { + float4 rand = curand_uniform4(&state); + float* rand_data = &(rand.x); + int k = 0; + for (int i = high_index; i < N; i++) { + uint8_t m = (uint8_t)(rand_data[k++] > ratio); + out[i] = Xdata[i] * scale * m; + mask[i] = m; + } + } +} + +__global__ void dropout_kernel(const int N, + const float ratio, + __half* out, + const __half* Xdata, + uint8_t* mask, + std::pair seed) +{ + const float scale = 1. / (1. 
- ratio); + + int idx = blockIdx.x * blockDim.x + threadIdx.x; + + curandStatePhilox4_32_10_t state; + curand_init(seed.first, idx, seed.second, &state); + +#ifdef __STOCHASTIC_MODE__ + + const __half2 h_scale = __float2half2_rn(scale); + const float2* x_cast = reinterpret_cast(Xdata); + float2* out_cast = reinterpret_cast(out); + uint32_t* mask_cast = reinterpret_cast(mask); + + uint32_t m_32; + uint8_t* m = reinterpret_cast(&m_32); + + float2 result_f; + __half2* result_h = reinterpret_cast<__half2*>(&result_f); + __half2 mask_h[2]; + float2 mask_f[2]; + + CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) + { + float2 x_f = x_cast[j]; + __half2* x_h = reinterpret_cast<__half2*>(&x_f); + + float4 rand = curand_uniform4(&state); + + m[0] = (uint8_t)(rand.x > ratio); + m[1] = (uint8_t)(rand.y > ratio); + m[2] = (uint8_t)(rand.z > ratio); + m[3] = (uint8_t)(rand.w > ratio); + + float* mask_f_data = &mask_f[0].x; +#pragma unroll + for (int i = 0; i < unroll_factor; i++) mask_f_data[i] = (float)(m[i]); + + mask_h[0] = __float22half2_rn(mask_f[0]); + mask_h[1] = __float22half2_rn(mask_f[1]); + + result_h[0] = x_h[0] * h_scale * mask_h[0]; + result_h[1] = x_h[1] * h_scale * mask_h[1]; + + out_cast[j] = result_f; + + mask_cast[j] = m_32; + } + +#else + + CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) + { + int i = j * unroll_factor; + + const __half2* vals_half = reinterpret_cast(Xdata + i); + float2 vals_half_f[2]; + vals_half_f[0] = __half22float2(vals_half[0]); + vals_half_f[1] = __half22float2(vals_half[1]); + + uint8_t m[unroll_factor]; + float4 rand = curand_uniform4(&state); + m[0] = (uint8_t)(rand.x > ratio); + m[1] = (uint8_t)(rand.y > ratio); + m[2] = (uint8_t)(rand.z > ratio); + m[3] = (uint8_t)(rand.w > ratio); + + out[i] = __float2half(vals_half_f[0].x * scale * m[0]); + out[i + 1] = __float2half(vals_half_f[0].y * scale * m[1]); + out[i + 2] = __float2half(vals_half_f[1].x * scale * m[2]); + out[i + 3] = __float2half(vals_half_f[1].y * scale * m[3]); + + mask[i] = m[0]; + mask[i + 1] = m[1]; + mask[i + 2] = m[2]; + mask[i + 3] = m[3]; + } + +#endif + int high_index = + ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; + if (N > high_index) { + float4 rand = curand_uniform4(&state); + float* rand_data = &(rand.x); + int k = 0; + for (int i = high_index; i < N; i++) { + uint8_t m = (uint8_t)(rand_data[k++] > ratio); + out[i] = __float2half((float)Xdata[i] * scale * m); + mask[i] = m; + } + } +} + +__global__ void dropout_kernel_bwd(const int N, + const float ratio, + const float* Xdata, + float* out, + uint8_t* mask, + std::pair seed) +{ + const float scale = 1. / (1. - ratio); + CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) + { + int i = j * unroll_factor; + + out[i] = mask[i] ? Xdata[i] * scale : 0.0; + out[i + 1] = mask[i + 1] ? Xdata[i + 1] * scale : 0.0; + out[i + 2] = mask[i + 2] ? Xdata[i + 2] * scale : 0.0; + out[i + 3] = mask[i + 3] ? Xdata[i + 3] * scale : 0.0; + } + int high_index = + ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; + if (N > high_index) { + for (int i = high_index; i < N; i++) { out[i] = mask[i] ? Xdata[i] * scale : 0.0; } + } +} + +__global__ void dropout_kernel_bwd(const int N, + const float ratio, + const __half* Xdata, + __half* out, + uint8_t* mask, + std::pair seed) +{ + const float scale = 1. / (1. 
- ratio); + +#ifdef __STOCHASTIC_MODE__ + + const __half2 h_scale = __float2half2_rn(scale); + + const float2* x_cast = reinterpret_cast(Xdata); + float2* out_cast = reinterpret_cast(out); + uint32_t* mask_cast = reinterpret_cast(mask); + + CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) + { + float2 x_f = x_cast[j]; + __half2* x_h = reinterpret_cast<__half2*>(&x_f); + + uint32_t m_32 = mask_cast[j]; + uint8_t* m = (uint8_t*)&m_32; + + __half2 mask_h[2]; + float2 mask_f[2]; + + float* mask_f_data = &mask_f[0].x; +#pragma unroll + for (int i = 0; i < unroll_factor; i++) mask_f_data[i] = (float)(m[i]); + +#pragma unroll + for (int i = 0; i < 2; i++) mask_h[i] = __float22half2_rn(mask_f[i]); + + float2 result_f; + __half2* result_h = reinterpret_cast<__half2*>(&result_f); + + result_h[0] = x_h[0] * h_scale * mask_h[0]; + result_h[1] = x_h[1] * h_scale * mask_h[1]; + + out_cast[j] = result_f; + } + +#else + + const __half h_scale = __float2half(scale); + const __half h_zero = __float2half(0.0); + + CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) + { + int i = j * unroll_factor; + + const __half2* vals_half = reinterpret_cast(Xdata + i); + + uint8_t* m = mask + i; + + float2 vals_half_f[2]; + + vals_half_f[0] = __half22float2(vals_half[0]); + vals_half_f[1] = __half22float2(vals_half[1]); + + out[i] = __float2half(vals_half_f[0].x * scale * m[0]); + out[i + 1] = __float2half(vals_half_f[0].y * scale * m[1]); + out[i + 2] = __float2half(vals_half_f[1].x * scale * m[2]); + out[i + 3] = __float2half(vals_half_f[1].y * scale * m[3]); + } + +#endif + int high_index = + ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; + if (N > high_index) { + for (int i = high_index; i < N; i++) { + out[i] = __float2half((float)Xdata[i] * scale * mask[i]); + } + } +} + +template +void launch_dropout(T* out, + const T* vals, + uint8_t* mask, + int total_count, + int dim, + float ratio, + cudaStream_t stream, + bool bwd) +{ + assert(unroll_factor == 4); + + dim3 grid_dim = DS_GET_BLOCKS(total_count / unroll_factor); + dim3 block_dim = DS_CUDA_NUM_THREADS; + + if (dim > 512) { + block_dim.x >>= 1; + grid_dim.x <<= 1; + } + uint64_t inc = total_count / grid_dim.x / block_dim.x; + std::pair seed = TrainingContext::Instance().IncrementOffset(inc); + if (bwd) + dropout_kernel_bwd<<>>( + total_count, ratio, vals, out, mask, seed); + else + dropout_kernel<<>>( + total_count, ratio, out, vals, mask, seed); +} + +template void launch_dropout(float* out, + const float* vals, + uint8_t* mask, + int total_count, + int dim, + float ratio, + cudaStream_t stream, + bool); +template void launch_dropout(__half* out, + const __half* vals, + uint8_t* mask, + int total_count, + int dim, + float ratio, + cudaStream_t stream, + bool); + +__global__ void dropout_grad_kernel(const int N, const float scale, float* Xdata, uint8_t* mask) +{ + CUDA_1D_KERNEL_LOOP(i, N) { Xdata[i] *= scale * mask[i]; } +} + +__global__ void dropout_grad_kernel(const int N, const float scale, __half* Xdata, uint8_t* mask) +{ + const __half2 h_scale = __float2half2_rn(scale); + float2* x_cast = reinterpret_cast(Xdata); + uint32_t* mask_cast = reinterpret_cast(mask); + + CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) + { + float2 x_data = x_cast[j]; + uint32_t m_32 = mask_cast[j]; + uint8_t* m = (uint8_t*)&m_32; + + float2 result_f; + __half2* result_h = reinterpret_cast<__half2*>(&result_f); + +#ifdef __STOCHASTIC_MODE__ + + __half2* x_data_h = reinterpret_cast<__half2*>(&x_data); + __half2 mask_h[2]; + float2 mask_f[2]; + + float* 
mask_f_data = &mask_f[0].x; +#pragma unroll + for (int i = 0; i < unroll_factor; i++) *(mask_f_data++) = (float)(m[i]); + + mask_h[0] = __float22half2_rn(mask_f[0]); + mask_h[1] = __float22half2_rn(mask_f[1]); + + result_h[0] = x_data_h[0] * h_scale * mask_h[0]; + result_h[1] = x_data_h[1] * h_scale * mask_h[1]; + +#else + + __half* x_data_h = reinterpret_cast<__half*>(&x_data); + float2 result[2]; + + result[0].x = (float)x_data_h[0] * scale * m[0]; + result[0].y = (float)x_data_h[1] * scale * m[1]; + result[1].x = (float)x_data_h[2] * scale * m[2]; + result[1].y = (float)x_data_h[3] * scale * m[3]; + + result_h[0] = __float22half2_rn(result[0]); + result_h[1] = __float22half2_rn(result[1]); + +#endif + x_cast[j] = result_f; + } + int high_index = + ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; + if (N > high_index) { + for (int i = high_index; i < N; i++) { + Xdata[i] = __float2half((float)Xdata[i] * scale * mask[i]); + } + } +} + +template +void launch_dropout_grad(T* vals, uint8_t* mask, int total_count, float ratio, cudaStream_t stream) +{ + assert(unroll_factor == 4); + + const float scale = 1. / (1. - ratio); + dropout_grad_kernel<<>>(total_count, scale, vals, mask); +} + +template void launch_dropout_grad(float* vals, + uint8_t* mask, + int total_count, + float ratio, + cudaStream_t stream); +template void launch_dropout_grad(__half* vals, + uint8_t* mask, + int total_count, + float ratio, + cudaStream_t stream); + +__global__ void dropout_grad_kernel(const int N, + const float scale, + const float* Xdata, + float* out, + uint8_t* mask) +{ + CUDA_1D_KERNEL_LOOP(i, N) { out[i] = Xdata[i] * scale * mask[i]; } +} + +__global__ void dropout_grad_kernel(const int N, + const float scale, + const __half* Xdata, + __half* out, + uint8_t* mask) +{ + const float2* x_cast = reinterpret_cast(Xdata); + float2* out_cast = reinterpret_cast(out); + const uint32_t* mask_cast = reinterpret_cast(mask); + + float2 result_f; + __half2* result_h = reinterpret_cast<__half2*>(&result_f); + + CUDA_1D_KERNEL_LOOP(j, N / unroll_factor) + { + float2 x_data = x_cast[j]; + uint32_t m_32 = mask_cast[j]; + uint8_t* m = (uint8_t*)&m_32; + + __half* x_data_h = reinterpret_cast<__half*>(&x_data); + float2 result[2]; + + result[0].x = (float)x_data_h[0] * scale * m[0]; + result[0].y = (float)x_data_h[1] * scale * m[1]; + result[1].x = (float)x_data_h[2] * scale * m[2]; + result[1].y = (float)x_data_h[3] * scale * m[3]; + + result_h[0] = __float22half2_rn(result[0]); + result_h[1] = __float22half2_rn(result[1]); + + out_cast[j] = result_f; + } + int high_index = + ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; + if (N > high_index) { + for (int i = high_index; i < N; i++) { + out[i] = __float2half((float)Xdata[i] * scale * mask[i]); + } + } +} + +template +void launch_dropout_grad(T* vals_out, + const T* vals, + uint8_t* mask, + int total_count, + float ratio, + cudaStream_t stream) +{ + assert(unroll_factor == 4); + + const float scale = 1. / (1. 
- ratio); + dropout_grad_kernel<<>>(total_count, scale, vals, vals_out, mask); +} +template void launch_dropout_grad(float*, + const float* vals, + uint8_t* mask, + int total_count, + float ratio, + cudaStream_t stream); +template void launch_dropout_grad(__half*, + const __half* vals, + uint8_t* mask, + int total_count, + float ratio, + cudaStream_t stream); + +__global__ void dropout_kernel(const int N, + const int dim, + const float ratio, + const float* bias, + float* Xdata, + uint8_t* mask, + std::pair seed) +{ + const float scale = 1. / (1. - ratio); + int idx = blockIdx.x * blockDim.x + threadIdx.x; + int tid = threadIdx.x % (dim / unroll_factor); + + curandStatePhilox4_32_10_t state; + curand_init(seed.first, idx, seed.second, &state); + + float4* Xdata_cast = reinterpret_cast(Xdata); + uint32_t* mask_32 = reinterpret_cast(mask); + const float4* bias_cast = reinterpret_cast(bias); + + CUDA_1D_KERNEL_LOOP(j, N) + { + float4 rand = curand_uniform4(&state); + uint32_t m_32; + uint8_t* m = (uint8_t*)&m_32; + + m[0] = (uint8_t)(rand.x > ratio); + m[1] = (uint8_t)(rand.y > ratio); + m[2] = (uint8_t)(rand.z > ratio); + m[3] = (uint8_t)(rand.w > ratio); + + float4 x_data = Xdata_cast[j]; + float4 b_data = bias_cast[j % (dim / unroll_factor)]; + + x_data.x += b_data.x; + x_data.y += b_data.y; + x_data.z += b_data.z; + x_data.w += b_data.w; + + x_data.x = x_data.x * scale * m[0]; + x_data.y = x_data.y * scale * m[1]; + x_data.z = x_data.z * scale * m[2]; + x_data.w = x_data.w * scale * m[3]; + + mask_32[j] = m_32; + Xdata_cast[j] = x_data; + } + int high_index = + ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; + if (N > high_index) { + float4 rand = curand_uniform4(&state); + float* rand_data = &(rand.x); + int k = 0; + for (int i = high_index; i < N; i++) { + float x_data = Xdata[i] + bias[i % dim]; + uint8_t m = (uint8_t)(rand_data[k++] > ratio); + Xdata[i] = x_data * scale * m; + mask[i] = m; + } + } +} + +__global__ void dropout_kernel(const int N, + const int dim, + const float ratio, + const __half* bias, + __half* Xdata, + uint8_t* mask, + std::pair seed) +{ + const float scale = 1. / (1. 
- ratio); + int idx = blockIdx.x * blockDim.x + threadIdx.x; + int tid = threadIdx.x % (dim / unroll_factor); + + curandStatePhilox4_32_10_t state; + curand_init(seed.first, idx, seed.second, &state); + + float2* Xdata_cast = reinterpret_cast(Xdata); + uint32_t* mask_32 = reinterpret_cast(mask); + const float2* bias_cast = reinterpret_cast(bias); + + CUDA_1D_KERNEL_LOOP(j, N) + { + float4 rand = curand_uniform4(&state); + + float2 data_f; + __half2* data_h = reinterpret_cast<__half2*>(&data_f); + + float2 bias_f; + __half2* bias_h = reinterpret_cast<__half2*>(&bias_f); + + data_f = Xdata_cast[j]; + bias_f = bias_cast[j % (dim / unroll_factor)]; + + float2 data_h_0 = __half22float2(data_h[0]); + float2 data_h_1 = __half22float2(data_h[1]); + + float2 bias_h_0 = __half22float2(bias_h[0]); + float2 bias_h_1 = __half22float2(bias_h[1]); + + data_h_0.x += bias_h_0.x; + data_h_0.y += bias_h_0.y; + data_h_1.x += bias_h_1.x; + data_h_1.y += bias_h_1.y; + + uint32_t m_32; + uint8_t* m = (uint8_t*)&m_32; + + m[0] = (uint8_t)(rand.x > ratio); + m[1] = (uint8_t)(rand.y > ratio); + m[2] = (uint8_t)(rand.z > ratio); + m[3] = (uint8_t)(rand.w > ratio); + + data_h_0.x = __float2half(data_h_0.x * scale * m[0]); + data_h_0.y = __float2half(data_h_0.y * scale * m[1]); + data_h_1.x = __float2half(data_h_1.x * scale * m[2]); + data_h_1.y = __float2half(data_h_1.y * scale * m[3]); + + float2 result_f; + __half2* result_h = reinterpret_cast<__half2*>(&result_f); + + result_h[0] = __float22half2_rn(data_h_0); + result_h[1] = __float22half2_rn(data_h_1); + + Xdata_cast[j] = result_f; + mask_32[j] = m_32; + } + int high_index = + ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; + if (N > high_index) { + float4 rand = curand_uniform4(&state); + float* rand_data = &(rand.x); + int k = 0; + for (int i = high_index; i < N; i++) { + float x_data = (float)Xdata[i] + (float)bias[i % dim]; + uint8_t m = (uint8_t)(rand_data[k++] > ratio); + Xdata[i] = __float2half(x_data * scale * m); + mask[i] = m; + } + } +} + +template +void launch_dropout(T* out, + const T* bias, + uint8_t* mask, + int batch, + int dim, + float ratio, + cudaStream_t stream) +{ + assert(unroll_factor == 4); + + int total_count = batch * dim / unroll_factor; + + dim3 grid_dim = DS_GET_BLOCKS(total_count); + dim3 block_dim = DS_CUDA_NUM_THREADS; + + uint64_t inc = (batch * dim) / grid_dim.x / block_dim.x; + std::pair seed = TrainingContext::Instance().IncrementOffset(inc); + + dropout_kernel<<>>( + total_count, dim, ratio, bias, out, mask, seed); +} + +template void launch_dropout(float*, + const float* bias, + uint8_t* mask, + int batch, + int dim, + float ratio, + cudaStream_t stream); +template void launch_dropout(__half*, + const __half* bias, + uint8_t* mask, + int batch, + int dim, + float ratio, + cudaStream_t stream); + +__global__ void dropout_kernel(const int N, + const int dim, + const float ratio, + const float* input, + const float* residual, + const float* bias, + float* out, + uint8_t* mask, + std::pair seed) +{ + const float scale = 1. / (1. 
- ratio); + int idx = blockIdx.x * blockDim.x + threadIdx.x; + int tid = threadIdx.x % (dim / unroll_factor); + + curandStatePhilox4_32_10_t state; + curand_init(seed.first, idx, seed.second, &state); + + float4* out_cast = reinterpret_cast(out); + uint32_t* mask_32 = reinterpret_cast(mask); + + const float4* bias_cast = reinterpret_cast(bias); + const float4* residual_cast = reinterpret_cast(residual); + const float4* input_cast = reinterpret_cast(input); + + CUDA_1D_KERNEL_LOOP(j, N) + { + float4 rand = curand_uniform4(&state); + + uint32_t m_32; + uint8_t* m = (uint8_t*)&m_32; + + m[0] = (uint8_t)(rand.x > ratio); + m[1] = (uint8_t)(rand.y > ratio); + m[2] = (uint8_t)(rand.z > ratio); + m[3] = (uint8_t)(rand.w > ratio); + + float4 out_data; + float4 b_data = bias_cast[j % (dim / unroll_factor)]; + float4 res_data = residual_cast[j]; + float4 inp_data = input_cast[j]; + + out_data.x = (b_data.x + inp_data.x); + out_data.y = (b_data.y + inp_data.y); + out_data.z = (b_data.z + inp_data.z); + out_data.w = (b_data.w + inp_data.w); + + out_data.x = out_data.x * scale * m[0]; + out_data.y = out_data.y * scale * m[1]; + out_data.z = out_data.z * scale * m[2]; + out_data.w = out_data.w * scale * m[3]; + + out_data.x += res_data.x; + out_data.y += res_data.y; + out_data.z += res_data.z; + out_data.w += res_data.w; + + mask_32[j] = m_32; + out_cast[j] = out_data; + } + int high_index = + ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; + if (N > high_index) { + float4 rand = curand_uniform4(&state); + float* rand_data = &(rand.x); + int k = 0; + for (int i = high_index; i < N; i++) { + float x_data = input[i] + bias[i % dim]; + uint8_t m = (uint8_t)(rand_data[k++] > ratio); + x_data = x_data * scale * m; + x_data += residual[i]; + + out[i] = x_data; + mask[i] = m; + } + } +} + +__global__ void dropout_kernel(const int N, + const int dim, + const float ratio, + const __half* input, + const __half* residual, + const __half* bias, + __half* out, + uint8_t* mask, + std::pair seed) +{ + const float scale = 1. / (1. 
- ratio); + int idx = blockIdx.x * blockDim.x + threadIdx.x; + int tid = threadIdx.x % (dim / unroll_factor); + + curandStatePhilox4_32_10_t state; + curand_init(seed.first, idx, seed.second, &state); + + float2* out_cast = reinterpret_cast(out); + uint32_t* mask_32 = reinterpret_cast(mask); + + const float2* bias_cast = reinterpret_cast(bias); + const float2* residual_cast = reinterpret_cast(residual); + const float2* input_cast = reinterpret_cast(input); + + CUDA_1D_KERNEL_LOOP(j, N) + { + float4 rand = curand_uniform4(&state); + + float2 data_f; + __half2* data_h = reinterpret_cast<__half2*>(&data_f); + + float2 bias_f; + __half2* bias_h = reinterpret_cast<__half2*>(&bias_f); + + float2 residual_f; + __half2* residual_h = reinterpret_cast<__half2*>(&residual_f); + + float2 input_f; + __half2* input_h = reinterpret_cast<__half2*>(&input_f); + + bias_f = bias_cast[j % (dim / unroll_factor)]; + residual_f = residual_cast[j]; + input_f = input_cast[j]; + + float2 data_h_0 = __half22float2(data_h[0]); + float2 data_h_1 = __half22float2(data_h[1]); + + float2 bias_h_0 = __half22float2(bias_h[0]); + float2 bias_h_1 = __half22float2(bias_h[1]); + + float2 residual_h_0 = __half22float2(residual_h[0]); + float2 residual_h_1 = __half22float2(residual_h[1]); + + float2 input_h_0 = __half22float2(input_h[0]); + float2 input_h_1 = __half22float2(input_h[1]); + + data_h_0.x = (bias_h_0.x + input_h_0.x); + data_h_0.y = (bias_h_0.y + input_h_0.y); + data_h_1.x = (bias_h_1.x + input_h_1.x); + data_h_1.y = (bias_h_1.y + input_h_1.y); + + uint32_t m_32; + uint8_t* m = (uint8_t*)&m_32; + + m[0] = (uint8_t)(rand.x > ratio); + m[1] = (uint8_t)(rand.y > ratio); + m[2] = (uint8_t)(rand.z > ratio); + m[3] = (uint8_t)(rand.w > ratio); + + data_h_0.x = __float2half(data_h_0.x * scale * m[0]); + data_h_0.y = __float2half(data_h_0.y * scale * m[1]); + data_h_1.x = __float2half(data_h_1.x * scale * m[2]); + data_h_1.y = __float2half(data_h_1.y * scale * m[3]); + + data_h_0.x += residual_h_0.x; + data_h_0.y += residual_h_0.y; + data_h_1.x += residual_h_1.x; + data_h_1.y += residual_h_1.y; + + float2 result_f; + __half2* result_h = reinterpret_cast<__half2*>(&result_f); + + result_h[0] = __float22half2_rn(data_h_0); + result_h[1] = __float22half2_rn(data_h_1); + + out_cast[j] = result_f; + mask_32[j] = m_32; + } + int high_index = + ((((N / unroll_factor) - 1) / blockDim.x + 1) * (unroll_factor * blockDim.x)) + threadIdx.x; + if (N > high_index) { + float4 rand = curand_uniform4(&state); + float* rand_data = &(rand.x); + int k = 0; + for (int i = high_index; i < N; i++) { + float x_data = (float)input[i] + (float)bias[i % dim]; + uint8_t m = (uint8_t)(rand_data[k++] > ratio); + x_data = x_data * scale * m; + x_data += (float)residual[i]; + + out[i] = __float2half(x_data); + mask[i] = m; + } + } +} + +template +void launch_dropout(T* out, + const T* input, + const T* residual, + const T* bias, + uint8_t* mask, + int batch, + int dim, + float ratio, + cudaStream_t stream) +{ + assert(unroll_factor == 4); + + int total_count = batch * dim / unroll_factor; + dim3 grid_dim = DS_GET_BLOCKS(total_count); + dim3 block_dim = DS_CUDA_NUM_THREADS; + + uint64_t inc = (batch * dim) / grid_dim.x / block_dim.x; + std::pair seed = TrainingContext::Instance().IncrementOffset(inc); + + dropout_kernel<<>>( + total_count, dim, ratio, input, residual, bias, out, mask, seed); +} + +template void launch_dropout(float*, + const float*, + const float* residual, + const float* bias, + uint8_t* mask, + int batch, + int dim, + float ratio, + 
cudaStream_t stream); +template void launch_dropout(__half*, + const __half*, + const __half* residual, + const __half* bias, + uint8_t* mask, + int batch, + int dim, + float ratio, + cudaStream_t stream); diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/ds_transformer_cuda.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/ds_transformer_cuda.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b637bb710c67a4f980571cd876c4caa350f4a71b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/ds_transformer_cuda.cpp @@ -0,0 +1,1055 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include + +#include +#include +#include +#include +#include +#include +#include "Timer.h" +#include "context.h" +#include "cublas_wrappers.h" +#include "custom_cuda_layers.h" +#include "ds_transformer_cuda.h" + +static std::unordered_map> s_transformer_layers; + +const int init_seq_length = 128; + +// C++ interface + +template +unsigned get_workspace_size(unsigned maxBatchSize, + unsigned seq_len, + unsigned hidden_size, + unsigned intermediate_size, + unsigned heads, + bool training, + bool gelu_checkpoint) +{ + unsigned workSpacesize = 4 * (size_t(maxBatchSize) * seq_len * hidden_size); + if (training) { + workSpacesize += 2 * (size_t(maxBatchSize) * seq_len * hidden_size); + workSpacesize += ((std::max)((size_t(maxBatchSize) * seq_len * intermediate_size), + 2 * (size_t(maxBatchSize) * heads * seq_len * seq_len))); + if (gelu_checkpoint) + workSpacesize += 2 * (size_t(maxBatchSize) * seq_len * intermediate_size); + } + return workSpacesize; // * sizeof(T); +} + +// NOTE: AT_ASSERT has become AT_CHECK on master after 0.4. 
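+// The three macros below guard every tensor that crosses the Python/C++
+// boundary: CHECK_CUDA rejects host tensors, CHECK_CONTIGUOUS rejects
+// non-contiguous views, and CHECK_INPUT combines both. A rough usage sketch
+// (illustrative only; `my_op` and `t` are placeholder names):
+//
+//   void my_op(const torch::Tensor& t)
+//   {
+//       CHECK_INPUT(t);  // raises via AT_ASSERTM if either check fails
+//       // ... safe to take t.data_ptr() and launch kernels ...
+//   }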
+#define CHECK_CUDA(x) AT_ASSERTM(x.is_cuda(), #x " must be a CUDA tensor") +#define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous") +#define CHECK_INPUT(x) \ + CHECK_CUDA(x); \ + CHECK_CONTIGUOUS(x) + +template +BertTransformerLayer::BertTransformerLayer(unsigned layer_id, + unsigned batch_size, + unsigned hidden_size, + unsigned num_heads, + unsigned intermediate_size, + unsigned seq_length, + float attn_prob_dropout_ratio, + float hidden_output_dropout_ratio, + float layer_norm_eps, + bool pre_or_postLayerNorm, + const std::vector>& gemm_algos, + bool attn_dropout_checkpoint, + bool normalize_invertible, + bool gelu_checkpoint, + bool stochastic_mode) + : _layer_id(layer_id), + _batch_size(batch_size), + _hidden_size(hidden_size), + _heads(num_heads), + _intermediate_size(intermediate_size), + _seq_length(seq_length), + _training(true), + _pre_or_postLayerNorm(pre_or_postLayerNorm), + _attn_dropout_checkpoint(attn_dropout_checkpoint), + _normalize_invertible(normalize_invertible), + _gelu_checkpoint(gelu_checkpoint), + _stochastic_mode(stochastic_mode), + _stream(TrainingContext::Instance().GetCurrentStream()), + _cublasHandle(TrainingContext::Instance().GetCublasHandle()), + _qkv_linear(typename FeedForward::Config(batch_size * seq_length, + 3 * hidden_size, + hidden_size, + gemm_algos[0])), + _attn_out_linear(typename FeedForward::Config(batch_size * seq_length, + hidden_size, + hidden_size, + gemm_algos[0])), + _attn_layer_norm(typename Normalize_Layer::Config(batch_size, + seq_length, + hidden_size, + layer_norm_eps, + true, + !normalize_invertible)), + _layer_norm(typename Normalize_Layer::Config(batch_size, + seq_length, + hidden_size, + layer_norm_eps, + true, + !normalize_invertible)), + _ff1(typename FeedForward::Config(batch_size * seq_length, + _intermediate_size, + hidden_size, + gemm_algos[1])), + _ff2(typename FeedForward::Config(batch_size * seq_length, + hidden_size, + _intermediate_size, + gemm_algos[2])), + _softmax(typename Softmax::Config(batch_size, num_heads, seq_length)), + _gelu(typename Gelu::Config(_intermediate_size)), + _attn_prob_dropout(typename Dropout::Config(attn_prob_dropout_ratio, _seq_length)), + _attn_output_dropout(typename Dropout::Config(hidden_output_dropout_ratio, _hidden_size)), + _layer_output_dropout(typename Dropout::Config(hidden_output_dropout_ratio, _hidden_size)), + _attn_scores(typename StridedBatchGemm::Config(_batch_size * _heads, + _seq_length, + _seq_length, + _hidden_size / _heads, + (T(1.0) / T(sqrt(_hidden_size / _heads))), + T(0.0), + CUBLAS_OP_T, + CUBLAS_OP_N, + gemm_algos[3])), + _attn_context(typename StridedBatchGemm::Config(_batch_size * _heads, + _hidden_size / _heads, + _seq_length, + _seq_length, + T(1.0), + T(0.0), + CUBLAS_OP_N, + CUBLAS_OP_N, + gemm_algos[4])) +{ + assert(_hidden_size % _heads == 0); + + Initialize(); +} + +template +BertTransformerLayer::~BertTransformerLayer() +{ +} + +template +void BertTransformerLayer::Initialize() +{ +#ifndef __HIP_PLATFORM_AMD__ + if (std::is_same::value) cublasSetMathMode(_cublasHandle, CUBLAS_TENSOR_OP_MATH); +#endif +} + +template +void BertTransformerLayer::Forward(unsigned bsz, + const T* input_ptr, + const T* input_mask_ptr, + const T* attn_qkvw_ptr, + const T* attn_qkvb_ptr, + const T* attn_ow_ptr, + const T* attn_ob_ptr, + const T* attn_nw_ptr, + const T* attn_nb_ptr, + const T* inter_w_ptr, + const T* inter_b_ptr, + const T* output_w_ptr, + const T* output_b_ptr, + const T* norm_w_ptr, + const T* norm_b_ptr, + T* out_ptr, + T* inp_norm_ptr, 
+ T* q_tf_ptr, + T* k_tf_ptr, + T* v_tf_ptr, + T* soft_out_ptr, + T* ctx_bufB_ptr, + T* attn_o_inp_ptr, + T* add_res_ptr, + T* ff1_inp_ptr, + T* gelu_inp_ptr, + T* ff2_inp_ptr) +{ + cublasSetStream(_cublasHandle, _stream); + + if (!_stochastic_mode) cudaStreamSynchronize(_stream); + + T* workspace = static_cast(TrainingContext::Instance().GetWorkSpace()); + size_t small_buf_size = bsz * _seq_length * _hidden_size; + T* buf_0 = workspace; + T* buf_1 = buf_0 + small_buf_size; + T* buf_2 = buf_1; + + if (_normalize_invertible) { + add_res_ptr = buf_1 + 3 * small_buf_size; + buf_2 = add_res_ptr; + } + if (_gelu_checkpoint) buf_2 += small_buf_size; + if (_attn_dropout_checkpoint) + ctx_bufB_ptr = + (_gelu_checkpoint ? (buf_2 + (_intermediate_size / _hidden_size) * small_buf_size) + : (buf_1 + 4 * small_buf_size)); + + int bsz_seq = bsz * _seq_length; + + if (_pre_or_postLayerNorm) { + if (_layer_norm.UseMean()) + _layer_norm.ForwardCheckpoint( + bsz_seq, inp_norm_ptr, input_ptr, norm_w_ptr, norm_b_ptr, _stream, true); + + else + _layer_norm.Forward( + bsz_seq, inp_norm_ptr, input_ptr, norm_w_ptr, norm_b_ptr, _stream, true); + } + + if (_pre_or_postLayerNorm) + _qkv_linear.Forward(bsz_seq, inp_norm_ptr, attn_qkvw_ptr, buf_0, _cublasHandle); + else + _qkv_linear.Forward(bsz_seq, input_ptr, attn_qkvw_ptr, buf_0, _cublasHandle); + + launch_bias_add_transform_0213( + q_tf_ptr, buf_0, attn_qkvb_ptr, bsz, _seq_length, _hidden_size, _heads, _stream, 3); + + int bsz_heads = bsz * _heads; + + // attention scores + _attn_scores.Forward(bsz_heads, soft_out_ptr, k_tf_ptr, q_tf_ptr, _cublasHandle); + + // Softmax + Mask + _softmax.Forward(bsz, soft_out_ptr, input_mask_ptr, _stream); + + // attn prob dropout. + _attn_prob_dropout.Forward(bsz_heads * _seq_length, ctx_bufB_ptr, soft_out_ptr, _stream); + + // attention context + _attn_context.Forward(bsz_heads, buf_1, v_tf_ptr, ctx_bufB_ptr, _cublasHandle); + + launch_transform4d_0213( + attn_o_inp_ptr, buf_1, bsz, _heads, _seq_length, _hidden_size, _stream, 1); + + if (_pre_or_postLayerNorm) + _attn_out_linear.Forward(bsz_seq, attn_o_inp_ptr, attn_ow_ptr, buf_1, _cublasHandle); + else + _attn_out_linear.Forward(bsz_seq, attn_o_inp_ptr, attn_ow_ptr, ff1_inp_ptr, _cublasHandle); + + // attn output dropout. + if (_pre_or_postLayerNorm) + _attn_output_dropout.ForwardWithBias( + bsz_seq, add_res_ptr, buf_1, input_ptr, attn_ob_ptr, _stream); + else + _attn_output_dropout.ForwardWithBias( + bsz_seq, add_res_ptr, ff1_inp_ptr, input_ptr, attn_ob_ptr, _stream); + + if (_pre_or_postLayerNorm) { + if (_attn_layer_norm.UseMean()) + _attn_layer_norm.ForwardCheckpoint( + bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true); + else + _attn_layer_norm.Forward( + bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true); + } else { + if (_attn_layer_norm.UseMean()) + _attn_layer_norm.ForwardCheckpoint( + bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true); + else + _attn_layer_norm.Forward( + bsz_seq, ff1_inp_ptr, add_res_ptr, attn_nw_ptr, attn_nb_ptr, _stream, true); + } + + _ff1.Forward(bsz_seq, + ff1_inp_ptr, + inter_w_ptr, + (_gelu_checkpoint ? ff2_inp_ptr : gelu_inp_ptr), + _cublasHandle); + + _gelu.ForwardWithBiasAdd(bsz_seq, + (_gelu_checkpoint ? ff2_inp_ptr : gelu_inp_ptr), + inter_b_ptr, + (_gelu_checkpoint ? buf_2 : ff2_inp_ptr), + _stream); + + _ff2.Forward( + bsz_seq, (_gelu_checkpoint ? buf_2 : ff2_inp_ptr), output_w_ptr, out_ptr, _cublasHandle); + + // layer output dropout. 
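+    // ForwardWithBias corresponds to the fused bias+dropout+residual kernels
+    // earlier in this diff: add the output bias, apply the dropout mask with
+    // 1/(1 - ratio) scaling, then add the residual in a single pass. In
+    // pre-LayerNorm mode the result overwrites out_ptr; in post-LayerNorm
+    // mode the destination is inp_norm_ptr, which the final LayerNorm below
+    // then normalizes into out_ptr.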
+ if (_pre_or_postLayerNorm) + _layer_output_dropout.ForwardWithBias( + bsz_seq, out_ptr, out_ptr, add_res_ptr, output_b_ptr, _stream); + else + _layer_output_dropout.ForwardWithBias( + bsz_seq, inp_norm_ptr, out_ptr, ff1_inp_ptr, output_b_ptr, _stream); + + if (!_pre_or_postLayerNorm) { + if (_layer_norm.UseMean()) + _layer_norm.ForwardCheckpoint( + bsz_seq, out_ptr, inp_norm_ptr, norm_w_ptr, norm_b_ptr, _stream, true); + else + _layer_norm.Forward( + bsz_seq, out_ptr, inp_norm_ptr, norm_w_ptr, norm_b_ptr, _stream, true); + } +} + +template +void BertTransformerLayer::Backward(unsigned bsz, + const T* grad_output_ptr, + const T* input_ptr, + const T* output_ptr, + const T* inp_norm_ptr, + const T* q_tf_ptr, + const T* k_tf_ptr, + const T* v_tf_ptr, + const T* soft_out_ptr, + const T* ctx_bufB_ptr, + const T* attn_o_inp_ptr, + const T* add_res_ptr, + const T* ff1_inp_ptr, + const T* gelu_inp_ptr, + const T* ff2_inp_ptr, + const T* input_mask_ptr, + const T* attn_qkvw_ptr, + const T* attn_ow_ptr, + const T* attn_nw_ptr, + const T* attn_nb_ptr, + const T* inter_w_ptr, + const T* inter_b_ptr, + const T* output_w_ptr, + const T* norm_w_ptr, + const T* norm_b_ptr, + + T* grad_input_ptr, + T* grad_attn_qkvw_ptr, + T* grad_attn_qkvb_ptr, + T* grad_attn_ow_ptr, + T* grad_attn_ob_ptr, + T* grad_attn_nw_ptr, + T* grad_attn_nb_ptr, + T* grad_inter_w_ptr, + T* grad_inter_b_ptr, + T* grad_output_w_ptr, + T* grad_output_b_ptr, + T* grad_norm_w_ptr, + T* grad_norm_b_ptr) +{ + cublasSetStream(_cublasHandle, _stream); + + if (!_stochastic_mode) cudaStreamSynchronize(_stream); + + T* workspace = static_cast(TrainingContext::Instance().GetWorkSpace()); + size_t small_buf_size = bsz * _seq_length * _hidden_size; + T* buf_0 = workspace; + T* buf_1 = buf_0 + small_buf_size; + T* buf_2 = buf_1 + small_buf_size; + T* buf_3 = buf_2 + small_buf_size; + + T* ff2_buf = (_gelu_checkpoint ? buf_3 + (bsz * _seq_length * _intermediate_size) + : buf_3 + small_buf_size); + T* ctx_bufB_ptr_recomp = ff2_buf + (_seq_length * _seq_length * bsz * _heads); + + cudaStream_t streams[2] = {_stream, _stream}; + + int bsz_seq = bsz * _seq_length; + int bsz_heads = bsz * _heads; + + if (!_pre_or_postLayerNorm) { + if (_layer_norm.UseMean()) + _layer_norm.Backward(bsz_seq, + grad_output_ptr, + norm_w_ptr, + grad_norm_w_ptr, + grad_norm_b_ptr, + streams, + buf_1, + inp_norm_ptr); + + else + _layer_norm.Backward(bsz_seq, + grad_output_ptr, + norm_w_ptr, + norm_b_ptr, + grad_norm_w_ptr, + grad_norm_b_ptr, + streams, + buf_1, + output_ptr); + } + + if (_pre_or_postLayerNorm) + _layer_output_dropout.Backward(bsz_seq, buf_0, grad_output_ptr, _stream); + else + _layer_output_dropout.Backward(bsz_seq, buf_0, buf_1, _stream); + + const T* layer_dropout_buf = _layer_output_dropout.HasDropout() + ? buf_0 + : (_pre_or_postLayerNorm ? grad_output_ptr : buf_1); + + if (_gelu_checkpoint) + _gelu.ForwardWithBiasAdd(bsz_seq, ff2_inp_ptr, inter_b_ptr, buf_2, _stream); + _ff2.Backward(bsz_seq, + layer_dropout_buf, + (_gelu_checkpoint ? buf_2 : ff2_inp_ptr), + output_w_ptr, + grad_output_w_ptr, + grad_output_b_ptr, + _cublasHandle, + _stream, + ff2_buf); + + _gelu.Backward( + bsz_seq, ff2_buf, (_gelu_checkpoint ? 
ff2_inp_ptr : gelu_inp_ptr), inter_b_ptr, _stream); + + _ff1.Backward(bsz_seq, + ff2_buf, + ff1_inp_ptr, + inter_w_ptr, + grad_inter_w_ptr, + grad_inter_b_ptr, + _cublasHandle, + _stream, + buf_3); + + if (!_pre_or_postLayerNorm) + launch_fused_add2(buf_2, buf_3, buf_1, bsz, _seq_length, _hidden_size, _stream); + + if (_pre_or_postLayerNorm) { + if (_attn_layer_norm.UseMean()) + _attn_layer_norm.BackwardFusedAdd(bsz_seq, + buf_3, + grad_output_ptr, + attn_nw_ptr, + grad_attn_nw_ptr, + grad_attn_nb_ptr, + streams, + buf_0, + add_res_ptr); + + else + _attn_layer_norm.BackwardFusedAdd(bsz_seq, + buf_3, + grad_output_ptr, + attn_nw_ptr, + attn_nb_ptr, + grad_attn_nw_ptr, + grad_attn_nb_ptr, + streams, + buf_0, + ff1_inp_ptr); + } else { + if (_attn_layer_norm.UseMean()) + _attn_layer_norm.Backward(bsz_seq, + buf_2, + attn_nw_ptr, + grad_attn_nw_ptr, + grad_attn_nb_ptr, + streams, + buf_0, + add_res_ptr); + + else + _attn_layer_norm.Backward(bsz_seq, + buf_2, + attn_nw_ptr, + attn_nb_ptr, + grad_attn_nw_ptr, + grad_attn_nb_ptr, + streams, + buf_0, + ff1_inp_ptr); + } + + _attn_output_dropout.Backward(bsz_seq, buf_2, buf_0, _stream); + + T* attn_output_dropout_buf = _attn_output_dropout.HasDropout() ? buf_2 : buf_0; + + _attn_out_linear.Backward(bsz_seq, + attn_output_dropout_buf, + attn_o_inp_ptr, + attn_ow_ptr, + grad_attn_ow_ptr, + grad_attn_ob_ptr, + _cublasHandle, + _stream, + buf_1); + + launch_transform_0213(buf_2, buf_1, bsz, _seq_length, _hidden_size, _heads, _stream); + + if (_attn_prob_dropout.HasDropout()) { + if (_attn_dropout_checkpoint) + _attn_prob_dropout.Forward( + bsz_heads * _seq_length, ctx_bufB_ptr_recomp, soft_out_ptr, _stream, true); + + _attn_context.Backward(bsz_heads, + buf_2, + v_tf_ptr, + (_attn_dropout_checkpoint ? ctx_bufB_ptr_recomp : ctx_bufB_ptr), + _cublasHandle, + buf_3, + ff2_buf); + } else + _attn_context.Backward( + bsz_heads, buf_2, v_tf_ptr, soft_out_ptr, _cublasHandle, buf_3, ff2_buf); + + _attn_prob_dropout.Backward(bsz_heads * _seq_length, ff2_buf, _stream); + + _softmax.Backward(bsz, ff2_buf, soft_out_ptr, _stream); + + _attn_scores.Backward(bsz_heads, ff2_buf, k_tf_ptr, q_tf_ptr, _cublasHandle, buf_2, buf_1); + + launch_transform4d_0213(ff2_buf, buf_1, bsz, _heads, _seq_length, _hidden_size, _stream, 3); + + if (_pre_or_postLayerNorm) + _qkv_linear.Backward(bsz_seq, + ff2_buf, + inp_norm_ptr, + attn_qkvw_ptr, + grad_attn_qkvw_ptr, + grad_attn_qkvb_ptr, + _cublasHandle, + _stream, + buf_2); + else + _qkv_linear.Backward(bsz_seq, + ff2_buf, + input_ptr, + attn_qkvw_ptr, + grad_attn_qkvw_ptr, + grad_attn_qkvb_ptr, + _cublasHandle, + _stream, + buf_2); + + if (_pre_or_postLayerNorm) { + if (_layer_norm.UseMean()) + _layer_norm.BackwardFusedAdd(bsz_seq, + buf_2, + buf_0, + norm_w_ptr, + grad_norm_w_ptr, + grad_norm_b_ptr, + streams, + grad_input_ptr, + input_ptr); + + else + _layer_norm.BackwardFusedAdd(bsz_seq, + buf_2, + buf_0, + norm_w_ptr, + norm_b_ptr, + grad_norm_w_ptr, + grad_norm_b_ptr, + streams, + grad_input_ptr, + inp_norm_ptr); + } else + launch_fused_add2(grad_input_ptr, buf_2, buf_0, bsz, _seq_length, _hidden_size, _stream); +} + +template +void BertTransformerLayer::SetTrainingMode(bool training) +{ + // Dropout will be skipped when not in training model. 
+ _attn_prob_dropout.SetTrainingMode(training); + _attn_output_dropout.SetTrainingMode(training); + _layer_output_dropout.SetTrainingMode(training); +} + +template +void BertTransformerLayer::SetIntermediateBuffers(uint8_t* attn_prob_dropout_mask_ptr, + uint8_t* attn_output_dropout_mask_ptr, + uint8_t* layer_output_dropout_mask_ptr, + T* attn_layer_norm_var, + T* attn_layer_norm_mean, + T* layer_norm_var, + T* layer_norm_mean) +{ + _attn_prob_dropout.SetMask(attn_prob_dropout_mask_ptr); + _attn_output_dropout.SetMask(attn_output_dropout_mask_ptr); + _layer_output_dropout.SetMask(layer_output_dropout_mask_ptr); + + _attn_layer_norm.SetVar(attn_layer_norm_var); + _attn_layer_norm.SetMean(attn_layer_norm_mean); + _layer_norm.SetVar(layer_norm_var); + _layer_norm.SetMean(layer_norm_mean); +} + +template +void BertTransformerLayer::SetSeqLength(unsigned seq_len) +{ + _seq_length = seq_len; + + _softmax.SetSeqLength(_seq_length); + _attn_prob_dropout.SetDimension(_seq_length); + _attn_scores.SetConfig(_seq_length, _seq_length, _hidden_size / _heads); + _attn_context.SetConfig(_hidden_size / _heads, _seq_length, _seq_length); +} + +template +int create_transformer_layer(unsigned layer_id, + unsigned batch_size, + unsigned hidden_dim, + unsigned num_heads, + unsigned intermediate_size, + float attn_dropout_ratio, + float hidden_dropout_ratio, + float layer_norm_eps, + int seed, + bool pre_or_postLayerNorm, + bool test_gemm, + bool attn_dropout_checkpoint, + bool normalize_invertible, + bool gelu_checkpoint, + bool stochastic_mode) +{ + TrainingContext::Instance().SetSeed(seed); + TrainingContext::Instance().TestGemmFP16( + test_gemm, batch_size, init_seq_length, num_heads, hidden_dim / num_heads); + + auto layer = + std::make_shared>(layer_id, + batch_size, + hidden_dim, + num_heads, + intermediate_size, + init_seq_length, + attn_dropout_ratio, + hidden_dropout_ratio, + layer_norm_eps, + pre_or_postLayerNorm, + TrainingContext::Instance().GetGemmAlgos(), + attn_dropout_checkpoint, + normalize_invertible, + gelu_checkpoint, + stochastic_mode); + + s_transformer_layers[layer_id] = layer; + + std::string dtype = (std::is_same::value) ? "half" : "float"; + + std::cout << "layer #" << layer_id << " is created with date type [" << dtype << "]." 
+ << std::endl; + + return 0; +} + +template +std::vector ds_transformer_forward(unsigned layer_id, + const torch::Tensor& input, + const torch::Tensor& input_mask, + const torch::Tensor& attn_qkvw, + const torch::Tensor& attn_qkvb, + const torch::Tensor& attn_ow, + const torch::Tensor& attn_ob, + const torch::Tensor& attn_nw, + const torch::Tensor& attn_nb, + const torch::Tensor& inter_w, + const torch::Tensor& inter_b, + const torch::Tensor& output_w, + const torch::Tensor& output_b, + const torch::Tensor& norm_w, + const torch::Tensor& norm_b, + bool training_mode, + bool prelayernorm, + bool attn_dropout_checkpoint, + bool normalize_invertible, + bool gelu_checkpoint) +{ + CHECK_INPUT(input); + CHECK_INPUT(input_mask); + CHECK_INPUT(attn_qkvw); + CHECK_INPUT(attn_qkvb); + CHECK_INPUT(attn_ow); + CHECK_INPUT(attn_ob); + CHECK_INPUT(attn_nw); + CHECK_INPUT(attn_nb); + CHECK_INPUT(inter_w); + CHECK_INPUT(inter_b); + CHECK_INPUT(output_w); + CHECK_INPUT(output_b); + CHECK_INPUT(norm_w); + CHECK_INPUT(norm_b); + + unsigned bsz = input.size(0); + + const T* input_ptr = (const T*)input.data_ptr(); + const T* input_mask_ptr = (const T*)input_mask.data_ptr(); + const T* attn_qkvw_ptr = (const T*)attn_qkvw.data_ptr(); + const T* attn_qkvb_ptr = (const T*)attn_qkvb.data_ptr(); + const T* attn_ow_ptr = (const T*)attn_ow.data_ptr(); + const T* attn_ob_ptr = (const T*)attn_ob.data_ptr(); + const T* attn_nw_ptr = (const T*)attn_nw.data_ptr(); + const T* attn_nb_ptr = (const T*)attn_nb.data_ptr(); + const T* inter_w_ptr = (const T*)inter_w.data_ptr(); + const T* inter_b_ptr = (const T*)inter_b.data_ptr(); + const T* output_w_ptr = (const T*)output_w.data_ptr(); + const T* output_b_ptr = (const T*)output_b.data_ptr(); + const T* norm_w_ptr = (const T*)norm_w.data_ptr(); + const T* norm_b_ptr = (const T*)norm_b.data_ptr(); + + auto output = torch::empty_like(input); + T* out_ptr = (T*)output.data_ptr(); + + auto options = torch::TensorOptions() + .dtype(input.options().dtype()) + .layout(torch::kStrided) + .device(torch::kCUDA) + .requires_grad(true); + + auto uint8_options = torch::TensorOptions() + .dtype(torch::kInt8) + .layout(torch::kStrided) + .device(torch::kCUDA) + .requires_grad(false); + + std::shared_ptr> layer = + std::static_pointer_cast>(s_transformer_layers[layer_id]); + + unsigned seq_len = layer->GetSeqLength(); + if (input.size(1) != seq_len) { + seq_len = input.size(1); + layer->SetSeqLength(seq_len); + } + + auto workspace = torch::empty({get_workspace_size(bsz, + seq_len, + layer->GetHiddenSize(), + layer->GetIntermediateSize(), + layer->GetNumHeads(), + layer->IsTrainingMode(), + layer->GeluCheckpoint())}, + options); + TrainingContext::Instance().SetWorkSpace((T*)workspace.data_ptr()); + + auto inp_norm = ((prelayernorm || !normalize_invertible) ? torch::empty_like(input) : output); + auto add_res = (normalize_invertible ? 
inp_norm : torch::empty_like(input)); + auto attn_o_inp = torch::empty_like(input); + auto qkv_tf = torch::empty({(bsz * seq_len), output_w.size(0) * 3}, options); + + auto attn_prob_dropout_mask = + torch::empty({(bsz * layer->GetNumHeads() * seq_len), seq_len}, uint8_options); + auto attn_output_dropout_mask = + torch::empty({(bsz * seq_len), layer->GetHiddenSize()}, uint8_options); + auto layer_output_dropout_mask = + torch::empty({(bsz * seq_len), layer->GetHiddenSize()}, uint8_options); + + auto attn_layer_norm_var = torch::empty({(bsz * seq_len)}, options); + auto attn_layer_norm_mean = torch::empty({(bsz * seq_len)}, options); + auto layer_norm_var = torch::empty({(bsz * seq_len)}, options); + auto layer_norm_mean = torch::empty({(bsz * seq_len)}, options); + + T* inp_norm_ptr = (T*)inp_norm.data_ptr(); + T* add_res_ptr = (T*)add_res.data_ptr(); + T* q_tf_ptr = (T*)qkv_tf.data_ptr(); + T* k_tf_ptr = q_tf_ptr + (bsz * seq_len * output_w.size(0)); //(T*)k_tf.data_ptr(); + T* v_tf_ptr = k_tf_ptr + (bsz * seq_len * output_w.size(0)); //(T*)v_tf.data_ptr(); + T* attn_o_inp_ptr = (T*)attn_o_inp.data_ptr(); + + torch::Tensor ff2_inp = torch::empty({(bsz * seq_len), output_w.size(1)}, options); + torch::Tensor gelu_inp = + (gelu_checkpoint ? ff2_inp : torch::empty({(bsz * seq_len), output_w.size(1)}, options)); + auto ff1_inp = torch::empty_like(input); + T* ff2_inp_ptr = (T*)ff2_inp.data_ptr(); + T* gelu_inp_ptr = (T*)gelu_inp.data_ptr(); + T* ff1_inp_ptr = (T*)ff1_inp.data_ptr(); + + torch::Tensor soft_out = + torch::empty({(bsz * layer->GetNumHeads() * seq_len), seq_len}, options); + torch::Tensor ctx_bufB = + (attn_dropout_checkpoint + ? soft_out + : torch::empty({(bsz * layer->GetNumHeads() * seq_len), seq_len}, options)); + T* soft_out_ptr = (T*)soft_out.data_ptr(); + T* ctx_bufB_ptr = (T*)ctx_bufB.data_ptr(); + + layer->SetTrainingMode(training_mode); + layer->SetIntermediateBuffers((uint8_t*)attn_prob_dropout_mask.data_ptr(), + (uint8_t*)attn_output_dropout_mask.data_ptr(), + (uint8_t*)layer_output_dropout_mask.data_ptr(), + (T*)attn_layer_norm_var.data_ptr(), + (T*)attn_layer_norm_mean.data_ptr(), + (T*)layer_norm_var.data_ptr(), + (T*)layer_norm_mean.data_ptr()); + + layer->Forward(bsz, + input_ptr, + input_mask_ptr, + attn_qkvw_ptr, + attn_qkvb_ptr, + attn_ow_ptr, + attn_ob_ptr, + attn_nw_ptr, + attn_nb_ptr, + inter_w_ptr, + inter_b_ptr, + output_w_ptr, + output_b_ptr, + norm_w_ptr, + norm_b_ptr, + out_ptr, + inp_norm_ptr, + q_tf_ptr, + k_tf_ptr, + v_tf_ptr, + soft_out_ptr, + ctx_bufB_ptr, + attn_o_inp_ptr, + add_res_ptr, + ff1_inp_ptr, + gelu_inp_ptr, + ff2_inp_ptr); + + return {output, + inp_norm, + qkv_tf, + soft_out, + ctx_bufB, + attn_o_inp, + add_res, + ff1_inp, + gelu_inp, + ff2_inp, + attn_prob_dropout_mask, + attn_output_dropout_mask, + layer_output_dropout_mask, + attn_layer_norm_var, + attn_layer_norm_mean, + layer_norm_var, + layer_norm_mean}; +} + +template +std::vector ds_transformer_backward(unsigned layer_id, + const torch::Tensor& grad_output, + const torch::Tensor& output, + const torch::Tensor& inp_norm, + const torch::Tensor& qkv_tf, + const torch::Tensor& soft_out, + const torch::Tensor& ctx_bufB, + const torch::Tensor& attn_o_inp, + const torch::Tensor& add_res, + const torch::Tensor& ff1_inp, + const torch::Tensor& gelu_inp, + const torch::Tensor& ff2_inp, + const torch::Tensor& attn_prob_dropout_mask, + const torch::Tensor& attn_output_dropout_mask, + const torch::Tensor& layer_output_dropout_mask, + const torch::Tensor& attn_layer_norm_var, + const 
torch::Tensor& attn_layer_norm_mean, + const torch::Tensor& layer_norm_var, + const torch::Tensor& layer_norm_mean, + const torch::Tensor& input, + const torch::Tensor& input_mask, + const torch::Tensor& attn_qkvw, + const torch::Tensor& attn_qkvb, + const torch::Tensor& attn_ow, + const torch::Tensor& attn_ob, + const torch::Tensor& attn_nw, + const torch::Tensor& attn_nb, + const torch::Tensor& inter_w, + const torch::Tensor& inter_b, + const torch::Tensor& output_w, + const torch::Tensor& output_b, + const torch::Tensor& norm_w, + const torch::Tensor& norm_b) +{ + auto g_output = grad_output.contiguous(); + CHECK_INPUT(g_output); + CHECK_INPUT(output); + CHECK_INPUT(inp_norm); + CHECK_INPUT(qkv_tf); + CHECK_INPUT(add_res); + CHECK_INPUT(soft_out); + CHECK_INPUT(ctx_bufB); + CHECK_INPUT(attn_o_inp); + CHECK_INPUT(ff1_inp); + CHECK_INPUT(gelu_inp); + CHECK_INPUT(ff2_inp); + CHECK_INPUT(input); + CHECK_INPUT(input_mask); + CHECK_INPUT(attn_qkvw); + CHECK_INPUT(attn_qkvb); + CHECK_INPUT(attn_ow); + CHECK_INPUT(attn_ob); + CHECK_INPUT(attn_nw); + CHECK_INPUT(attn_nb); + CHECK_INPUT(inter_w); + CHECK_INPUT(inter_b); + CHECK_INPUT(output_w); + CHECK_INPUT(output_b); + CHECK_INPUT(norm_w); + CHECK_INPUT(norm_b); + + unsigned bsz = g_output.size(0); + + std::shared_ptr> layer = + std::static_pointer_cast>(s_transformer_layers[layer_id]); + + unsigned seq_len = layer->GetSeqLength(); + if (g_output.size(1) != seq_len) { + seq_len = g_output.size(1); + layer->SetSeqLength(seq_len); + } + auto options = torch::TensorOptions() + .dtype(g_output.options().dtype()) + .layout(torch::kStrided) + .device(torch::kCUDA) + .requires_grad(true); + auto workspace = torch::empty({get_workspace_size(bsz, + seq_len, + layer->GetHiddenSize(), + layer->GetIntermediateSize(), + layer->GetNumHeads(), + layer->IsTrainingMode(), + layer->GeluCheckpoint())}, + options); + TrainingContext::Instance().SetWorkSpace((T*)workspace.data_ptr()); + + auto grad_input = torch::empty_like(input); + auto grad_attn_qkvw = torch::empty_like(attn_qkvw); + auto grad_attn_qkvb = torch::empty_like(attn_qkvb); + auto grad_attn_ow = torch::empty_like(attn_ow); + auto grad_attn_ob = torch::empty_like(attn_ob); + auto grad_attn_nw = torch::empty_like(attn_nw); + auto grad_attn_nb = torch::empty_like(attn_nb); + auto grad_inter_w = torch::empty_like(inter_w); + auto grad_inter_b = torch::empty_like(inter_b); + auto grad_output_w = torch::empty_like(output_w); + auto grad_output_b = torch::empty_like(output_b); + auto grad_norm_w = torch::empty_like(norm_w); + auto grad_norm_b = torch::empty_like(norm_b); + + // inputs. 
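+    // Note: q/k/v were packed into the single qkv_tf tensor by the forward
+    // pass, so k_tf_ptr and v_tf_ptr below are plain offsets of
+    // bsz * seq_len * hidden_size elements into that buffer rather than
+    // separate allocations (output_w.size(0) == hidden_size here).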
+ const T* grad_output_ptr = (const T*)g_output.data_ptr(); + const T* input_ptr = (const T*)input.data_ptr(); + const T* output_ptr = (const T*)output.data_ptr(); + const T* inp_norm_ptr = (const T*)inp_norm.data_ptr(); + const T* q_tf_ptr = (const T*)qkv_tf.data_ptr(); + const T* add_res_ptr = (const T*)add_res.data_ptr(); + const T* k_tf_ptr = + q_tf_ptr + (bsz * layer->GetSeqLength() * output_w.size(0)); //(const T*)k_tf.data_ptr(); + const T* v_tf_ptr = + k_tf_ptr + (bsz * layer->GetSeqLength() * output_w.size(0)); //(const T*)v_tf.data_ptr(); + const T* ff1_inp_ptr = (const T*)ff1_inp.data_ptr(); + const T* gelu_inp_ptr = (const T*)gelu_inp.data_ptr(); + const T* ff2_inp_ptr = (const T*)ff2_inp.data_ptr(); + const T* ctx_bufB_ptr = (const T*)ctx_bufB.data_ptr(); + const T* soft_out_ptr = (const T*)soft_out.data_ptr(); + const T* attn_o_inp_ptr = (const T*)attn_o_inp.data_ptr(); + const T* input_mask_ptr = (const T*)input_mask.data_ptr(); + const T* attn_qkvw_ptr = (const T*)attn_qkvw.data_ptr(); + const T* attn_ow_ptr = (const T*)attn_ow.data_ptr(); + const T* attn_nw_ptr = (const T*)attn_nw.data_ptr(); + const T* attn_nb_ptr = (const T*)attn_nb.data_ptr(); + const T* inter_w_ptr = (const T*)inter_w.data_ptr(); + const T* inter_b_ptr = (const T*)inter_b.data_ptr(); + const T* output_w_ptr = (const T*)output_w.data_ptr(); + const T* norm_w_ptr = (const T*)norm_w.data_ptr(); + const T* norm_b_ptr = (const T*)norm_b.data_ptr(); + + // outputs. + T* grad_input_ptr = (T*)grad_input.data_ptr(); + T* grad_attn_qkvw_ptr = (T*)grad_attn_qkvw.data_ptr(); + T* grad_attn_qkvb_ptr = (T*)grad_attn_qkvb.data_ptr(); + T* grad_attn_ow_ptr = (T*)grad_attn_ow.data_ptr(); + T* grad_attn_ob_ptr = (T*)grad_attn_ob.data_ptr(); + T* grad_attn_nw_ptr = (T*)grad_attn_nw.data_ptr(); + T* grad_attn_nb_ptr = (T*)grad_attn_nb.data_ptr(); + T* grad_inter_w_ptr = (T*)grad_inter_w.data_ptr(); + T* grad_inter_b_ptr = (T*)grad_inter_b.data_ptr(); + T* grad_output_w_ptr = (T*)grad_output_w.data_ptr(); + T* grad_output_b_ptr = (T*)grad_output_b.data_ptr(); + T* grad_norm_w_ptr = (T*)grad_norm_w.data_ptr(); + T* grad_norm_b_ptr = (T*)grad_norm_b.data_ptr(); + + layer->SetIntermediateBuffers((uint8_t*)attn_prob_dropout_mask.data_ptr(), + (uint8_t*)attn_output_dropout_mask.data_ptr(), + (uint8_t*)layer_output_dropout_mask.data_ptr(), + (T*)attn_layer_norm_var.data_ptr(), + (T*)attn_layer_norm_mean.data_ptr(), + (T*)layer_norm_var.data_ptr(), + (T*)layer_norm_mean.data_ptr()); + + layer->Backward(bsz, + grad_output_ptr, + input_ptr, + output_ptr, + inp_norm_ptr, + q_tf_ptr, + k_tf_ptr, + v_tf_ptr, + soft_out_ptr, + ctx_bufB_ptr, + attn_o_inp_ptr, + add_res_ptr, + ff1_inp_ptr, + gelu_inp_ptr, + ff2_inp_ptr, + input_mask_ptr, + attn_qkvw_ptr, + attn_ow_ptr, + attn_nw_ptr, + attn_nb_ptr, + inter_w_ptr, + inter_b_ptr, + output_w_ptr, + norm_w_ptr, + norm_b_ptr, + + grad_input_ptr, + grad_attn_qkvw_ptr, + grad_attn_qkvb_ptr, + grad_attn_ow_ptr, + grad_attn_ob_ptr, + grad_attn_nw_ptr, + grad_attn_nb_ptr, + grad_inter_w_ptr, + grad_inter_b_ptr, + grad_output_w_ptr, + grad_output_b_ptr, + grad_norm_w_ptr, + grad_norm_b_ptr); + + return {grad_input, + grad_attn_qkvw, + grad_attn_qkvb, + grad_attn_ow, + grad_attn_ob, + grad_attn_nw, + grad_attn_nb, + grad_inter_w, + grad_inter_b, + grad_output_w, + grad_output_b, + grad_norm_w, + grad_norm_b}; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("forward_fp32", + &ds_transformer_forward, + "DeepSpeed Transformer forward with fp32 (CUDA)"); + m.def("forward_fp16", + 
&ds_transformer_forward<__half>, + "DeepSpeed Transformer forward with fp16 (CUDA)"); + m.def("backward_fp32", + &ds_transformer_backward, + "DeepSpeed Transformer backward with fp32 (CUDA)"); + m.def("backward_fp16", + &ds_transformer_backward<__half>, + "DeepSpeed Transformer backward with fp16 (CUDA)"); + m.def("create_transformer_layer_fp32", + &create_transformer_layer, + "Create DeepSpeed Transformer Transformer Layer with fp32 (CUDA)"); + m.def("create_transformer_layer_fp16", + &create_transformer_layer<__half>, + "Create DeepSpeed Transformer Transformer Layer with fp16 (CUDA)"); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/gelu_kernels.cu b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/gelu_kernels.cu new file mode 100644 index 0000000000000000000000000000000000000000..273891b91923056fe3468600827f538c9cee5009 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/gelu_kernels.cu @@ -0,0 +1,335 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "custom_cuda_layers.h" + +inline __device__ float gelu(const float x) +{ + const float sqrt_param = 0.79788456080286535587989211986876f; + const float mul_param = 0.044715; + return x * 0.5f * (1.0f + tanhf(sqrt_param * (x + mul_param * x * x * x))); +} + +inline __device__ float d_gelu(const float x) +{ + const float sqrt_param = 0.79788456080286535587989211986876f; + const float mul_param = 0.044715; + + float x2mul = x * x * mul_param; + float tan_h = tanhf(sqrt_param * (x + x * x2mul)); + float dg1 = 0.5f * (1.0f + tan_h); + float dg2 = x * 0.5f * sqrt_param * (1 - tan_h * tan_h); + float dg3 = dg2 * 3 * x2mul; + return (dg1 + dg2 + dg3); +} + +/* +Fused bias add with GELU + +Loads a vector of 4 elements each iteration, for stride +iterations. It was written with the intention to launch 256 thread +threadblocks, so to launch for bert-large, we would set ITERATIONS +to 4. This is currently done automatically as a heuristic, setting +the number of iterations as blocks of 1024. + +For FP16, the values are loaded from memory as __half, but converted +to FP32 for the arithmetic itself, to prevent numerous overflow on +the intermediate hyperbolic tangent, since there's no intrinsic +that computes it directly. 
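+
+The gelu() device function below uses the standard tanh approximation
+
+    gelu(x) = 0.5 * x * (1 + tanh( sqrt(2/pi) * (x + 0.044715 * x^3) )),
+
+with sqrt(2/pi) ~= 0.79788456. A host-side reference of the same formula, as a
+minimal sketch for sanity-checking kernel output (illustrative only):
+
+    float gelu_ref(float x)
+    {
+        const float c = 0.79788456080286535f;  // sqrt(2 / pi)
+        return 0.5f * x * (1.0f + tanhf(c * (x + 0.044715f * x * x * x)));
+    }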
+*/ + +__global__ void gelu_kernel(const float* input, float* vals, int row_stride, int iterations) +{ + int row = blockIdx.x; + int id = threadIdx.x; + int loop_stride = blockDim.x; + + const float4* input_cast = reinterpret_cast(input); + float4* vals_cast = reinterpret_cast(vals); + + for (int i = 0; i < iterations; i++) { + if (i * loop_stride + id < row_stride) { + float4 data = input_cast[row * row_stride + i * loop_stride + id]; + + data.x = gelu(data.x); + data.y = gelu(data.y); + data.z = gelu(data.z); + data.w = gelu(data.w); + + vals_cast[row * row_stride + i * loop_stride + id] = data; + } + } +} + +__global__ void gelu_kernel(const __half* input, __half* vals, int row_stride, int iterations) +{ +#ifdef HALF_PRECISION_AVAILABLE + int row = blockIdx.x; + int id = threadIdx.x; + int loop_stride = blockDim.x; + + const float2* input_cast = reinterpret_cast(input); + float2* vals_cast = reinterpret_cast(vals); + + for (int i = 0; i < iterations; i++) { + if (i * loop_stride + id < row_stride) { + float2 vals_vec = input_cast[row * row_stride + i * loop_stride + id]; + + __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); + + float2 low_data = __half22float2(vals_half[0]); + float2 high_data = __half22float2(vals_half[1]); + + low_data.x = gelu(low_data.x); + low_data.y = gelu(low_data.y); + high_data.x = gelu(high_data.x); + high_data.y = gelu(high_data.y); + + vals_half[0] = __float22half2_rn(low_data); + vals_half[1] = __float22half2_rn(high_data); + + vals_cast[row * row_stride + i * loop_stride + id] = vals_vec; + } + } +#endif +} + +__global__ void fused_bias_gelu(const float* input, + const float* bias, + float* vals, + int row_stride, + int iterations) +{ + int row = blockIdx.x; + int id = threadIdx.x; + int loop_stride = blockDim.x; + + const float4* input_cast = reinterpret_cast(input); + float4* vals_cast = reinterpret_cast(vals); + const float4* bias_cast = reinterpret_cast(bias); + + for (int i = 0; i < iterations; i++) { + if (i * loop_stride + id < row_stride) { + float4 data = input_cast[row * row_stride + i * loop_stride + id]; + float4 bias_data = bias_cast[i * loop_stride + id]; + + data.x += bias_data.x; + data.y += bias_data.y; + data.z += bias_data.z; + data.w += bias_data.w; + + data.x = gelu(data.x); + data.y = gelu(data.y); + data.z = gelu(data.z); + data.w = gelu(data.w); + + vals_cast[row * row_stride + i * loop_stride + id] = data; + } + } +} + +__global__ void fused_bias_gelu(const __half* input, + const __half* bias, + __half* vals, + int row_stride, + int iterations) +{ +#ifdef HALF_PRECISION_AVAILABLE + int row = blockIdx.x; + int id = threadIdx.x; + int loop_stride = blockDim.x; + + const float2* input_cast = reinterpret_cast(input); + float2* vals_cast = reinterpret_cast(vals); + const float2* bias_cast = reinterpret_cast(bias); + + for (int i = 0; i < iterations; i++) { + if (i * loop_stride + id < row_stride) { + float2 vals_vec = input_cast[row * row_stride + i * loop_stride + id]; + float2 bias_vec = bias_cast[i * loop_stride + id]; + + __half2* vals_half = reinterpret_cast<__half2*>(&vals_vec); + __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); + + float2 low_data = __half22float2(vals_half[0]); + float2 high_data = __half22float2(vals_half[1]); + + float2 low_bias = __half22float2(bias_half[0]); + float2 high_bias = __half22float2(bias_half[1]); + + low_data.x += low_bias.x; + low_data.y += low_bias.y; + high_data.x += high_bias.x; + high_data.y += high_bias.y; + + low_data.x = gelu(low_data.x); + low_data.y = 
gelu(low_data.y); + high_data.x = gelu(high_data.x); + high_data.y = gelu(high_data.y); + + vals_half[0] = __float22half2_rn(low_data); + vals_half[1] = __float22half2_rn(high_data); + + vals_cast[row * row_stride + i * loop_stride + id] = vals_vec; + } + } +#endif +} + +__global__ void d_gelu_func(float* d_output, + const float* gelu_input, + const float* bias, + int row_stride, + int iterations) +{ + int row = blockIdx.x; + int id = threadIdx.x; + int loop_stride = blockDim.x; + + float4* d_output_cast = reinterpret_cast(d_output); + const float4* gelu_input_cast = reinterpret_cast(gelu_input); + const float4* bias_cast = reinterpret_cast(bias); + + for (int i = 0; i < iterations; i++) { + if (i * loop_stride + id < row_stride) { + float4 output_data = d_output_cast[row * row_stride + i * loop_stride + id]; + float4 gelu_input_data = gelu_input_cast[row * row_stride + i * loop_stride + id]; + float4 bias_data = bias_cast[i * loop_stride + id]; + + gelu_input_data.x += bias_data.x; + gelu_input_data.y += bias_data.y; + gelu_input_data.z += bias_data.z; + gelu_input_data.w += bias_data.w; + + output_data.x *= d_gelu(gelu_input_data.x); + output_data.y *= d_gelu(gelu_input_data.y); + output_data.z *= d_gelu(gelu_input_data.z); + output_data.w *= d_gelu(gelu_input_data.w); + + d_output_cast[row * row_stride + i * loop_stride + id] = output_data; + } + } +} + +__global__ void d_gelu_func(__half* d_output, + const __half* gelu_input, + const __half* bias, + int row_stride, + int iterations) +{ +#ifdef HALF_PRECISION_AVAILABLE + int row = blockIdx.x; + int id = threadIdx.x; + int loop_stride = blockDim.x; + + float2* d_output_cast = reinterpret_cast(d_output); + const float2* gelu_input_cast = reinterpret_cast(gelu_input); + const float2* bias_cast = reinterpret_cast(bias); + +#pragma unroll + for (int i = 0; i < iterations; i++) { + if (i * loop_stride + id < row_stride) { + float2 output_data = d_output_cast[row * row_stride + i * loop_stride + id]; + float2 gelu_input_data = gelu_input_cast[row * row_stride + i * loop_stride + id]; + float2 bias_vec = bias_cast[i * loop_stride + id]; + + __half2* output_data_half = reinterpret_cast<__half2*>(&output_data); + __half2* gelu_input_data_half = reinterpret_cast<__half2*>(&gelu_input_data); + __half2* bias_half = reinterpret_cast<__half2*>(&bias_vec); + + float2 output_half_0 = __half22float2(output_data_half[0]); + float2 output_half_1 = __half22float2(output_data_half[1]); + + float2 gelu_input_half_0 = __half22float2(gelu_input_data_half[0]); + float2 gelu_input_half_1 = __half22float2(gelu_input_data_half[1]); + + float2 bias_half_0 = __half22float2(bias_half[0]); + float2 bias_half_1 = __half22float2(bias_half[1]); + + gelu_input_half_0.x += bias_half_0.x; + gelu_input_half_0.y += bias_half_0.y; + gelu_input_half_1.x += bias_half_1.x; + gelu_input_half_1.y += bias_half_1.y; + + output_half_0.x *= d_gelu(gelu_input_half_0.x); + output_half_0.y *= d_gelu(gelu_input_half_0.y); + output_half_1.x *= d_gelu(gelu_input_half_1.x); + output_half_1.y *= d_gelu(gelu_input_half_1.y); + + float2 result; + __half2* result_half2 = reinterpret_cast<__half2*>(&result); + + result_half2[0] = __float22half2_rn(output_half_0); + result_half2[1] = __float22half2_rn(output_half_1); + + d_output_cast[row * row_stride + i * loop_stride + id] = result; + } + } +#endif +} + +template +void launch_bias_gelu(const T* input, + const T* bias, + T* output, + int intermediate_size, + int batch_size, + cudaStream_t stream) +{ + int iterations = (intermediate_size + 1023) / 
1024; + int threads = (intermediate_size - 1) / (iterations * 4) + 1; + dim3 block_dims(threads); + dim3 grid_dims(batch_size); + + fused_bias_gelu<<>>( + input, bias, output, intermediate_size / 4, iterations); +} + +template +void launch_gelu(const T* input, + T* output, + int intermediate_size, + int batch_size, + cudaStream_t stream) +{ + int iterations = (intermediate_size + 1023) / 1024; + int threads = (intermediate_size - 1) / (iterations * 4) + 1; + dim3 block_dims(threads); + dim3 grid_dims(batch_size); + + gelu_kernel<<>>( + input, output, intermediate_size / 4, iterations); +} + +template void launch_bias_gelu(const float*, const float*, float*, int, int, cudaStream_t); +template void launch_bias_gelu<__half>(const __half*, + const __half*, + __half*, + int, + int, + cudaStream_t); + +template void launch_gelu(const float*, float*, int, int, cudaStream_t); +template void launch_gelu<__half>(const __half*, __half*, int, int, cudaStream_t); + +template +void launch_d_gelu(T* d_output, + const T* input, + const T* bias, + int intermediate_size, + int batch_size, + cudaStream_t stream) +{ + int iterations = (intermediate_size + 1023) / 1024; + int threads = (intermediate_size - 1) / (iterations * 4) + 1; + dim3 block_dims(threads); + dim3 grid_dims(batch_size); + + d_gelu_func<<>>( + d_output, input, bias, intermediate_size / 4, iterations); +} + +template void launch_d_gelu(float*, const float*, const float*, int, int, cudaStream_t); +template void launch_d_gelu<__half>(__half*, const __half*, const __half*, int, int, cudaStream_t); diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/general_kernels.cu b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/general_kernels.cu new file mode 100644 index 0000000000000000000000000000000000000000..a987eec5ef0bdc87153422eae635507f44188802 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/general_kernels.cu @@ -0,0 +1,416 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "general_kernels.h" + +namespace cg = cooperative_groups; + +template +__global__ void column_sum_reduce(const T* __restrict__ inp, + T* __restrict__ out, + int rows, + int width) +{ + __shared__ float tile[TILE_DIM][TILE_DIM + 1]; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int idx = blockDim.x * blockIdx.x + threadIdx.x; + + int y_stride = width * TILE_DIM; + + float localSum = 0; + + // Loop across matrix height + if (idx < width) { + int offset = threadIdx.y * width + idx; + for (int r = threadIdx.y; r < rows; r += TILE_DIM) { + localSum += (float)inp[offset]; + offset += y_stride; + } + } + + tile[threadIdx.x][threadIdx.y] = localSum; + + __syncthreads(); + + // Sum the shared buffer. 
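+    // Reading the tile back with swapped indices transposes it: the TILE_DIM
+    // partial sums of one output column now sit in a single tile group, so
+    // they can be combined with g.shfl_down() in log2(TILE_DIM) steps; the
+    // thread with threadIdx.x == 0 then writes the final column sum.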
+ float sum = tile[threadIdx.y][threadIdx.x]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < TILE_DIM; i <<= 1) sum += g.shfl_down(sum, i); + + if (threadIdx.x == 0) { + int pos = blockIdx.x * TILE_DIM + threadIdx.y; + if (pos < width) out[pos] = sum; + } +} + +template +void launch_fuse_transpose_bias_kernel(const T* inp, + T* out, + int rows, + int cols, + cudaStream_t stream); + +template <> +void launch_fuse_transpose_bias_kernel(const float* inp, + float* out, + int rows, + int cols, + cudaStream_t stream) +{ + // assert(rows % TILE_DIM == 0); + // assert(cols % TILE_DIM == 0); + + dim3 grid_dim((cols - 1) / TILE_DIM + 1); + dim3 block_dim(TILE_DIM, TILE_DIM); + + column_sum_reduce<<>>(inp, out, rows, cols); +} + +template <> +void launch_fuse_transpose_bias_kernel<__half>(const __half* inp, + __half* out, + int rows, + int cols, + cudaStream_t stream) +{ + // assert(rows % TILE_DIM == 0); + // assert(cols % TILE_DIM == 0); + + dim3 grid_dim((cols - 1) / TILE_DIM + 1); + dim3 block_dim(TILE_DIM, TILE_DIM); + + column_sum_reduce<__half><<>>(inp, out, rows, cols); +} + +__global__ void fused_add2_kernel(const int N, float* out, const float* inp1, const float* inp2) +{ + const float4* inp1_4 = reinterpret_cast(inp1); + const float4* inp2_4 = reinterpret_cast(inp2); + float4* out_4 = reinterpret_cast(out); + + CUDA_1D_KERNEL_LOOP(j, N) + { + float4 val; + float4 inp1_reg = inp1_4[j]; + float4 inp2_reg = inp2_4[j]; + + val.x = inp1_reg.x + inp2_reg.x; + val.y = inp1_reg.y + inp2_reg.y; + val.z = inp1_reg.z + inp2_reg.z; + val.w = inp1_reg.w + inp2_reg.w; + + out_4[j] = val; + } +} + +__global__ void fused_add2_kernel(const int N, __half* out, const __half* inp1, const __half* inp2) +{ + float2 inp1_4; + float2 inp2_4; + + __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4); + __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4); + + const float2* inp1_arr = reinterpret_cast(inp1); + const float2* inp2_arr = reinterpret_cast(inp2); + + CUDA_1D_KERNEL_LOOP(j, N) + { + inp1_4 = inp1_arr[j]; + inp2_4 = inp2_arr[j]; + + float2 inp1_h_f_0 = __half22float2(inp1_h[0]); + float2 inp1_h_f_1 = __half22float2(inp1_h[1]); + + float2 inp2_h_f_0 = __half22float2(inp2_h[0]); + float2 inp2_h_f_1 = __half22float2(inp2_h[1]); + + inp1_h_f_0.x += inp2_h_f_0.x; + inp1_h_f_0.y += inp2_h_f_0.y; + inp1_h_f_1.x += inp2_h_f_1.x; + inp1_h_f_1.y += inp2_h_f_1.y; + + float2 val_f; + __half2* val_h = reinterpret_cast<__half2*>(&val_f); + + val_h[0] = __float22half2_rn(inp1_h_f_0); + val_h[1] = __float22half2_rn(inp1_h_f_1); + + float2* out_4 = reinterpret_cast(out); + out_4[j] = val_f; + } +} + +template <> +void launch_fused_add2(float* out, + const float* inp1, + const float* inp2, + int batch_size, + int seq_length, + int hidden_dim, + cudaStream_t& stream) +{ + int total_count = batch_size * seq_length * hidden_dim / 4; + dim3 grid_dim = DS_GET_BLOCKS(total_count); //(batch_size * seq_length); + + dim3 block_dim = DS_CUDA_NUM_THREADS; //(hidden_dim / 4); + + fused_add2_kernel<<>>(total_count, out, inp1, inp2); +} + +template <> +void launch_fused_add2<__half>(__half* out, + const __half* inp1, + const __half* inp2, + int batch_size, + int seq_length, + int hidden_dim, + cudaStream_t& stream) +{ + int total_count = batch_size * seq_length * hidden_dim / 4; + dim3 grid_dim = DS_GET_BLOCKS(total_count); //(batch_size * seq_length); + + dim3 block_dim = DS_CUDA_NUM_THREADS; //(hidden_dim / 4); + + fused_add2_kernel<<>>(total_count, out, inp1, inp2); +} + +__global__ void 
fused_add3_kernel(float* out, + const float* inp1, + const float* inp2, + const float* inp3, + int size, + int row_stride) +{ + int row = blockIdx.x; + int id = threadIdx.x; + + const float4* inp1_4 = reinterpret_cast(inp1); + const float4* inp2_4 = reinterpret_cast(inp2); + const float4* inp3_4 = reinterpret_cast(inp3); + + float4* out_4 = reinterpret_cast(out); + + float4 val; + float4 inp1_reg = inp1_4[row * row_stride + id]; + float4 inp2_reg = inp2_4[row * row_stride + id]; + float4 inp3_reg = inp3_4[row * row_stride + id]; + + val.x = inp1_reg.x + inp2_reg.x + inp3_reg.x; + val.y = inp1_reg.y + inp2_reg.y + inp3_reg.y; + val.z = inp1_reg.z + inp2_reg.z + inp3_reg.z; + val.w = inp1_reg.w + inp2_reg.w + inp3_reg.w; + + out_4[row * row_stride + id] = val; +} + +__global__ void fused_add3_kernel(__half* out, + const __half* inp1, + const __half* inp2, + const __half* inp3, + int size, + int row_stride) +{ + int row = blockIdx.x; + int id = threadIdx.x; + const float2* inp1_arr = reinterpret_cast(inp1); + const float2* inp2_arr = reinterpret_cast(inp2); + const float2* inp3_arr = reinterpret_cast(inp3); + + float2 inp1_4 = inp1_arr[row * row_stride + id]; + float2 inp2_4 = inp2_arr[row * row_stride + id]; + float2 inp3_4 = inp3_arr[row * row_stride + id]; + + __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4); + __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4); + __half2* inp3_h = reinterpret_cast<__half2*>(&inp3_4); + + float2 inp1_h_f_0 = __half22float2(inp1_h[0]); + float2 inp1_h_f_1 = __half22float2(inp1_h[1]); + + float2 inp2_h_f_0 = __half22float2(inp2_h[0]); + float2 inp2_h_f_1 = __half22float2(inp2_h[1]); + + float2 inp3_h_f_0 = __half22float2(inp3_h[0]); + float2 inp3_h_f_1 = __half22float2(inp3_h[1]); + + inp1_h_f_0.x += (inp2_h_f_0.x + inp3_h_f_0.x); + inp1_h_f_0.y += (inp2_h_f_0.y + inp3_h_f_0.y); + inp1_h_f_1.x += (inp2_h_f_1.x + inp3_h_f_1.x); + inp1_h_f_1.y += (inp2_h_f_1.y + inp3_h_f_1.y); + + float2 val_f; + __half2* val_h = reinterpret_cast<__half2*>(&val_f); + + val_h[0] = __float22half2_rn(inp1_h_f_0); + val_h[1] = __float22half2_rn(inp1_h_f_1); + + float2* out_4 = reinterpret_cast(out); + out_4[row * row_stride + id] = val_f; +} + +template <> +void launch_fused_add3(float* out, + const float* inp1, + const float* inp2, + const float* inp3, + int batch_size, + int seq_length, + int hidden_size, + cudaStream_t& stream) +{ + dim3 grid_dim(batch_size * seq_length); + + dim3 block_dim(hidden_size / 4); + + fused_add3_kernel<<>>( + out, inp1, inp2, inp3, (batch_size * seq_length * hidden_size), hidden_size / 4); +} + +template <> +void launch_fused_add3<__half>(__half* out, + const __half* inp1, + const __half* inp2, + const __half* inp3, + int batch_size, + int seq_length, + int hidden_size, + cudaStream_t& stream) +{ + dim3 grid_dim(batch_size * seq_length); + + dim3 block_dim(hidden_size / 4); + + fused_add3_kernel<<>>( + out, inp1, inp2, inp3, (batch_size * seq_length * hidden_size), hidden_size / 4); +} + +__global__ void fused_add4_kernel(float* out, + const float* inp1, + const float* inp2, + const float* inp3, + const float* inp4, + int size, + int row_stride) +{ + int row = blockIdx.x; + int id = threadIdx.x; + + const float4* inp1_4 = reinterpret_cast(inp1); + const float4* inp2_4 = reinterpret_cast(inp2); + const float4* inp3_4 = reinterpret_cast(inp3); + const float4* inp4_4 = reinterpret_cast(inp4); + float4* out_4 = reinterpret_cast(out); + + float4 val; + float4 inp1_reg = inp1_4[row * row_stride + id]; + float4 inp2_reg = inp2_4[row * row_stride + 
id]; + float4 inp3_reg = inp3_4[row * row_stride + id]; + float4 inp4_reg = inp4_4[row * row_stride + id]; + + val.x = inp1_reg.x + inp2_reg.x + inp3_reg.x + inp4_reg.x; + val.y = inp1_reg.y + inp2_reg.y + inp3_reg.y + inp4_reg.y; + val.z = inp1_reg.z + inp2_reg.z + inp3_reg.z + inp4_reg.z; + val.w = inp1_reg.w + inp2_reg.w + inp3_reg.w + inp4_reg.w; + + out_4[row * row_stride + id] = val; +} + +__global__ void fused_add4_kernel(__half* out, + const __half* inp1, + const __half* inp2, + const __half* inp3, + const __half* inp4, + int size, + int row_stride) +{ + int row = blockIdx.x; + int id = threadIdx.x; + const float2* inp1_arr = reinterpret_cast(inp1); + const float2* inp2_arr = reinterpret_cast(inp2); + const float2* inp3_arr = reinterpret_cast(inp3); + const float2* inp4_arr = reinterpret_cast(inp4); + + float2 inp1_4 = inp1_arr[row * row_stride + id]; + float2 inp2_4 = inp2_arr[row * row_stride + id]; + float2 inp3_4 = inp3_arr[row * row_stride + id]; + float2 inp4_4 = inp4_arr[row * row_stride + id]; + + __half2* inp1_h = reinterpret_cast<__half2*>(&inp1_4); + __half2* inp2_h = reinterpret_cast<__half2*>(&inp2_4); + __half2* inp3_h = reinterpret_cast<__half2*>(&inp3_4); + __half2* inp4_h = reinterpret_cast<__half2*>(&inp4_4); + + float2 inp1_h_f_0 = __half22float2(inp1_h[0]); + float2 inp1_h_f_1 = __half22float2(inp1_h[1]); + + float2 inp2_h_f_0 = __half22float2(inp2_h[0]); + float2 inp2_h_f_1 = __half22float2(inp2_h[1]); + + float2 inp3_h_f_0 = __half22float2(inp3_h[0]); + float2 inp3_h_f_1 = __half22float2(inp3_h[1]); + + float2 inp4_h_f_0 = __half22float2(inp4_h[0]); + float2 inp4_h_f_1 = __half22float2(inp4_h[1]); + + inp1_h_f_0.x += (inp2_h_f_0.x + inp3_h_f_0.x + inp4_h_f_0.x); + inp1_h_f_0.y += (inp2_h_f_0.y + inp3_h_f_0.y + inp4_h_f_0.y); + inp1_h_f_1.x += (inp2_h_f_1.x + inp3_h_f_1.x + inp4_h_f_1.x); + inp1_h_f_1.y += (inp2_h_f_1.y + inp3_h_f_1.y + inp4_h_f_1.y); + + float2 val_f; + __half2* val_h = reinterpret_cast<__half2*>(&val_f); + + val_h[0] = __float22half2_rn(inp1_h_f_0); + val_h[1] = __float22half2_rn(inp1_h_f_1); + + float2* out_4 = reinterpret_cast(out); + out_4[row * row_stride + id] = val_f; +} + +template <> +void launch_fused_add4(float* out, + const float* inp1, + const float* inp2, + const float* inp3, + const float* inp4, + int batch_size, + int seq_length, + int hidden_size, + cudaStream_t& stream) +{ + dim3 grid_dim(batch_size * seq_length); + + dim3 block_dim(hidden_size / 4); + + fused_add4_kernel<<>>( + out, inp1, inp2, inp3, inp4, (batch_size * seq_length * hidden_size), hidden_size / 4); +} + +template <> +void launch_fused_add4<__half>(__half* out, + const __half* inp1, + const __half* inp2, + const __half* inp3, + const __half* inp4, + int batch_size, + int seq_length, + int hidden_size, + cudaStream_t& stream) +{ + dim3 grid_dim(batch_size * seq_length); + + dim3 block_dim(hidden_size / 4); + + fused_add4_kernel<<>>( + out, inp1, inp2, inp3, inp4, (batch_size * seq_length * hidden_size), hidden_size / 4); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/apply_rotary_pos_emb.cu b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/apply_rotary_pos_emb.cu new file mode 100644 index 0000000000000000000000000000000000000000..a06dbb48fd332c8cd04b5c96407e5efa0431178f --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/apply_rotary_pos_emb.cu @@ -0,0 +1,199 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "conversion_utils.h" +#ifdef __HIP_PLATFORM_AMD__ +#include "hip/hip_cooperative_groups.h" +#else +#include "cooperative_groups.h" +#endif +#include "ds_kernel_utils.h" +#include "inference_cuda_layers.h" +#include "memory_access_utils.h" + +#ifndef __HIP_PLATFORM_AMD__ +#include +#endif + +namespace cg = cooperative_groups; + +namespace rot_half { +constexpr int threads = 256; +} // namespace rot_half + +template +__global__ void apply_rotary_pos_half(T* mixed_query, + T* key_layer, + unsigned rotary_dim, + unsigned seq_len, + unsigned seq_offset, + unsigned num_heads, + unsigned head_size, + unsigned total_count, + float rope_theta, + int max_out_tokens) +{ + constexpr int T_per_thread = granularity / sizeof(T); + constexpr int heads_per_block = rot_half::threads / threadsPerHead; + + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile head_group = cg::tiled_partition(tb); + + const int head_idx = blockIdx.x * heads_per_block + threadIdx.x / threadsPerHead; + const int cur_seq_idx = head_idx % seq_len; + const int offset = head_idx * head_size; + const int k_offset = (cur_seq_idx + (head_idx / seq_len) * max_out_tokens) * head_size; + + const int seq_idx = cur_seq_idx + seq_offset; + const int half_dim = rotary_dim >> 1; + const int half_dim_threads = half_dim / T_per_thread; + + if (head_idx < total_count) { + const int base_neuron_idx = head_group.thread_rank() * T_per_thread; + + T q[T_per_thread], k[T_per_thread]; + mem_access::load_global(q, mixed_query + offset + base_neuron_idx); + mem_access::load_global(k, key_layer + k_offset + base_neuron_idx); + +#pragma unroll + for (int i = 0; i < T_per_thread; i++) { + const int neuron_idx = base_neuron_idx + i; + if (neuron_idx < rotary_dim) { + float inv_freq = (float)((neuron_idx % half_dim) * 2) / (float)rotary_dim; + inv_freq = 1.0 / powf(rope_theta, inv_freq) * (float)seq_idx; + + float rotary_sign = (neuron_idx > (half_dim - 1) ? -1.0 : 1.0); + float q_rot = conversion::to(q[i]) * rotary_sign; + float k_rot = conversion::to(k[i]) * rotary_sign; + + const int target_lane = (neuron_idx < half_dim) + ? 
head_group.thread_rank() + half_dim_threads + : head_group.thread_rank() - half_dim_threads; + + const float q_rot_temp = head_group.shfl(q_rot, target_lane); + const float k_rot_temp = head_group.shfl(k_rot, target_lane); + + q[i] = conversion::to(conversion::to(q[i]) * cosf(inv_freq) + + q_rot_temp * sinf(inv_freq)); + k[i] = conversion::to(conversion::to(k[i]) * cosf(inv_freq) + + k_rot_temp * sinf(inv_freq)); + } + } + + mem_access::store_global(mixed_query + offset + base_neuron_idx, q); + mem_access::store_global(key_layer + k_offset + base_neuron_idx, k); + } +} + +#define LAUNCH_ROT_POS_EMB_HALF(HEAD_THREADS, ALIGNMENT) \ + apply_rotary_pos_half<<>>(mixed_query, \ + key_layer, \ + rotary_dim, \ + seq_len, \ + offset, \ + num_heads, \ + head_size, \ + total_count, \ + rope_theta, \ + max_out_tokens); + +#ifdef __HIP_PLATFORM_AMD__ +#define LAUNCH_FOR_ALIGNMENT(ALIGNMENT) \ + if (threads_per_head == 4) { \ + LAUNCH_ROT_POS_EMB_HALF(4, ALIGNMENT); \ + } else if (threads_per_head == 8) { \ + LAUNCH_ROT_POS_EMB_HALF(8, ALIGNMENT); \ + } else if (threads_per_head == 16) { \ + LAUNCH_ROT_POS_EMB_HALF(16, ALIGNMENT); \ + } else if (threads_per_head == 32) { \ + LAUNCH_ROT_POS_EMB_HALF(32, ALIGNMENT); \ + } else if (threads_per_head == 64) { \ + LAUNCH_ROT_POS_EMB_HALF(64, ALIGNMENT); \ + } else { \ + assert(false); \ + } +#else +#define LAUNCH_FOR_ALIGNMENT(ALIGNMENT) \ + if (threads_per_head == 4) { \ + LAUNCH_ROT_POS_EMB_HALF(4, ALIGNMENT); \ + } else if (threads_per_head == 8) { \ + LAUNCH_ROT_POS_EMB_HALF(8, ALIGNMENT); \ + } else if (threads_per_head == 16) { \ + LAUNCH_ROT_POS_EMB_HALF(16, ALIGNMENT); \ + } else if (threads_per_head == 32) { \ + LAUNCH_ROT_POS_EMB_HALF(32, ALIGNMENT); \ + } else { \ + assert(false); \ + } +#endif + +template +void launch_apply_rotary_pos_emb(T* mixed_query, + T* key_layer, + unsigned head_size, + unsigned seq_len, + unsigned rotary_dim, + unsigned offset, + unsigned num_heads, + unsigned batch, + float rope_theta, + cudaStream_t stream, + int max_out_tokens) +{ + const int half_dim = rotary_dim >> 1; + + int alignment = sizeof(T); + if (half_dim % (16 / sizeof(T)) == 0) { + alignment = 16; + } else if (half_dim % (8 / sizeof(T)) == 0) { + alignment = 8; + } else if (half_dim % (4 / sizeof(T)) == 0) { + alignment = 4; + } else { + assert(false); + } + const int T_per_elem = alignment / sizeof(T); + + int total_count = batch * num_heads * seq_len; + + const int padded_head_size = next_pow2(head_size); + + assert(padded_head_size <= hw_warp_size * T_per_elem); + + const int threads_per_head = padded_head_size / T_per_elem; + const int heads_per_block = rot_half::threads / threads_per_head; + + dim3 block(rot_half::threads); + dim3 grid((total_count + heads_per_block - 1) / heads_per_block); + + if (alignment == 4) { + LAUNCH_FOR_ALIGNMENT(4); + } else if (alignment == 8) { + LAUNCH_FOR_ALIGNMENT(8); + } else if (alignment == 16) { + LAUNCH_FOR_ALIGNMENT(16); + } else { + assert(false); + } +} + +#define INSTANTIATE_LAUNCH_ROTARY_POS_EMB(T) \ + template void launch_apply_rotary_pos_emb(T*, \ + T*, \ + unsigned, \ + unsigned, \ + unsigned, \ + unsigned, \ + unsigned, \ + unsigned, \ + float, \ + cudaStream_t, \ + int); + +INSTANTIATE_LAUNCH_ROTARY_POS_EMB(float); +#ifdef BF16_AVAILABLE +INSTANTIATE_LAUNCH_ROTARY_POS_EMB(__nv_bfloat16); +#endif +INSTANTIATE_LAUNCH_ROTARY_POS_EMB(__half); diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/dequantize.cu 
b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/dequantize.cu new file mode 100644 index 0000000000000000000000000000000000000000..7a8e7ca446b0fbc4154fd0198c8e301f77cc7815 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/dequantize.cu @@ -0,0 +1,153 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "conversion_utils.h" +#include "inference_cuda_layers.h" + +#define MAX_QUANTIZE_GROUPING 1024 + +#define loop_unroll 1 +#define loop_unroll_bits 1 + +template +__global__ void dequantize_kernel(T* output, + const int8_t* input, + const float* qscale, + int output_size, + int hidden_dim, + int groups, + int merge_count) +{ + unsigned merge_hidden = hidden_dim >> merge_count; + unsigned quantization_stride = (merge_hidden * output_size) / groups; + + unsigned bid = blockIdx.x; + unsigned tid = threadIdx.x; + + while (tid < output_size) { + unsigned w_index = bid / merge_hidden; + unsigned q_index = tid + bid * output_size; + + auto q = input[q_index]; + + unsigned merge_hidden_total = w_index * merge_hidden; + unsigned scale_index = + ((((bid - merge_hidden_total) + tid * merge_hidden) / quantization_stride) + << merge_count) + + w_index; + + float scale_data = qscale[scale_index]; + + output[q_index] = conversion::to(scale_data * (float)q); + tid += blockDim.x; + } +} + +template +void launch_dequantize(T* output, + const int8_t* input, + const float* qscale, + unsigned output_size, + unsigned hidden_dim, + unsigned groups, + unsigned merge_count, + cudaStream_t stream) +{ + unsigned threads = 1024; + dim3 block_dims(threads); + dim3 grid_dims(hidden_dim); + + dequantize_kernel<<>>( + output, input, qscale, output_size, hidden_dim, groups, merge_count); +} + +#define INSTANTIATE_DEQUANTIZE_MERGE(T) \ + template void launch_dequantize( \ + T*, const int8_t*, const float*, unsigned, unsigned, unsigned, unsigned, cudaStream_t); + +INSTANTIATE_DEQUANTIZE_MERGE(float); +#ifdef BF16_AVAILABLE +INSTANTIATE_DEQUANTIZE_MERGE(__nv_bfloat16); +#endif +INSTANTIATE_DEQUANTIZE_MERGE(__half); + +__global__ void dequantize_kernel(float* output, + const int8_t* input, + const float* qscale, + int hidden_dim, + unsigned merge_hidden, + int cnt) +{ +} + +template +__global__ void dequantize_kernel(T* output, + const int8_t* input, + const float* qscale, + unsigned hidden_dim, + unsigned merge_hidden, + int cnt) +{ + unsigned bid = blockIdx.x * gridDim.y + blockIdx.y; + unsigned tid = threadIdx.x; + + float local_scale = qscale[blockIdx.x]; + + const float* input_cast = reinterpret_cast(input); + float2* output_cast = reinterpret_cast(output); + + input_cast += bid * merge_hidden; + output_cast += bid * merge_hidden; + + for (int c = 0; c < cnt; c++) { + if (tid < merge_hidden) { + float q = input_cast[tid]; + int8_t* q_int8 = (int8_t*)&q; + + float2 q_f; + T* q_h = (T*)&q_f; + + q_h[0] = conversion::to(local_scale * (float)q_int8[0]); + q_h[1] = conversion::to(local_scale * (float)q_int8[1]); + q_h[2] = conversion::to(local_scale * (float)q_int8[2]); + q_h[3] = conversion::to(local_scale * (float)q_int8[3]); + output_cast[tid] = q_f; + tid += blockDim.x; + } + } +} + +template +void launch_dequantize(T* output, + const int8_t* input, + const float* qscale, + unsigned output_size, + unsigned hidden_dim, + unsigned groups, + cudaStream_t stream) +{ + unsigned threads = 1024; + hidden_dim /= 4; + unsigned thd_cnt = (hidden_dim - 1) / threads + 1; + + 
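/*
Both kernels in this file implement plain symmetric int8 dequantization: every int8 value
is multiplied by the float scale of the quantization group it falls in. A host-side sketch
of that contract for the simple contiguous group layout (illustrative only; the
merged-weight kernel above uses a more involved scale-index computation):

#include <cstdint>
#include <vector>

inline std::vector<float> dequantize_ref(const std::vector<int8_t>& q,
                                         const std::vector<float>& scales,  // one per group
                                         int groups)
{
    std::vector<float> out(q.size());
    const size_t group_size = q.size() / groups;  // assumes the groups divide evenly
    for (size_t i = 0; i < q.size(); ++i) { out[i] = scales[i / group_size] * (float)q[i]; }
    return out;
}
*/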
assert(output_size % groups == 0); + unsigned blocks = output_size / groups; + + dim3 block_dims(threads); + dim3 grid_dims(groups, blocks); + + dequantize_kernel<<>>( + output, input, qscale, hidden_dim, hidden_dim, thd_cnt); +} + +#define INSTANTIATE_DEQUANTIZE_NO_MERGE(T) \ + template void launch_dequantize( \ + T*, const int8_t*, const float*, unsigned, unsigned, unsigned, cudaStream_t); + +INSTANTIATE_DEQUANTIZE_NO_MERGE(float); +#ifdef BF16_AVAILABLE +INSTANTIATE_DEQUANTIZE_NO_MERGE(__nv_bfloat16); +#endif +INSTANTIATE_DEQUANTIZE_NO_MERGE(__half); diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/layer_norm.cu b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/layer_norm.cu new file mode 100644 index 0000000000000000000000000000000000000000..e5e7e89c9d2069678bc445004053f8ad5f9809b1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/layer_norm.cu @@ -0,0 +1,503 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "conversion_utils.h" +#include "ds_kernel_utils.h" +#include "inference_cuda_layers.h" +#include "memory_access_utils.h" +#include "reduction_utils.h" + +namespace cg = cooperative_groups; +using rop = reduce::ROpType; + +namespace ln { +constexpr int granularity = 16; +} // namespace ln + +/* +Primary layer norm implementation. Assumes elems_per_row % 8 +is equal to 0. + +Args: + output: buffer for output data + vals: buffer for input data + gamma: gain for normalization + beta: bias for normalization + epsilon: numeric stability + elems_per_row: number of elements each block will normalize +*/ +template +__global__ void fused_ln(T* output, + const T* vals, + const T* gamma, + const T* beta, + float epsilon, + int elems_per_row) +{ + constexpr int T_per_load = ln::granularity / sizeof(T); + + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + + // X-dimension of the block + const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) + + (tb.thread_index().y * elems_per_row); + const int thread_offset = tb.thread_index().x * T_per_load; + const int base_offset = block_offset + thread_offset; + const int stride = blockDim.x * T_per_load; + + float sum = reduce::init(); + + const T* input_base = vals + base_offset; + + T local_buffer[unRoll * T_per_load]; + +#pragma unRoll + for (int i = 0; i < unRoll; i++) { + T* iteration_buffer = local_buffer + i * T_per_load; + + mem_access::load_global( + iteration_buffer, input_base + i * stride, thread_offset + i * stride < elems_per_row); + +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + float vals_up_cast = conversion::to(iteration_buffer[j]); + sum = reduce::element(sum, vals_up_cast); + } + } + + reduce::partitioned_block(tb, warp, sum); + const float mean = sum / elems_per_row; + + float mean_diff = reduce::init(); + +#pragma unRoll + for (int i = 0; i < unRoll; i++) { +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + // Using a 0 value here skews the variance, have to if-guard + if (thread_offset + i * stride < elems_per_row) { + float diff = (conversion::to(local_buffer[i * T_per_load + j]) - mean); + mean_diff = reduce::element(mean_diff, diff * diff); + } + } + } + + reduce::partitioned_block(tb, warp, mean_diff); + const float variance = mean_diff / elems_per_row; + const float denom = __frsqrt_rn(variance + epsilon); + + // const T 
mean_compute = conversion::to(mean); + // const T denom_compute = conversion::to(denom); + + T* block_output = output + block_offset; + +#pragma unRoll + for (int i = 0; i < unRoll; i++) { + T* iteration_buffer = local_buffer + i * T_per_load; + const int iter_idx = i * stride + thread_offset; + const bool do_loads = iter_idx < elems_per_row; + + T gamma_local[T_per_load], beta_local[T_per_load]; + + mem_access::load_global(gamma_local, gamma + iter_idx, do_loads); + mem_access::load_global(beta_local, beta + iter_idx, do_loads); + +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + float val = conversion::to(iteration_buffer[j]); + val = (val - mean) * denom; + val = + val * conversion::to(gamma_local[j]) + conversion::to(beta_local[j]); + iteration_buffer[j] = conversion::to(val); + } + + if (do_loads) { + mem_access::store_global(block_output + iter_idx, iteration_buffer); + } + } +} + +#define LAUNCH_FUSED_LN(unRollFactor, threadsPerGroup, maxThreads) \ + fused_ln \ + <<>>(output, vals, gamma, beta, epsilon, elems_per_row); + +template +void launch_fused_ln(T* output, + const T* vals, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream) +{ + // 8 for __half, 4 for float + constexpr int T_per_load = ln::granularity / sizeof(T); + + constexpr int maxThreads = 256; + + // For Flaoat, unRoll 4, for __half, unRoll 2 + constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2; + + const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false; + const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internal_unRoll; + + // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of + // warp-sized blocks rather than stepping up to 64/96 threads + const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step); + const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads; + + const int groups_per_block_max = + is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1; + const int groups_per_block = (rows < groups_per_block_max) ? 
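/*
Worked example of the scheduling arithmetic in launch_fused_ln for __half with
elems_per_row = 4096, using the constants above (ln::granularity = 16, maxThreads = 256)
and assuming next_pow2 rounds up to the nearest power of two:
    T_per_load       = 16 / sizeof(__half) = 8
    internal_unRoll  = 2                          (sizeof(T) != 4)
    h_per_step       = 8 * 2 = 16                 (4096 > 128, so not the sub-block path)
    one_step_threads = next_pow2(ceil(4096 / 16)) = 256  ->  threadsPerGroup = 256
    groups_per_block = 1,  external_unRoll = ceil(4096 / (256 * 16)) = 1
so the dispatch below selects LAUNCH_FUSED_LN(1 * internal_unRoll, 256, 256): one
256-thread block normalizes each 4096-element row with a 2x inner unroll.
*/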
rows : groups_per_block_max; + const int groups_launch = (groups_per_block + rows - 1) / groups_per_block; + + dim3 block(threadsPerGroup, groups_per_block); + dim3 grid(groups_launch); + + const int elems_per_step = threadsPerGroup * h_per_step; + const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step; + + if (is_subblock_schedule) { + // <=128 + if (threadsPerGroup == 1) { + LAUNCH_FUSED_LN(1, 1, maxThreads); + } else if (threadsPerGroup == 2) { + LAUNCH_FUSED_LN(1, 2, maxThreads); + } else if (threadsPerGroup == 4) { + LAUNCH_FUSED_LN(1, 4, maxThreads); + } else if (threadsPerGroup == 8) { + LAUNCH_FUSED_LN(1, 8, maxThreads); + } else if (threadsPerGroup == 16) { + LAUNCH_FUSED_LN(1, 16, maxThreads); + } + } else if (external_unRoll == 1) { + // 129 - 4096 elems + // (this can launch with 1-7 warps as well) + LAUNCH_FUSED_LN(1 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 2) { + // 4097 - 8192 elems + LAUNCH_FUSED_LN(2 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 3) { + // 8193 - 12288 elems + LAUNCH_FUSED_LN(3 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 4) { + // 12289 - 16384 elems + LAUNCH_FUSED_LN(4 * internal_unRoll, maxThreads, maxThreads); + } +} + +#define INSTANTIATE_FUSED_LN(T) \ + template void launch_fused_ln(T*, const T*, const T*, const T*, float, int, int, cudaStream_t); + +INSTANTIATE_FUSED_LN(__half); +#ifdef BF16_AVAILABLE +INSTANTIATE_FUSED_LN(__nv_bfloat16); +#endif +INSTANTIATE_FUSED_LN(float); + +/* +Fused resiual + bias + layer norm implementation. Assumes elems_per_row % 8 +is equal to 0. + +TODO(cmikeh2): Goal is to deprecate this implementation. The bias + residual +need to be fused into compute-bound producer operations. + +Args: + output: buffer for output data + res_output: output of residual addition + vals: buffer for input data + residual: residual data + bias: bias of of input data + gamma: gain for normalization + beta: bias for normalization + epsilon: numeric stability + elems_per_row: number of elements each block will normalize +Template arg: + StoreResidual: controls whether the residual calculation is stored + or not. When set to false, the input `res_output` is unused. +*/ +template +__global__ void fused_residual_ln(T* output, + T* res_output, + const T* vals, + const T* residual, + const T* bias, + const T* gamma, + const T* beta, + float epsilon, + int elems_per_row) +{ + constexpr int T_per_load = ln::granularity / sizeof(T); + + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + + // X-dimension of the block + const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) + + (tb.thread_index().y * elems_per_row); + const int thread_offset = tb.thread_index().x * T_per_load; + const int base_offset = block_offset + thread_offset; + const int stride = tb.size() * T_per_load; + + float sum = reduce::init(); + + const T* input_base = vals + base_offset; + const T* residual_base = residual + base_offset; + const T* bias_base = bias + thread_offset; + + T local_buffer[unRoll * T_per_load]; + + // Unlike a vanilla layernorm, since we're fusing the two adds as well + // an inner unRoll seems to be less valuable. If anything, a double unRoll + // makes the most sense if we find we are having performance issues. 
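/*
For reference, the loop below computes, per row,
    x = vals + bias + residual
    y = gamma * (x - mean(x)) / sqrt(var(x) + epsilon) + beta
and, when StoreResidual is set, also writes x out as the pre-LN residual. A scalar,
single-row sketch of that contract (illustrative names, float only):

#include <cmath>
#include <vector>

inline void fused_residual_ln_ref(std::vector<float>& out,
                                  std::vector<float>* pre_ln_res,  // mirrors res_output, may be null
                                  const std::vector<float>& vals,
                                  const std::vector<float>& residual,
                                  const std::vector<float>& bias,
                                  const std::vector<float>& gamma,
                                  const std::vector<float>& beta,
                                  float epsilon)
{
    const int n = (int)vals.size();
    std::vector<float> x(n);
    out.resize(n);
    float mean = 0.f, var = 0.f;
    for (int i = 0; i < n; ++i) { x[i] = vals[i] + bias[i] + residual[i]; mean += x[i]; }
    mean /= n;
    for (int i = 0; i < n; ++i) { var += (x[i] - mean) * (x[i] - mean); }
    var /= n;
    const float denom = 1.f / std::sqrt(var + epsilon);
    for (int i = 0; i < n; ++i) { out[i] = (x[i] - mean) * denom * gamma[i] + beta[i]; }
    if (pre_ln_res) { *pre_ln_res = x; }  // the StoreResidual == true path
}
*/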
+#pragma unRoll + for (int i = 0; i < unRoll; i++) { + T* iteration_buffer = local_buffer + i * T_per_load; + T residual_buffer[T_per_load]; + T bias_buffer[T_per_load]; + + mem_access::load_global( + iteration_buffer, input_base + i * stride, thread_offset + i * stride < elems_per_row); + mem_access::load_global(residual_buffer, + residual_base + i * stride, + thread_offset + i * stride < elems_per_row); + mem_access::load_global( + bias_buffer, bias_base + i * stride, thread_offset + i * stride < elems_per_row); + +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + float vals_up_cast = conversion::to(iteration_buffer[j]); + float res_up_cast = conversion::to(residual_buffer[j]); + float bias_up_cast = conversion::to(bias_buffer[j]); + vals_up_cast = vals_up_cast + bias_up_cast + res_up_cast; + sum = reduce::element(sum, vals_up_cast); + iteration_buffer[j] = conversion::to(vals_up_cast); + } + + if (preLnResidual && (thread_offset + i * stride < elems_per_row)) { + mem_access::store_global(res_output + base_offset + i * stride, + iteration_buffer); + } + } + + reduce::partitioned_block(tb, warp, sum); + const float mean = sum / elems_per_row; + + float mean_diff = reduce::init(); +#pragma unRoll + for (int i = 0; i < unRoll; i++) { +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + // Using a 0 value here skews the variance, have to if-guard + if (thread_offset + i * stride < elems_per_row) { + float diff = (conversion::to(local_buffer[i * T_per_load + j]) - mean); + mean_diff = reduce::element(mean_diff, diff * diff); + } + } + } + + reduce::partitioned_block(tb, warp, mean_diff); + const float variance = mean_diff / elems_per_row; + const float denom = __frsqrt_rn(variance + epsilon); + + T* block_output = output + block_offset; + +#pragma unRoll + for (int i = 0; i < unRoll; i++) { + T* iteration_buffer = local_buffer + i * T_per_load; + const int iter_idx = i * stride + thread_offset; + const bool do_loads = iter_idx < elems_per_row; + + T gamma_local[T_per_load], beta_local[T_per_load]; + + mem_access::load_global(gamma_local, gamma + iter_idx, do_loads); + mem_access::load_global(beta_local, beta + iter_idx, do_loads); + +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + // iteration_buffer[j] = (iteration_buffer[j] - mean_compute) * denom_compute; + // iteration_buffer[j] = iteration_buffer[j] * gamma_local[j] + beta_local[j]; + float val = conversion::to(iteration_buffer[j]); + val = (val - mean) * denom; + val = + val * conversion::to(gamma_local[j]) + conversion::to(beta_local[j]); + iteration_buffer[j] = conversion::to(val); + } + + if (do_loads) { + mem_access::store_global(block_output + iter_idx, iteration_buffer); + } + } +} + +// TODO(cmikeh2): There's a bunch of redundancy here that needs to be removed/simplified. +#define LAUNCH_FUSED_RES_LN(unRollFactor, threadsPerGroup, maxThreads) \ + fused_residual_ln \ + <<>>( \ + output, nullptr, vals, residual, bias, gamma, beta, epsilon, elems_per_row); + +template +void launch_fused_residual_ln(T* output, + const T* vals, + const T* residual, + const T* bias, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream) +{ + // 8 for __half, 4 for float + constexpr int T_per_load = ln::granularity / sizeof(T); + + constexpr int maxThreads = 256; + + // For Flaoat, unRoll 4, for __half, unRoll 2 + constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2; + + const bool is_subblock_schedule = (elems_per_row <= 128) ? 
true : false; + const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internal_unRoll; + + // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of + // warp-sized blocks rather than stepping up to 64/96 threads + const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step); + const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads; + + const int groups_per_block_max = + is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1; + const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max; + const int groups_launch = (groups_per_block + rows - 1) / groups_per_block; + + dim3 block(threadsPerGroup, groups_per_block); + dim3 grid(groups_launch); + + const int elems_per_step = threadsPerGroup * h_per_step; + const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step; + + if (is_subblock_schedule) { + // <=128 + if (threadsPerGroup == 1) { + LAUNCH_FUSED_RES_LN(1, 1, maxThreads); + } else if (threadsPerGroup == 2) { + LAUNCH_FUSED_RES_LN(1, 2, maxThreads); + } else if (threadsPerGroup == 4) { + LAUNCH_FUSED_RES_LN(1, 4, maxThreads); + } else if (threadsPerGroup == 8) { + LAUNCH_FUSED_RES_LN(1, 8, maxThreads); + } else if (threadsPerGroup == 16) { + LAUNCH_FUSED_RES_LN(1, 16, maxThreads); + } + } else if (external_unRoll == 1) { + // 129 - 4096 elems + // (this can launch with 1-7 warps as well) + LAUNCH_FUSED_RES_LN(1 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 2) { + // 4097 - 8192 elems + LAUNCH_FUSED_RES_LN(2 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 3) { + // 8193 - 12288 elems + LAUNCH_FUSED_RES_LN(3 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 4) { + // 12289 - 16384 elems + LAUNCH_FUSED_RES_LN(4 * internal_unRoll, maxThreads, maxThreads); + } +} + +#define LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(unRollFactor, threadsPerGroup, maxThreads) \ + fused_residual_ln \ + <<>>( \ + norm_output, res_output, vals, residual, bias, gamma, beta, epsilon, elems_per_row); + +template +void launch_fused_residual_ln_store_pre_ln_res(T* norm_output, + T* res_output, + const T* vals, + const T* residual, + const T* bias, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream) +{ + // 8 for __half, 4 for float + constexpr int T_per_load = ln::granularity / sizeof(T); + + constexpr int maxThreads = 256; + + // For Flaoat, unRoll 4, for __half, unRoll 2 + constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2; + + const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false; + const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internal_unRoll; + + // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of + // warp-sized blocks rather than stepping up to 64/96 threads + const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step); + const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads; + + const int groups_per_block_max = + is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1; + const int groups_per_block = (rows < groups_per_block_max) ? 
rows : groups_per_block_max; + const int groups_launch = (groups_per_block + rows - 1) / groups_per_block; + + dim3 block(threadsPerGroup, groups_per_block); + dim3 grid(groups_launch); + + const int elems_per_step = threadsPerGroup * h_per_step; + const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step; + + if (is_subblock_schedule) { + // <=128 + if (threadsPerGroup == 1) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 1, maxThreads); + } else if (threadsPerGroup == 2) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 2, maxThreads); + } else if (threadsPerGroup == 4) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 4, maxThreads); + } else if (threadsPerGroup == 8) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 8, maxThreads); + } else if (threadsPerGroup == 16) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 16, maxThreads); + } + } else if (external_unRoll == 1) { + // 129 - 4096 elems + // (this can launch with 1-7 warps as well) + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 2) { + // 4097 - 8192 elems + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(2 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 3) { + // 8193 - 12288 elems + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(3 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 4) { + // 12289 - 16384 elems + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(4 * internal_unRoll, maxThreads, maxThreads); + } +} + +#define INSTANTIATE_RES_LN(T) \ + template void launch_fused_residual_ln( \ + T*, const T*, const T*, const T*, const T*, const T*, float, int, int, cudaStream_t); + +#define INSTANTIATE_PRE_LN_RES(T) \ + template void launch_fused_residual_ln_store_pre_ln_res( \ + T*, T*, const T*, const T*, const T*, const T*, const T*, float, int, int, cudaStream_t); + +INSTANTIATE_RES_LN(__half); +INSTANTIATE_RES_LN(float); +#ifdef BF16_AVAILABLE +INSTANTIATE_RES_LN(__nv_bfloat16); +#endif + +INSTANTIATE_PRE_LN_RES(__half); +INSTANTIATE_PRE_LN_RES(float); +#ifdef BF16_AVAILABLE +INSTANTIATE_PRE_LN_RES(__nv_bfloat16); +#endif diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/pt_binding.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/pt_binding.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b7277d1e167826207a3d09e676efdad42a836f08 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/pt_binding.cpp @@ -0,0 +1,2020 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include +#include +#include +#include "inference_context.h" +#include "inference_cublas_wrappers.h" +#include "inference_cuda_layers.h" + +std::array gemm_algos = std::array({99, 99, 99}); + +// NOTE: This activation function type enum should be always in sync +// with the python counterpart, otherwise the casting from python binding +// will be incorrect. +enum class ActivationFuncType { UNKNOWN = 0, GELU = 1, ReLU = 2, GATED_GELU = 3, GATED_SILU = 4 }; + +enum class NormType { UNKNOWN = 0, LayerNorm = 1, GroupNorm = 2, RMSNorm = 3 }; + +enum class TransformerType : uint8_t { UNKNOWN = 0, GPTType = 1, BERTType = 2 }; + +// NOTE: this is a temporary and dodgy solution to distinguish GPT and BERT style models +// based on the dimensions of the corresponding attention mask. 
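/*
Concretely: a GPT-style causal mask arrives with more than two dimensions, e.g.
[batch, 1, query_len, key_len], while a BERT-style padding mask is 2-D,
[batch, sequence_length]. Hypothetical shapes, for illustration only:

    at::Tensor gpt_mask  = at::zeros({8, 1, 128, 128});  // rank 4 -> GPTType,  mask stride = size(2) = 128
    at::Tensor bert_mask = at::zeros({8, 128});          // rank 2 -> BERTType, mask stride = 1
*/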
+inline auto infer_transformer_type(at::Tensor& attn_mask) -> TransformerType +{ + auto attn_mask_num_dims = attn_mask.sizes().size(); + + if (attn_mask_num_dims > 2) { + return TransformerType::GPTType; + } else if (attn_mask_num_dims == 2) { + return TransformerType::BERTType; + } else { + return TransformerType::UNKNOWN; + } +} + +// infer stride of attention mask memory layout based on the model type. +inline auto get_attn_mask_stride(at::Tensor& attn_mask) -> int +{ + auto trnsfrmr_type = infer_transformer_type(attn_mask); + + if (trnsfrmr_type == TransformerType::GPTType) { + return attn_mask.size(2); + } else if (trnsfrmr_type == TransformerType::BERTType) { + // Bert style models have always a mask stride of 1. + return 1; + } else if (trnsfrmr_type == TransformerType::UNKNOWN) { + return 0; + } + + // this is just to make the compiler happy. + return 0; +} + +template +at::Tensor ds_softmax(at::Tensor& attn_scores, + at::Tensor& attn_mask, + at::Tensor& alibi, + bool triangular, + bool recompute, + bool local_attention, + int window_size, + bool async_op, + float layer_scale, + int head_offset, + int mp_size) +{ + auto attn_scores_c = attn_scores.contiguous(); + int bsz = attn_scores_c.size(0); + + int seq_len = attn_scores_c.size(1); + int len = attn_scores_c.sizes().size(); + if (len > 2) seq_len = attn_scores_c.size(2); + + int soft_len = attn_scores_c.size(2); + if (len > 3) soft_len = attn_scores_c.size(3); + + int heads = 1; + if (len > 1) heads = attn_scores_c.size(1); + + auto mask_stride = get_attn_mask_stride(attn_mask); + + launch_attn_softmax_v2((T*)attn_scores_c.data_ptr(), + (attn_mask.sizes().size() > 1 ? (T*)attn_mask.data_ptr() : nullptr), + (alibi.sizes().size() > 1 ? (T*)alibi.data_ptr() : nullptr), + layer_scale, + triangular, + recompute, + local_attention, + window_size, + bsz, + heads, + seq_len, + soft_len, + head_offset, + mask_stride, + mp_size, + InferenceContext::Instance().GetCurrentStream(async_op)); + + return attn_scores_c; +} + +template +void allocate_workspace(unsigned hidden_dim, + unsigned num_heads, + unsigned prompt_length, + unsigned batch_size, + unsigned num_layers, + unsigned mp_size = 1, + bool external_cache = false, + unsigned rank = 0, + unsigned max_out_tokens = 1024, + unsigned min_out_tokens = 1) +{ + InferenceContext::Instance().GenWorkSpace(num_layers, + num_heads, + batch_size, + prompt_length, + hidden_dim, + mp_size, + external_cache, + sizeof(T), + rank, + max_out_tokens, + min_out_tokens); +} + +template +at::Tensor einsum_sec_sm_ecm(at::Tensor& Q, at::Tensor& W) +{ + auto options = at::TensorOptions() + .dtype(Q.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + T* workspace = (T*)InferenceContext::Instance().GetWorkSpace(); + float alpha = 1; + float gemm_beta = 0.0; + + /* + // Reallocate memory if we received a new prompt + if (!workspace || input.size(1) != 1) { + allocate_workspace(W.size(1), InferenceContext::Instance().GetMaxTokenLength(), + Q.size(0), 1, head_size); workspace = (T*)InferenceContext::Instance().GetWorkSpace(); + } + */ + + auto O = at::from_blob(workspace, {Q.size(1), Q.size(2), W.size(1)}, options); + unsigned m = W.size(1); + unsigned n = Q.size(1) * Q.size(2); + unsigned k = Q.size(0); + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + CUBLAS_OP_N, + CUBLAS_OP_T, + m, + n, + k, + &alpha, + &gemm_beta, + (T*)W.data_ptr(), + (T*)Q.data_ptr(), + (T*)O.data_ptr(), +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + 
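/*
The single GEMM above realizes the contraction implied by the function name,
O[e, c, m] = sum_s Q[s, e, c] * W[s, m], by treating Q as an (s) x (e*c) matrix:
m = W.size(1), n = Q.size(1) * Q.size(2), k = Q.size(0). A tiny reference loop for
checking shapes (illustrative only, row-major pointers):

inline void einsum_sec_sm_ecm_ref(const float* Q, const float* W, float* O,
                                  int S, int E, int C, int M)
{
    // O has shape [E, C, M]; Q is [S, E, C]; W is [S, M]
    for (int e = 0; e < E; ++e)
        for (int c = 0; c < C; ++c)
            for (int m = 0; m < M; ++m) {
                float acc = 0.f;
                for (int s = 0; s < S; ++s) acc += Q[(s * E + e) * C + c] * W[s * M + m];
                O[(e * C + c) * M + m] = acc;
            }
}
*/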
CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + return O; +} + +template +void attention_unfused(at::Tensor& prev_key_cont, + at::Tensor& query_cont, + at::Tensor& attn_mask, + at::Tensor& prev_value_cont, + at::Tensor& output, + int& bsz, + int& seq_len, + int& soft_len, + int& heads, + float& norm_factor, + bool triangular, + bool recompute, + bool local_attention, + int window_size) +{ + auto options = at::TensorOptions() + .dtype(query_cont.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + float alpha = norm_factor; + float gemm_beta = 0.0; + auto attn_score = at::empty({bsz, heads, seq_len, soft_len}, options); + int k = prev_value_cont.size(2) / heads; + + auto mask_stride = get_attn_mask_stride(attn_mask); + + cublasSetStream(InferenceContext::Instance().GetCublasHandle(), + InferenceContext::Instance().GetCurrentStream()); + cublas_strided_batched_gemm(InferenceContext::Instance().GetCublasHandle(), + soft_len, + seq_len, + k, + &alpha, + &gemm_beta, + (T*)prev_key_cont.data_ptr(), + (T*)query_cont.data_ptr(), + (T*)attn_score.data_ptr(), + CUBLAS_OP_N, + CUBLAS_OP_N, + soft_len * k, + seq_len * k, + seq_len * soft_len, + bsz * heads, +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + launch_attn_softmax_v2((T*)attn_score.data_ptr(), + (T*)(attn_mask.sizes().size() > 1 ? attn_mask.data_ptr() : nullptr), + (T*)nullptr, + 1.0, + triangular, + recompute, + local_attention, + window_size, + bsz, + heads, + seq_len, + soft_len, + 0, + mask_stride, + 1, + InferenceContext::Instance().GetCurrentStream(false)); + alpha = 1.0; + cublas_strided_batched_gemm(InferenceContext::Instance().GetCublasHandle(), + k, + seq_len, + soft_len, + &alpha, + &gemm_beta, + (T*)prev_value_cont.data_ptr(), + (T*)attn_score.data_ptr(), + (T*)output.data_ptr(), + CUBLAS_OP_N, + CUBLAS_OP_N, + soft_len * k, + seq_len * soft_len, + seq_len * k, + bsz * heads, +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif +} + +template +std::vector ds_softmax_context1(at::Tensor& query, + at::Tensor& prev_key, + at::Tensor& new_key, + at::Tensor& attn_mask, + at::Tensor& prev_value, + at::Tensor& new_value, + int heads, + float norm_factor, + bool merging, + bool triangular, + bool local_attention, + int window_size, + bool no_masking) +{ + auto query_cont = query.contiguous(); + auto prev_key_cont = prev_key.contiguous(); + auto prev_value_cont = prev_value.contiguous(); + + int new_size = (new_value.sizes().size() > 1 ? new_value.size(1) : 0); + + // Attn_Score [ batch Head Sequence-length Softmax-length] + + int bsz = query_cont.size(0); + int seq_len = query_cont.size(1); + int soft_len = prev_value.size(1); + + auto options = at::TensorOptions() + .dtype(query_cont.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + + auto output = + at::empty({prev_value.size(0), heads, seq_len, prev_value.size(2) / heads}, options); + attention_unfused(prev_key_cont, + query_cont, + attn_mask, //(no_masking ? 
nullptr : (T*)attn_mask.data_ptr()), + prev_value_cont, + output, + bsz, + seq_len, + soft_len, + heads, + norm_factor, + (triangular && (new_size == 0)), + (new_size == 0), + local_attention, + window_size); + + return {output, prev_key, prev_value}; +} + +template +void ds_softmax_internal(T* attn_scores, + at::Tensor& attn_mask, + at::Tensor& alibi, + float& layer_scale, + bool triangular, + bool recompute, + bool local_attention, + int window_size, + int bsz, + int seq_len, + int soft_len, + int heads) +{ + auto mask_stride = get_attn_mask_stride(attn_mask); + + launch_attn_softmax_v2((T*)attn_scores, + (attn_mask.sizes().size() > 1 ? (T*)attn_mask.data_ptr() : nullptr), + (alibi.sizes().size() > 1 ? (T*)alibi.data_ptr() : nullptr), + layer_scale, + triangular, + recompute, + local_attention, + window_size, + bsz, + heads, + seq_len, + soft_len, + 0, + mask_stride, + 1, + at::cuda::getCurrentCUDAStream()); +} + +template +void attention_unfused(T* prev_key_cont, + T* query_cont, + at::Tensor& attn_mask, + T* prev_value_cont, + T* output, + unsigned& bsz, + int& k, + unsigned& seq_len, + unsigned& soft_len, + int& heads, + float& norm_factor, + bool triangular, + bool recompute, + bool local_attention, + int window_size, + at::Tensor& alibi, + int layer_id) +{ + float layer_scale = alibi.sizes().size() > 1 ? std::max(1, layer_id) : 1.0; + float alpha = norm_factor * norm_factor / layer_scale; + float gemm_beta = 0.0; + T* workspace = (T*)InferenceContext::Instance().GetAttentionUnfusedWorkspace(); + + cublasSetStream(InferenceContext::Instance().GetCublasHandle(), + InferenceContext::Instance().GetCurrentStream()); + cublas_strided_batched_gemm(InferenceContext::Instance().GetCublasHandle(), + soft_len, + seq_len, + k, + &alpha, + &gemm_beta, + (T*)prev_key_cont, + (T*)query_cont, + workspace, + CUBLAS_OP_T, + CUBLAS_OP_N, + InferenceContext::Instance().GetMaxTokenLength() * k, + seq_len * k, + seq_len * soft_len, + bsz * heads, +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + ds_softmax_internal(workspace, + attn_mask, + alibi, + layer_scale, + triangular, + recompute, + local_attention, + window_size, + bsz, + seq_len, + soft_len, + heads); + alpha = 1.0; + cublas_strided_batched_gemm(InferenceContext::Instance().GetCublasHandle(), + k, + seq_len, + soft_len, + &alpha, + &gemm_beta, + (T*)prev_value_cont, + workspace, + (T*)output, + CUBLAS_OP_N, + CUBLAS_OP_N, + InferenceContext::Instance().GetMaxTokenLength() * k, + seq_len * soft_len, + seq_len * k, + bsz * heads, +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif +} + +void reset_cache() { InferenceContext::Instance().reset_tokens(); } + +template +std::vector ds_softmax_context(at::Tensor& query_key_value, + at::Tensor& attn_mask, + int rotary_dim, + bool rotate_half, + bool rotate_every_two, + int heads, + int num_kv, + float norm_factor, + bool triangular, + bool local_attention, + int window_size, + bool no_masking, + unsigned layer_id, + unsigned num_layers, + at::Tensor& alibi, + float rope_theta) +{ + unsigned bsz = query_key_value.size(0); + unsigned seq_len = query_key_value.size(1); + int k = query_key_value.size(2) / (heads + 2 * (num_kv > 0 ? 
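/*
attention_unfused (both overloads above) is the textbook three-step attention expressed as
two strided-batched GEMMs around the fused softmax: S = scale * Q K^T (scale is norm_factor
in the first overload, norm_factor^2 / layer_scale in the workspace overload),
P = softmax(S + mask), O = P V. A single-head scalar sketch of that contract, with no mask,
alibi or local-attention handling (illustrative only):

#include <algorithm>
#include <cmath>
#include <vector>

inline void attention_ref(const std::vector<float>& Q,  // [seq_len  x k]
                          const std::vector<float>& K,  // [soft_len x k]
                          const std::vector<float>& V,  // [soft_len x k]
                          std::vector<float>& O,        // [seq_len  x k]
                          int seq_len, int soft_len, int k, float scale)
{
    O.assign((size_t)seq_len * k, 0.f);
    std::vector<float> row(soft_len);
    for (int i = 0; i < seq_len; ++i) {
        float max_s = -1e30f, sum = 0.f;
        for (int j = 0; j < soft_len; ++j) {
            float s = 0.f;
            for (int d = 0; d < k; ++d) s += Q[i * k + d] * K[j * k + d];
            row[j] = s * scale;
            max_s = std::max(max_s, row[j]);
        }
        for (int j = 0; j < soft_len; ++j) { row[j] = std::exp(row[j] - max_s); sum += row[j]; }
        for (int d = 0; d < k; ++d) {
            float acc = 0.f;
            for (int j = 0; j < soft_len; ++j) acc += (row[j] / sum) * V[j * k + d];
            O[i * k + d] = acc;
        }
    }
}

In ds_softmax_context below, the per-head size k is recovered from the packed QKV width,
hidden = heads * k + 2 * num_kv * k; e.g. heads = 32, num_kv = 8, k = 128 gives
32 * 128 + 2 * 8 * 128 = 6144.
*/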
num_kv : heads)); + unsigned hidden_dim = heads * k; + + bool is_prompt = (seq_len > 1); + + if (is_prompt) InferenceContext::Instance().reset_tokens(seq_len); + unsigned soft_len = InferenceContext::Instance().current_tokens(); + + auto options = at::TensorOptions() + .dtype(query_key_value.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + + T* workspace = (T*)InferenceContext::Instance().GetWorkSpace(); + size_t buf_size = bsz * seq_len * hidden_dim; + auto output = torch::from_blob(workspace + 4 * buf_size, {bsz, seq_len, hidden_dim}, options); + + auto query_cont = workspace + 5 * buf_size; + size_t offset = + 10 * (hidden_dim * bsz * InferenceContext::Instance().GetMaxTokenLength()) + + layer_id * 2 * bsz * InferenceContext::Instance().GetMaxTokenLength() * hidden_dim; + unsigned all_tokens = soft_len; + auto kv_cache = workspace + offset + (hidden_dim / heads) * (is_prompt ? 0 : soft_len - 1); + size_t value_offset = bsz * InferenceContext::Instance().GetMaxTokenLength() * hidden_dim; + + T* temp_buf = (T*)output.data_ptr() + at::numel(output); + launch_bias_add_transform_0213((T*)query_cont, + kv_cache, + kv_cache + value_offset, + (T*)query_key_value.data_ptr(), + nullptr, + bsz, + seq_len, + (is_prompt ? 0 : soft_len - 1), + soft_len, + hidden_dim, + heads, + (num_kv > 0 ? num_kv : heads), + rotary_dim, + rotate_half, + rotate_every_two, + InferenceContext::Instance().GetCurrentStream(), + 3, + InferenceContext::Instance().GetMaxTokenLength(), + rope_theta); + if (rotary_dim > 0 && rotate_half) + launch_apply_rotary_pos_emb(query_cont, + kv_cache, + k, + seq_len, + rotary_dim, + (is_prompt ? 0 : soft_len - 1), + heads, + bsz, + rope_theta, + InferenceContext::Instance().GetCurrentStream(), + InferenceContext::Instance().GetMaxTokenLength()); + + attention_unfused(workspace + offset, + (T*)query_cont, + attn_mask, + workspace + offset + value_offset, + temp_buf, + bsz, + k, + seq_len, + all_tokens, + heads, + norm_factor, + (triangular && is_prompt), + is_prompt, + local_attention, + window_size, + alibi, + layer_id); + launch_transform4d_0213((T*)output.data_ptr(), + temp_buf, + bsz, + heads, + seq_len, + output.size(2), + InferenceContext::Instance().GetCurrentStream(false), + 1); + + if (layer_id == num_layers - 1) InferenceContext::Instance().advance_tokens(); + auto prev_key = torch::from_blob(workspace + offset, + {bsz, heads, all_tokens, k}, + {hidden_dim * InferenceContext::Instance().GetMaxTokenLength(), + k * InferenceContext::Instance().GetMaxTokenLength(), + k, + 1}, + options); + + auto prev_value = + torch::from_blob(workspace + offset + value_offset, + {bsz, heads, all_tokens, k}, + {hidden_dim * InferenceContext::Instance().GetMaxTokenLength(), + k * InferenceContext::Instance().GetMaxTokenLength(), + k, + 1}, + options); + + return {output, prev_key, prev_value}; +} + +template +at::Tensor ds_bias_gelu(at::Tensor& input, at::Tensor& bias) +{ + auto input_cont = input.contiguous(); + + int bsz = input_cont.size(0) * input_cont.size(1); + int intermediate_size = input_cont.size(2); + + launch_bias_gelu((T*)input_cont.data_ptr(), + (T*)bias.data_ptr(), + intermediate_size, + bsz, + InferenceContext::Instance().GetCurrentStream()); + return input_cont; +} + +#define DISPATCH_GATED_ACT(T_TYPE, C_TYPE) \ + if (activation.options().dtype() == torch::T_TYPE) { \ + launch_gated_activation((C_TYPE*)output.data_ptr(), \ + (const C_TYPE*)activation.data_ptr(), \ + (const C_TYPE*)bias.data_ptr(), \ + rows, \ + out_channels, \ + channels, 
\ + activation_type == ActivationFuncType::GATED_GELU, \ + InferenceContext::Instance().GetCurrentStream()); \ + } + +at::Tensor ds_gated_activation(at::Tensor& activation, at::Tensor& bias, int actFun) +{ + /* + Used in FF of Stable diffusion + */ + + const ActivationFuncType activation_type = static_cast(actFun); + + assert(activation_type == ActivationFuncType::GATED_GELU || + activation_type == ActivationFuncType::GATED_SILU); + + const int batch_size = activation.size(0); + const int seq_len = activation.size(1); + const int channels = activation.size(2); + + const int rows = batch_size * seq_len; + // Dimensionality is cut in half + const int out_channels = channels / 2; + + auto output = at::empty({batch_size, seq_len, out_channels}, activation.options()); + + DISPATCH_GATED_ACT(kFloat, float); + DISPATCH_GATED_ACT(kHalf, __half); +#ifdef BF16_AVAILABLE + DISPATCH_GATED_ACT(kBFloat16, __nv_bfloat16); +#endif + + return output; +} + +template +at::Tensor ds_bias_relu(at::Tensor& input, at::Tensor& bias) +{ + auto input_cont = input.contiguous(); + + int bsz = input_cont.size(0) * input_cont.size(1); + int intermediate_size = input_cont.size(2); + + launch_bias_relu((T*)input_cont.data_ptr(), + (T*)bias.data_ptr(), + intermediate_size, + bsz, + InferenceContext::Instance().GetCurrentStream()); + return input_cont; +} + +template +at::Tensor ds_bias_add(at::Tensor& input, at::Tensor& bias) +{ + auto input_cont = input.contiguous(); + + int bsz = input_cont.size(0) * input_cont.size(1); + int hidden_size = input_cont.size(2); + + launch_bias_add((T*)input_cont.data_ptr(), + (T*)bias.data_ptr(), + hidden_size, + bsz, + InferenceContext::Instance().GetCurrentStream()); + return input_cont; +} + +template +at::Tensor ds_bias_residual(at::Tensor& input, at::Tensor& residual, at::Tensor& bias) +{ + auto input_cont = input.contiguous(); + auto residual_cont = residual.contiguous(); + + int bsz = input_cont.size(0) * input_cont.size(1); + // launch_bias_residual((T*)input_cont.data_ptr(), + // (T*)residual_cont.data_ptr(), + // (T*)bias.data_ptr(), + // bsz, + // input_cont.size(2), + // (bias.size(0) > 1), + // InferenceContext::Instance().GetCurrentStream()); + return input_cont; +} + +#define DISPATCH_LAYER_NORM(T_TYPE, C_TYPE) \ + if (input.options().dtype() == torch::T_TYPE) { \ + launch_fused_ln((C_TYPE*)output.data_ptr(), \ + (const C_TYPE*)input.data_ptr(), \ + (const C_TYPE*)gamma.data_ptr(), \ + (const C_TYPE*)beta.data_ptr(), \ + epsilon, \ + rows, \ + elems_per_row, \ + InferenceContext::Instance().GetCurrentStream()); \ + } + +at::Tensor ds_layer_norm(at::Tensor& input, at::Tensor& gamma, at::Tensor& beta, float epsilon) +{ + const int rows = input.size(0) * input.size(1); + const int elems_per_row = input.size(2); + auto output = at::empty_like(input); + + DISPATCH_LAYER_NORM(kFloat, float); + DISPATCH_LAYER_NORM(kHalf, __half); +#ifdef BF16_AVAILABLE + DISPATCH_LAYER_NORM(kBFloat16, __nv_bfloat16); +#endif + + return output; +} + +#define DISPATCH_RMS_NORM(T_TYPE, C_TYPE) \ + if (input.options().dtype() == torch::T_TYPE) { \ + launch_rms_norm((C_TYPE*)output.data_ptr(), \ + (C_TYPE*)nullptr, \ + (const C_TYPE*)input.data_ptr(), \ + (const C_TYPE*)nullptr, \ + (const C_TYPE*)gamma.data_ptr(), \ + epsilon, \ + rows, \ + elems_per_row, \ + InferenceContext::Instance().GetCurrentStream()); \ + } + +at::Tensor ds_rms_norm(at::Tensor& input, at::Tensor& gamma, float epsilon) +{ + // Get number of dims of tensor + int num_dims = input.dim(); + const int rows = (num_dims == 2) ? 
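/*
ds_rms_norm dispatches to launch_rms_norm, which applies the usual RMSNorm (no mean
subtraction, no beta):
    y = x * rsqrt(mean(x^2) + epsilon) * gamma
A scalar, single-row sketch of that formula (illustrative only):

#include <cmath>
#include <vector>

inline void rms_norm_ref(std::vector<float>& out,
                         const std::vector<float>& x,
                         const std::vector<float>& gamma,
                         float epsilon)
{
    const int n = (int)x.size();
    out.resize(n);
    float mean_sq = 0.f;
    for (int i = 0; i < n; ++i) mean_sq += x[i] * x[i];
    mean_sq /= n;
    const float denom = 1.f / std::sqrt(mean_sq + epsilon);
    for (int i = 0; i < n; ++i) out[i] = x[i] * denom * gamma[i];
}
*/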
input.size(0) : input.size(0) * input.size(1); + const int elems_per_row = (num_dims == 2) ? input.size(1) : input.size(2); + + auto output = at::empty_like(input); + + DISPATCH_RMS_NORM(kFloat, float); + DISPATCH_RMS_NORM(kHalf, __half); +#ifdef BF16_AVAILABLE + DISPATCH_RMS_NORM(kBFloat16, __nv_bfloat16); +#endif + + return output; +} + +#define DISPATCH_PRE_RMS_NORM(T_TYPE, C_TYPE) \ + if (input.options().dtype() == torch::T_TYPE) { \ + launch_rms_norm((C_TYPE*)output.data_ptr(), \ + (C_TYPE*)res_out.data_ptr(), \ + (const C_TYPE*)input.data_ptr(), \ + (const C_TYPE*)residual.data_ptr(), \ + (const C_TYPE*)gamma.data_ptr(), \ + epsilon, \ + rows, \ + elems_per_row, \ + InferenceContext::Instance().GetCurrentStream()); \ + } + +std::vector ds_pre_rms_norm(at::Tensor& input, + at::Tensor& residual, + at::Tensor& gamma, + float epsilon) +{ + // Get number of dims of tensor + int num_dims = input.dim(); + const int rows = (num_dims == 2) ? input.size(0) : input.size(0) * input.size(1); + const int elems_per_row = (num_dims == 2) ? input.size(1) : input.size(2); + + auto output = at::empty_like(input); + auto res_out = at::empty_like(residual); + + DISPATCH_PRE_RMS_NORM(kFloat, float); + DISPATCH_PRE_RMS_NORM(kHalf, __half); +#ifdef BF16_AVAILABLE + DISPATCH_PRE_RMS_NORM(kBFloat16, __nv_bfloat16); +#endif + + return {output, res_out}; +} + +template +void ds_layer_norm_internal(T* workspace, + at::Tensor& input, + at::Tensor& gamma, + at::Tensor& beta, + float epsilon) +{ + int bsz = input.size(0) * input.size(1); + launch_fused_ln(workspace, + (const T*)input.data_ptr(), + (const T*)gamma.data_ptr(), + (const T*)beta.data_ptr(), + epsilon, + bsz, + input.size(2), + InferenceContext::Instance().GetCurrentStream()); +} + +#define DISPATCH_LAYER_NORM_RESIDUAL(T_TYPE, C_TYPE) \ + if (input.options().dtype() == torch::T_TYPE) { \ + launch_fused_residual_ln((C_TYPE*)output.data_ptr(), \ + (const C_TYPE*)input.data_ptr(), \ + (const C_TYPE*)residual.data_ptr(), \ + (const C_TYPE*)bias.data_ptr(), \ + (const C_TYPE*)gamma.data_ptr(), \ + (const C_TYPE*)beta.data_ptr(), \ + epsilon, \ + rows, \ + elems_per_row, \ + InferenceContext::Instance().GetCurrentStream()); \ + } + +/* Currently only used in unit testing */ +at::Tensor ds_layer_norm_residual(at::Tensor& input, + at::Tensor& bias, + at::Tensor& residual, + at::Tensor& gamma, + at::Tensor& beta, + float epsilon) +{ + const int rows = input.size(0) * input.size(1); + const int elems_per_row = input.size(2); + auto output = at::empty_like(input); + + DISPATCH_LAYER_NORM_RESIDUAL(kFloat, float); + DISPATCH_LAYER_NORM_RESIDUAL(kHalf, __half); +#ifdef BF16_AVAILABLE + DISPATCH_LAYER_NORM_RESIDUAL(kBFloat16, __nv_bfloat16); +#endif + + return output; +} + +#define DISPATCH_PRE_LAYER_NORM_RESIDUAL(T_TYPE, C_TYPE) \ + if (input.options().dtype() == torch::T_TYPE) { \ + launch_fused_residual_ln_store_pre_ln_res( \ + (C_TYPE*)norm_output.data_ptr(), \ + (C_TYPE*)res_output.data_ptr(), \ + (const C_TYPE*)input.data_ptr(), \ + (const C_TYPE*)residual.data_ptr(), \ + (const C_TYPE*)bias.data_ptr(), \ + (const C_TYPE*)gamma.data_ptr(), \ + (const C_TYPE*)beta.data_ptr(), \ + epsilon, \ + rows, \ + elems_per_row, \ + InferenceContext::Instance().GetCurrentStream()); \ + } + +/* Currently only used in unit testing */ +std::vector ds_layer_norm_residual_store_pre_ln_res(at::Tensor& input, + at::Tensor& bias, + at::Tensor& residual, + at::Tensor& gamma, + at::Tensor& beta, + float epsilon) +{ + const int rows = input.size(0) * input.size(1); + const int 
elems_per_row = input.size(2); + auto norm_output = at::empty_like(input); + auto res_output = at::empty_like(input); + + DISPATCH_PRE_LAYER_NORM_RESIDUAL(kFloat, float); + DISPATCH_PRE_LAYER_NORM_RESIDUAL(kHalf, __half); +#ifdef BF16_AVAILABLE + DISPATCH_PRE_LAYER_NORM_RESIDUAL(kBFloat16, __nv_bfloat16); +#endif + + return {norm_output, res_output}; +} + +template +void quantized_gemm(void* output, + T* input, + at::Tensor& weight, + at::Tensor& qscale, + int groups, + int bsz, + int hidden_size) +{ + // T* weight16 = (T*)InferenceContext::Instance().GetWorkSpace() + 12 * hidden_size * bsz; + + auto options = at::TensorOptions() + .dtype(at::kHalf) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + auto tmp = torch::empty(weight.sizes(), options); + T* weight16 = (T*)tmp.data_ptr(); + launch_dequantize(weight16, + (int8_t*)weight.data_ptr(), + (float*)qscale.data_ptr(), + weight.size(0), + weight.size(1), + groups, + InferenceContext::Instance().GetCurrentStream()); + + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + CUBLAS_OP_T, + CUBLAS_OP_N, + weight.size(0), + bsz, + weight.size(1), + &alpha, + &gemm_beta, + weight16, + (T*)input, + (T*)output, +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif +} + +template +at::Tensor qkv_unfused_cublas(at::Tensor& output, + at::Tensor& input, + at::Tensor& weight, + at::Tensor& q_scale, + at::Tensor& bias, + at::Tensor& gamma, + at::Tensor& beta, + const float epsilon, + bool add_bias, + bool q_int8, + bool transposed_mode) +{ + int bsz = input.size(0) * input.size(1); + T* workspace = (T*)InferenceContext::Instance().GetWorkSpace(); + workspace += (3 * bsz * input.size(2)); + ds_layer_norm_internal(workspace, input, gamma, beta, epsilon); + + if (q_int8) { + quantized_gemm( + output.data_ptr(), workspace, weight, q_scale, q_scale.size(0), bsz, input.size(2)); + } else { + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + + cublasSetStream(InferenceContext::Instance().GetCublasHandle(), + InferenceContext::Instance().GetCurrentStream()); + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + (transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N), + CUBLAS_OP_N, + weight.size(transposed_mode ? 0 : 1), + bsz, + input.size(2), + &alpha, + &gemm_beta, + (T*)weight.data_ptr(), + workspace, + (T*)output.data_ptr(), +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + } + if (add_bias) + launch_bias_add((T*)output.data_ptr(), + (T*)bias.data_ptr(), + (transposed_mode || q_int8) ? weight.size(0) : weight.size(1), + bsz, + InferenceContext::Instance().GetCurrentStream()); + return torch::from_blob(workspace, input.sizes(), input.options()); +} + +template +std::vector ds_rms_qkv(at::Tensor& input, + at::Tensor& weight, + at::Tensor& q_scale, + at::Tensor& gamma, + const float epsilon, + bool q_int8, + bool transposed_mode) +{ + const int bsz = input.size(0) * input.size(1); + T* workspace = (T*)InferenceContext::Instance().GetWorkSpace(); + T* rms_norm_ptr = workspace + (3 * bsz * input.size(2)); + int out_size = (transposed_mode || q_int8) ? 
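/*
quantized_gemm above is a dequantize-then-GEMM path rather than a true int8 GEMM: the int8
weight [out_features, in_features] is first expanded to fp16 with its group scales
(launch_dequantize), then a regular cublas_gemm_ex with CUBLAS_OP_T computes
output[bsz, out_features] = input[bsz, in_features] * W^T. Shape sketch with hypothetical
sizes: an int8 weight [12288, 4096] with one float scale per quantization group and an
input [bsz, 4096] produce an output [bsz, 12288], at the cost of a temporary fp16 weight
of 12288 * 4096 * 2 bytes.
*/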
weight.size(0) : weight.size(1); + + auto options = at::TensorOptions() + .dtype(input.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + auto rms_norm = at::from_blob(rms_norm_ptr, input.sizes(), options); + auto output = at::from_blob(workspace, {input.size(0), input.size(1), out_size}, options); + + launch_rms_norm((T*)rms_norm.data_ptr(), + (T*)nullptr, + (const T*)input.data_ptr(), + (const T*)nullptr, + (const T*)gamma.data_ptr(), + epsilon, + bsz, + input.size(2), + InferenceContext::Instance().GetCurrentStream()); + + if (q_int8) { + quantized_gemm((T*)output.data_ptr(), + (T*)rms_norm.data_ptr(), + weight, + q_scale, + q_scale.size(0), + bsz, + input.size(2)); + } else { + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + + cublasSetStream(InferenceContext::Instance().GetCublasHandle(), + InferenceContext::Instance().GetCurrentStream()); + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + (transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N), + CUBLAS_OP_N, + weight.size(transposed_mode ? 0 : 1), + bsz, + input.size(2), + &alpha, + &gemm_beta, + (T*)weight.data_ptr(), + (T*)rms_norm.data_ptr(), + (T*)output.data_ptr(), +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + } + + return {output, rms_norm}; +} + +template +std::vector ds_qkv_gemm(at::Tensor& input, + at::Tensor& weight, + at::Tensor& q_scale, + at::Tensor& bias, + at::Tensor& gamma, + at::Tensor& beta, + const float epsilon, + bool add_bias, + bool q_int8, + bool transposed_mode) +{ + int bsz = input.size(0) * input.size(1); + T* workspace = (T*)InferenceContext::Instance().GetWorkSpace(); + int out_size = (transposed_mode || q_int8) ? weight.size(0) : weight.size(1); + + auto options = at::TensorOptions() + .dtype(input.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + + auto output = at::from_blob(workspace, {input.size(0), input.size(1), out_size}, options); + auto inp_norm = qkv_unfused_cublas(output, + input, + weight, + q_scale, + bias, + gamma, + beta, + epsilon, + add_bias, + q_int8, + transposed_mode); + + return {output, inp_norm}; +} + +template +void quantized_gemm(at::Tensor& output, + at::Tensor& input, + at::Tensor& weight, + at::Tensor& qscale, + int groups, + int merge_count) +{ + int bsz = input.size(0) * input.size(1); + auto options = at::TensorOptions() + .dtype(input.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + auto weight16 = at::empty({weight.size(0), weight.size(1)}, options); + + launch_dequantize((T*)weight16.data_ptr(), + (int8_t*)weight.data_ptr(), + (float*)qscale.data_ptr(), + weight.size(0), + weight.size(1), + groups, + merge_count, + InferenceContext::Instance().GetCurrentStream()); + + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + CUBLAS_OP_T, + CUBLAS_OP_N, + weight.size(0), + bsz, + input.size(2), + &alpha, + &gemm_beta, + (T*)weight16.data_ptr(), + (T*)input.data_ptr(), + (T*)output.data_ptr(), +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif +} + +template +at::Tensor ds_linear_layer(at::Tensor& input, + at::Tensor& weight, + at::Tensor& bias, + bool add_bias, + bool do_flash_attn, + int num_heads, + bool transposed_mode, + float rope_theta) +{ + auto input_cont = input.contiguous(); + auto options = at::TensorOptions() + .dtype(input_cont.options().dtype()) + 
.layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + + int head_size = input_cont.size(2) / num_heads; + int bsz = input.size(0) * input.size(1); + int out_size = transposed_mode ? weight.size(0) : weight.size(1); + T* workspace = (T*)InferenceContext::Instance().GetWorkSpace(); + auto output = at::from_blob(workspace, {input.size(0), input.size(1), out_size}, options); + + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + cublasSetStream(InferenceContext::Instance().GetCublasHandle(), + InferenceContext::Instance().GetCurrentStream()); + + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + (transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N), + CUBLAS_OP_N, + weight.size(transposed_mode ? 0 : 1), + bsz, + input_cont.size(2), + &alpha, + &gemm_beta, + (T*)weight.data_ptr(), + (T*)input_cont.data_ptr(), + (T*)output.data_ptr(), +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + if (add_bias) + launch_bias_add((T*)output.data_ptr(), + (T*)bias.data_ptr(), + weight.size(transposed_mode ? 0 : 1), + bsz, + InferenceContext::Instance().GetCurrentStream()); + bool add_padding = (head_size % 32 != 0 && head_size < 64) || (head_size % 64 != 0); + if (do_flash_attn) { + if (add_padding) { + int padded_head_size = head_size < 32 ? 32 : (head_size < 64 ? 64 : 128); + auto padded_output = workspace + output.numel(); + auto final_output = + padded_output + (input.size(0) * input.size(1) * 3 * num_heads * padded_head_size); + pad_data(padded_output, + workspace, + 3 * bsz * num_heads, + head_size, + padded_head_size, + InferenceContext::Instance().GetCurrentStream()); + + launch_bias_add_transform_0213( + final_output, + final_output + (input.size(0) * input.size(1) * num_heads * padded_head_size), + final_output + (input.size(0) * input.size(1) * 2 * num_heads * padded_head_size), + padded_output, + nullptr, + input.size(0), + input.size(1), + 0, + input.size(1), + (num_heads * padded_head_size), + num_heads, + -1, + -1, + false, + false, + InferenceContext::Instance().GetCurrentStream(), + 3, + input.size(1), + rope_theta); + return at::from_blob(final_output, + {3, input.size(0), num_heads, input.size(1), padded_head_size}, + options); + // return at::from_blob(padded_output, {input.size(0) * input.size(1), 3, num_heads, + // padded_head_size}, options); + } else { + auto final_output = workspace + output.numel(); + launch_bias_add_transform_0213( + final_output, + final_output + (input.size(0) * input.size(1) * input_cont.size(2)), + final_output + (input.size(0) * input.size(1) * 2 * input_cont.size(2)), + workspace, + nullptr, + input.size(0), + input.size(1), + 0, + input.size(1), + input_cont.size(2), + num_heads, + -1, + -1, + false, + false, + InferenceContext::Instance().GetCurrentStream(), + 3, + input.size(1), + rope_theta); + return at::from_blob( + final_output, {3, input.size(0), num_heads, input.size(1), head_size}, options); + // return at::from_blob(workspace, {input.size(0) * input.size(1), 3, num_heads, + // head_size}, options); + } + + } else + return output; +} + +template +std::vector add_padding(at::Tensor& query, at::Tensor& key, at::Tensor& value) +{ + int head_size = query.size(3); + int padded_head_size = head_size < 32 ? 32 : (head_size < 64 ? 
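/*
Worked examples of the flash-attention padding rule used above, i.e.
padded_head_size = head_size < 32 ? 32 : (head_size < 64 ? 64 : 128), applied whenever
(head_size % 32 != 0 && head_size < 64) || head_size % 64 != 0:
    head_size 20        -> padded to 32
    head_size 48        -> padded to 64
    head_size 80        -> padded to 128
    head_size 64 or 128 -> no padding
The padded heads are then laid out as [3, batch, heads, seq_len, padded_head_size] so the
downstream flash-attention path only has to handle a few fixed head sizes.
*/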
64 : 128); + T* workspace = (T*)InferenceContext::Instance().GetWorkSpace(); + T* key_pad_ptr = workspace + padded_head_size * query.size(0) * query.size(1) * query.size(2); + T* value_pad_ptr = key_pad_ptr + padded_head_size * query.size(0) * query.size(1) * 128; + pad_head_seq(workspace, + (T*)query.data_ptr(), + query.size(0) * query.size(1), + query.size(2), + query.size(2), + head_size, + padded_head_size, + InferenceContext::Instance().GetCurrentStream()); + pad_head_seq(key_pad_ptr, + (T*)key.data_ptr(), + query.size(0) * query.size(1), + key.size(2), + 128, + head_size, + padded_head_size, + InferenceContext::Instance().GetCurrentStream()); + pad_head_seq(value_pad_ptr, + (T*)value.data_ptr(), + query.size(0) * query.size(1), + key.size(2), + 128, + head_size, + padded_head_size, + InferenceContext::Instance().GetCurrentStream()); + return { + at::from_blob(workspace, + {query.size(0), query.size(1), query.size(2), padded_head_size}, + query.options()), + at::from_blob( + key_pad_ptr, {query.size(0), query.size(1), 128, padded_head_size}, query.options()), + at::from_blob( + value_pad_ptr, {query.size(0), query.size(1), 128, padded_head_size}, query.options())}; +} + +template +std::vector padd_add_transform(at::Tensor& query, + at::Tensor& key, + at::Tensor& value, + int heads, + bool add_padding) +{ + int head_size = query.size(2) / heads; + int key_value_length = add_padding ? 128 : key.size(1); + int padded_head_size = add_padding ? (head_size < 32 ? 32 : (head_size < 64 ? 64 : 128)) + : head_size; + T* workspace = (T*)InferenceContext::Instance().GetWorkSpace(); + T* key_pad_ptr = workspace + padded_head_size * query.size(0) * heads * query.size(1); + T* value_pad_ptr = key_pad_ptr + padded_head_size * query.size(0) * heads * key_value_length; + launch_pad_add_transform_0213(workspace, + (T*)query.data_ptr(), + query.size(0), + query.size(2), + query.size(1), + query.size(1), + heads, + padded_head_size, + InferenceContext::Instance().GetCurrentStream()); + launch_pad_add_transform_0213(key_pad_ptr, + (T*)key.data_ptr(), + key.size(0), + key.size(2), + key.size(1), + key_value_length, + heads, + padded_head_size, + InferenceContext::Instance().GetCurrentStream()); + launch_pad_add_transform_0213(value_pad_ptr, + (T*)value.data_ptr(), + value.size(0), + value.size(2), + value.size(1), + key_value_length, + heads, + padded_head_size, + InferenceContext::Instance().GetCurrentStream()); + return { + at::from_blob( + workspace, {query.size(0), heads, query.size(1), padded_head_size}, query.options()), + at::from_blob(key_pad_ptr, + {query.size(0), heads, key_value_length, padded_head_size}, + query.options()), + at::from_blob(value_pad_ptr, + {query.size(0), heads, key_value_length, padded_head_size}, + query.options())}; +} + +template +at::Tensor ds_vector_matmul(at::Tensor& input, + at::Tensor& weight, + bool async_op, + at::Tensor& q_scale, + bool q_int8, + bool transposed_mode) +{ + auto options = at::TensorOptions() + .dtype(input.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + int out_size = (q_int8 || transposed_mode) ? 
weight.size(0) : weight.size(1); + int bsz = input.size(0) * input.size(1); + + T* workspace = (T*)InferenceContext::Instance().GetWorkSpace(); + auto output = at::from_blob(workspace, {input.size(0), input.size(1), out_size}, options); + if (q_int8) { + quantized_gemm(output.data_ptr(), + (T*)input.data_ptr(), + weight, + q_scale, + q_scale.size(0), + bsz, + input.size(2)); + } else { + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + cublasSetStream(InferenceContext::Instance().GetCublasHandle(), + InferenceContext::Instance().GetCurrentStream(async_op)); + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + (transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N), + CUBLAS_OP_N, + weight.size(transposed_mode ? 0 : 1), + bsz, + input.size(2), + &alpha, + &gemm_beta, + (T*)weight.data_ptr(), + (T*)input.data_ptr(), + (T*)output.data_ptr(), +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + } + return output; +} + +template +at::Tensor ds_vector_matmul_int8(at::Tensor& input, + at::Tensor& weight, + at::Tensor& q_scale, + int groups, + int merge_count) +{ + auto input_cont = input.contiguous(); + auto options = at::TensorOptions() + .dtype(input_cont.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + + auto output = at::empty({input_cont.size(0), input_cont.size(1), weight.size(1)}, options); + + quantized_gemm(output, input_cont, weight, q_scale, groups, merge_count); + return output; +} + +template +at::Tensor mlp_unfused_cublas(at::Tensor& output, + at::Tensor& input, + at::Tensor& residual, + at::Tensor& input_bias, + at::Tensor& weight, + at::Tensor& weight1, + at::Tensor& bias, + at::Tensor& gamma, + at::Tensor& beta, + const float epsilon, + bool preLayerNorm, + bool mlp_after_attn, + at::Tensor& q_scale, + at::Tensor& q_scale1, + bool q_int8, + ActivationFuncType act_func_type, + bool transposed_mode) +{ + int bsz = input.size(0) * input.size(1); + T* inp_norm = (T*)InferenceContext::Instance().GetWorkSpace() + torch::numel(input) + + torch::numel(output); + T* intermediate = inp_norm + torch::numel(input); + + if (mlp_after_attn) { + launch_fused_residual_ln((T*)inp_norm, + (const T*)input.data_ptr(), + (const T*)residual.data_ptr(), + (const T*)input_bias.data_ptr(), + (const T*)gamma.data_ptr(), + (const T*)beta.data_ptr(), + epsilon, + bsz, + input.size(2), + InferenceContext::Instance().GetCurrentStream()); + } else { + ds_layer_norm_internal(inp_norm, input, gamma, beta, epsilon); + } + if (q_int8) { + quantized_gemm( + intermediate, inp_norm, weight, q_scale, q_scale.size(0), bsz, input.size(2)); + } else { + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + cublasSetStream(InferenceContext::Instance().GetCublasHandle(), + InferenceContext::Instance().GetCurrentStream()); + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + (transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N), + CUBLAS_OP_N, + weight.size(transposed_mode ? 0 : 1), + bsz, + input.size(2), + &alpha, + &gemm_beta, + (T*)weight.data_ptr(), + inp_norm, + intermediate, +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + } + if (act_func_type == ActivationFuncType::GELU) { + launch_bias_gelu(intermediate, + (T*)bias.data_ptr(), + (transposed_mode || q_int8) ? 
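/*
mlp_unfused_cublas strings together the standard transformer FFN; in pseudocode (names
illustrative, bias and quantization paths elided):
    x = mlp_after_attn ? LayerNorm(input + residual + input_bias) : LayerNorm(input)
    h = act(x @ W_intermediate + b)      // GELU or ReLU, per act_func_type
    y = h @ W_out                        // written into output
and it returns x (the normalized input) for the caller's later residual handling. A minimal
float reference of the two GEMMs around the activation (ReLU stands in for the dispatch
above):

#include <vector>

inline void ffn_ref(const float* x, const float* W1, const float* W2, float* y,
                    int tokens, int d_model, int d_ff)
{
    std::vector<float> h(d_ff);
    for (int t = 0; t < tokens; ++t) {
        for (int i = 0; i < d_ff; ++i) {
            float acc = 0.f;
            for (int d = 0; d < d_model; ++d) acc += x[t * d_model + d] * W1[d * d_ff + i];
            h[i] = acc > 0.f ? acc : 0.f;  // activation
        }
        for (int o = 0; o < d_model; ++o) {
            float acc = 0.f;
            for (int i = 0; i < d_ff; ++i) acc += h[i] * W2[i * d_model + o];
            y[t * d_model + o] = acc;
        }
    }
}
*/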
weight.size(0) : weight.size(1), + bsz, + InferenceContext::Instance().GetCurrentStream()); + } else if (act_func_type == ActivationFuncType::ReLU) { + launch_bias_relu(intermediate, + (T*)bias.data_ptr(), + (transposed_mode || q_int8) ? weight.size(0) : weight.size(1), + bsz, + InferenceContext::Instance().GetCurrentStream()); + } + + if (q_int8) { + quantized_gemm(output.data_ptr(), + intermediate, + weight1, + q_scale1, + q_scale1.size(0), + bsz, + input.size(2)); + } else { + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + cublasSetStream(InferenceContext::Instance().GetCublasHandle(), + InferenceContext::Instance().GetCurrentStream()); + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + (transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N), + CUBLAS_OP_N, + weight1.size(transposed_mode ? 0 : 1), + bsz, + weight1.size(transposed_mode ? 1 : 0), + &alpha, + &gemm_beta, + (T*)weight1.data_ptr(), + intermediate, + (T*)output.data_ptr(), +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + } + + return torch::from_blob(inp_norm, input.sizes(), input.options()); +} + +template +std::vector ds_mlp_gemm(at::Tensor& input, + at::Tensor& residual, + at::Tensor& input_bias, + at::Tensor& weight_interm, + at::Tensor& weight_out, + at::Tensor& bias, + at::Tensor& gamma, + at::Tensor& beta, + const float epsilon, + bool preLayerNorm, + bool mlp_after_attn, + at::Tensor& q_scale, + at::Tensor& q_scale1, + bool q_int8, + int activation_type, + bool transposed_mode) +{ + auto options = at::TensorOptions() + .dtype(input.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + + int out_size = (q_int8 || transposed_mode) ? weight_out.size(0) : weight_out.size(1); + auto output = + at::from_blob((T*)InferenceContext::Instance().GetWorkSpace() + torch::numel(input), + {input.size(0), input.size(1), out_size}, + options); + int bsz = input.size(0) * input.size(1); + + auto act_func_type = static_cast(activation_type); + auto res_add = mlp_unfused_cublas(output, + mlp_after_attn ? input : residual, + residual, + input_bias, + weight_interm, + weight_out, + bias, + gamma, + beta, + epsilon, + preLayerNorm, + mlp_after_attn, + q_scale, + q_scale1, + q_int8, + act_func_type, + transposed_mode); + + return {output, res_add}; +} + +template +std::vector ds_rms_mlp_gemm(at::Tensor& input, + at::Tensor& residual, + at::Tensor& weight_interm, + at::Tensor& weight_out, + at::Tensor& gamma, + const float epsilon, + at::Tensor& q_scale, + at::Tensor& q_scale1, + bool q_int8, + int activation_type, + bool transposed_mode) +{ + const int bsz = input.size(0) * input.size(1); + const size_t input_neurons = input.size(2); + const size_t mlp_1_out_neurons = transposed_mode ? weight_interm.size(0) + : weight_interm.size(1); + const size_t mlp_2_in_neurons = transposed_mode ? 
weight_out.size(1) : weight_out.size(0); + + auto options = at::TensorOptions() + .dtype(input.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + + T* output_ptr = (T*)InferenceContext::Instance().GetWorkSpace() + torch::numel(input); + T* inp_norm_ptr = output_ptr + torch::numel(input); + T* intermediate_ptr = inp_norm_ptr + torch::numel(input); + + auto output = at::from_blob(output_ptr, input.sizes(), options); + auto inp_norm = at::from_blob(inp_norm_ptr, input.sizes(), options); + auto intermediate_gemm = + at::from_blob(intermediate_ptr, {input.size(0), input.size(1), mlp_1_out_neurons}, options); + + auto act_func_type = static_cast(activation_type); + + // RMS Norm, we'll update the residual in-place + launch_rms_norm((T*)inp_norm.data_ptr(), + (T*)residual.data_ptr(), + (const T*)input.data_ptr(), + (const T*)residual.data_ptr(), + (const T*)gamma.data_ptr(), + epsilon, + bsz, + input_neurons, + InferenceContext::Instance().GetCurrentStream()); + + if (q_int8) { + quantized_gemm(intermediate_ptr, + (T*)inp_norm.data_ptr(), + weight_interm, + q_scale, + q_scale.size(0), + bsz, + input_neurons); + } else { + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + cublasSetStream(InferenceContext::Instance().GetCublasHandle(), + InferenceContext::Instance().GetCurrentStream()); + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + (transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N), + CUBLAS_OP_N, + mlp_1_out_neurons, + bsz, + input_neurons, + &alpha, + &gemm_beta, + (T*)weight_interm.data_ptr(), + (T*)inp_norm.data_ptr(), + intermediate_ptr, +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + } + + if (act_func_type == ActivationFuncType::GELU) { + launch_bias_gelu(intermediate_ptr, + (T*)nullptr, + mlp_1_out_neurons, + bsz, + InferenceContext::Instance().GetCurrentStream()); + } else if (act_func_type == ActivationFuncType::ReLU) { + launch_bias_relu(intermediate_ptr, + (T*)nullptr, + mlp_1_out_neurons, + bsz, + InferenceContext::Instance().GetCurrentStream()); + } else if (act_func_type == ActivationFuncType::GATED_GELU) { + launch_gated_activation(intermediate_ptr, + (const T*)intermediate_ptr, + (const T*)nullptr, + bsz, + mlp_1_out_neurons, + mlp_1_out_neurons, + true, + InferenceContext::Instance().GetCurrentStream()); + } else if (act_func_type == ActivationFuncType::GATED_SILU) { + launch_gated_activation(intermediate_ptr, + (const T*)intermediate_ptr, + (const T*)nullptr, + bsz, + mlp_1_out_neurons, + mlp_1_out_neurons, + false, + InferenceContext::Instance().GetCurrentStream()); + } + + if (q_int8) { + quantized_gemm(output.data_ptr(), + intermediate_ptr, + weight_out, + q_scale1, + q_scale1.size(0), + bsz, + input.size(2)); + } else { + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + cublasSetStream(InferenceContext::Instance().GetCublasHandle(), + InferenceContext::Instance().GetCurrentStream()); + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + (transposed_mode ? 
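+ // The trailing mlp_1_out_neurons argument to this call overrides the wrapper's default ldb
+ // (its b_stride parameter), presumably because gated activations leave the intermediate with a
+ // row stride of mlp_1_out_neurons even though only mlp_2_in_neurons columns feed this GEMM.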
CUBLAS_OP_T : CUBLAS_OP_N), + CUBLAS_OP_N, + input_neurons, + bsz, + mlp_2_in_neurons, + &alpha, + &gemm_beta, + (T*)weight_out.data_ptr(), + intermediate_ptr, + (T*)output.data_ptr(), +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard, +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP, +#endif + mlp_1_out_neurons); + } + + return {output, residual}; +} + +template +at::Tensor fused_gemm_gelu(at::Tensor& input, + at::Tensor& weight, + at::Tensor& weight_scale, + at::Tensor& bias, + at::Tensor& weight_out, + at::Tensor& weight_out_scale, + bool q_int8, + bool transposed_mode) +{ + auto options = at::TensorOptions() + .dtype(input.options().dtype()) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + + int intm_dim = (transposed_mode || q_int8) ? weight.size(0) : weight.size(1); + + // auto output = at::from_blob((T*)InferenceContext::Instance().GetWorkSpace() + + // torch::numel(input), + // {input.size(0), input.size(1), out_size}, + // options); + // T* intermediate = (T*)input.data_ptr() + torch::numel(input); + auto intermediate = at::empty({input.size(0), input.size(1), intm_dim}, options); + + int bsz = input.size(0) * input.size(1); + + float alpha = (T)1.0; + float gemm_beta = (T)0.0; + if (q_int8) { + quantized_gemm(intermediate.data_ptr(), + (T*)input.data_ptr(), + weight, + weight_scale, + weight_scale.size(0), + bsz, + input.size(2)); + } else { + cublasSetStream(InferenceContext::Instance().GetCublasHandle(), + InferenceContext::Instance().GetCurrentStream()); + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + (transposed_mode ? CUBLAS_OP_T : CUBLAS_OP_N), + CUBLAS_OP_N, + intm_dim, + bsz, + input.size(2), + &alpha, + &gemm_beta, + (T*)weight.data_ptr(), + (T*)input.data_ptr(), + (T*)intermediate.data_ptr(), +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + } + launch_bias_gelu((T*)intermediate.data_ptr(), + (T*)bias.data_ptr(), + intm_dim, + bsz, + InferenceContext::Instance().GetCurrentStream()); + + int out_size = (transposed_mode || q_int8) ? weight_out.size(0) : weight_out.size(1); + auto output = at::empty({input.size(0), input.size(1), out_size}, options); + if (q_int8) { + quantized_gemm(output.data_ptr(), + (T*)intermediate.data_ptr(), + weight_out, + weight_out_scale, + weight_out_scale.size(0), + bsz, + input.size(2)); + } else { + cublas_gemm_ex(InferenceContext::Instance().GetCublasHandle(), + (transposed_mode ? 
CUBLAS_OP_T : CUBLAS_OP_N), + CUBLAS_OP_N, + out_size, + bsz, + intm_dim, + &alpha, + &gemm_beta, + (T*)weight_out.data_ptr(), + (T*)intermediate.data_ptr(), + (T*)output.data_ptr(), +#ifdef __HIP_PLATFORM_AMD__ + rocblas_gemm_algo_standard); +#else + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + } + // cudaEventRecord(InferenceContext::Instance().GetCompEvent(2), + // InferenceContext::Instance().GetCurrentStream(true)); + return output; +} + +template +at::Tensor& residual_add_bias(at::Tensor& hidden_state, + at::Tensor& residual, + const at::Tensor& attention_output, + const at::Tensor& attention_bias, + const at::Tensor& final_bias, + const int mp_size, + const bool mlp_after_attn, + const bool add_bias, + const bool preln) +{ + int bsz = residual.size(0) * residual.size(1); + int hidden_size = residual.size(2); + if (mlp_after_attn) + launch_bias_residual(static_cast(residual.data_ptr()), + static_cast(hidden_state.data_ptr()), + static_cast(attention_output.data_ptr()), + static_cast(final_bias.data_ptr()), + static_cast(attention_bias.data_ptr()), + bsz, + hidden_size, + mp_size, + preln, + InferenceContext::Instance().GetCurrentStream()); + else + launch_gptj_residual_add( + static_cast(residual.data_ptr()), + static_cast(hidden_state.data_ptr()), + static_cast(attention_output.data_ptr()), + static_cast(final_bias.data_ptr()), + static_cast((add_bias ? attention_bias.data_ptr() : nullptr)), + hidden_size, + bsz, + mp_size, + InferenceContext::Instance().GetCurrentStream()); + return residual; +} + +#define DISPATCH_VECTOR_ADD(T_TYPE, C_TYPE) \ + if (a.scalar_type() == at::k##T_TYPE) { \ + launch_vector_add((C_TYPE*)(a.data_ptr()), \ + (const C_TYPE*)(a.data_ptr()), \ + (const C_TYPE*)(b.data_ptr()), \ + gamma, \ + total_elems, \ + InferenceContext::Instance().GetCurrentStream()); \ + } + +at::Tensor& _vector_add(at::Tensor& a, at::Tensor& b, float gamma) +{ + const int total_elems = a.numel(); + + DISPATCH_VECTOR_ADD(Float, float) + DISPATCH_VECTOR_ADD(Half, __half) +#ifdef BF16_AVAILABLE + DISPATCH_VECTOR_ADD(BFloat16, __nv_bfloat16) +#endif + + return a; +} + +std::vector apply_rotary_pos_emb(at::Tensor& mixed_query, + at::Tensor& key_layer, + unsigned rotary_dim, + unsigned offset, + unsigned num_heads, + bool rotate_half, + float rope_theta) +{ + auto query_cont = mixed_query.contiguous(); + auto key_cont = key_layer.contiguous(); + + unsigned bsz = mixed_query.size(0); + unsigned head_size = mixed_query.size(2) / num_heads; + unsigned seq_len = mixed_query.size(1); + + if (mixed_query.scalar_type() == at::kFloat) + launch_apply_rotary_pos_emb((float*)query_cont.data_ptr(), + (float*)key_cont.data_ptr(), + head_size, + seq_len, + rotary_dim, + offset, + num_heads, + bsz, + rope_theta, + InferenceContext::Instance().GetCurrentStream(), + InferenceContext::Instance().GetMaxTokenLength()); + else + launch_apply_rotary_pos_emb<__half>((__half*)query_cont.data_ptr(), + (__half*)key_cont.data_ptr(), + head_size, + seq_len, + rotary_dim, + offset, + num_heads, + bsz, + rope_theta, + InferenceContext::Instance().GetCurrentStream(), + InferenceContext::Instance().GetMaxTokenLength()); + return {query_cont, key_cont}; +} + +#define DISPATCH_MOE_RESIDUAL(T_TYPE, C_TYPE) \ + if (moe_res.scalar_type() == torch::T_TYPE) { \ + launch_moe_res_matmul((C_TYPE*)moe_res.data_ptr(), \ + (C_TYPE*)coef.data_ptr(), \ + (C_TYPE*)output.data_ptr(), \ + M, \ + N, \ + InferenceContext::Instance().GetCurrentStream()); \ + } + +at::Tensor moe_res_matmul(at::Tensor& moe_res, at::Tensor& coef, at::Tensor& 
output) +{ + int M = moe_res.size(0) * moe_res.size(1); + int N = moe_res.size(2); + InferenceContext::Instance().SynchComm(); + + DISPATCH_MOE_RESIDUAL(kFloat, float) + DISPATCH_MOE_RESIDUAL(kHalf, __half) +#ifdef BF16_AVAILABLE + DISPATCH_MOE_RESIDUAL(kBFloat16, __nv_bfloat16) +#endif + + return output; +} + +void ds_release_workspace() { InferenceContext::Instance().release_workspace(); } + +bool ds_retake_workspace() { return InferenceContext::Instance().retake_workspace(); } + +template +at::Tensor ds_dequantize(at::Tensor& weight, at::Tensor& qscale, int groups) +{ + auto options = at::TensorOptions() + .dtype(torch::kFloat16) + .layout(at::kStrided) + .device(at::kCUDA) + .requires_grad(false); + auto weight16 = at::empty({weight.size(0), weight.size(1)}, options); + + launch_dequantize((T*)weight16.data_ptr(), + (int8_t*)weight.data_ptr(), + (float*)qscale.data_ptr(), + weight.size(0), + weight.size(1), + groups, + InferenceContext::Instance().GetCurrentStream()); + + return weight16; +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("softmax_context_int8", + &ds_softmax_context1<__half>, + "DeepSpeed attention with int8 (CUDA)"); + + // The following functions handle type dispatching internally + m.def("gated_activation", &ds_gated_activation, "DeepSpeed Bias GEGLU (CUDA)"); + m.def("layer_norm", &ds_layer_norm, "DeepSpeed layer norm (CUDA)"); + m.def( + "_layer_norm_residual", &ds_layer_norm_residual, "DeepSpeed layer norm + residual (CUDA)"); + m.def("layer_norm_residual_store_pre_ln_res", + &ds_layer_norm_residual_store_pre_ln_res, + "DeepSpeed layer norm + store pre Layernorm residual (CUDA)"); + m.def("rms_norm", &ds_rms_norm, "DeepSpeed rms norm (CUDA)"); + m.def("pre_rms_norm", &ds_pre_rms_norm, "DeepSpeed pre rms norm (CUDA)"); + m.def("_vector_add", &_vector_add, "DeepSpeed vector add (CUDA)"); + m.def("apply_rotary_pos_emb", &apply_rotary_pos_emb, "DeepSpeed mlp with fp16 (CUDA)"); + m.def("moe_res_matmul", &moe_res_matmul, "DeepSpeed moe residual matmul (CUDA)"); + m.def("reset_cache", &reset_cache, "Reset Cache for generation tasks"); + m.def("release_workspace", &ds_release_workspace, "DeepSpeed Release Workspace"); + m.def("retake_workspace", &ds_retake_workspace, "DeepSpeed Retake Workspace"); + + // The following functions are templated and need to be explicitly instantiated and bound + // to different python methods +#define DEF_OPS(_name, _dtype) \ + m.def("softmax_" #_name, &ds_softmax<_dtype>, "DeepSpeed SoftMax with " #_name " (CUDA)"); \ + m.def("softmax_context_" #_name, \ + &ds_softmax_context<_dtype>, \ + "DeepSpeed attention with " #_name " (CUDA)"); \ + m.def("bias_gelu_" #_name, &ds_bias_gelu<_dtype>, "DeepSpeed Gelu with " #_name " (CUDA)"); \ + m.def("bias_add_" #_name, &ds_bias_add<_dtype>, "DeepSpeed Bias Add with " #_name " (CUDA)"); \ + m.def("bias_relu_" #_name, &ds_bias_relu<_dtype>, "DeepSpeed ReLU with " #_name " (CUDA)"); \ + m.def("bias_residual_" #_name, \ + &ds_bias_residual<_dtype>, \ + "DeepSpeed residual-bias add with " #_name " (CUDA)"); \ + m.def("qkv_gemm_" #_name, &ds_qkv_gemm<_dtype>, "DeepSpeed qkv gemm with " #_name " (CUDA)"); \ + m.def("rms_qkv_gemm_" #_name, \ + &ds_rms_qkv<_dtype>, \ + "DeepSpeed rms qkv gemm with " #_name " (CUDA)"); \ + m.def("mlp_gemm_" #_name, &ds_mlp_gemm<_dtype>, "DeepSpeed mlp with " #_name " (CUDA)"); \ + m.def("rms_mlp_gemm_" #_name, \ + &ds_rms_mlp_gemm<_dtype>, \ + "DeepSpeed rms mlp gemm with " #_name " (CUDA)"); \ + m.def("vector_matmul_" #_name, \ + &ds_vector_matmul<_dtype>, \ + 
"DeepSpeed vector-MM with " #_name " (CUDA)"); \ + m.def("linear_layer_" #_name, \ + &ds_linear_layer<_dtype>, \ + "DeepSpeed linear_layer with " #_name " (CUDA)"); \ + m.def("fused_gemm_gelu_" #_name, \ + &fused_gemm_gelu<_dtype>, \ + "DeepSpeed mlp with " #_name " (CUDA)"); \ + m.def("residual_add_bias_" #_name, \ + &residual_add_bias<_dtype>, \ + "DeepSpeed residual add with " #_name " (CUDA)"); \ + m.def("einsum_sec_sm_ecm_" #_name, \ + &einsum_sec_sm_ecm<_dtype>, \ + "DeepSpeed vector-MM with " #_name " (CUDA)"); \ + m.def("add_padding_" #_name, \ + &add_padding<_dtype>, \ + "DeepSpeed residual add with " #_name " (CUDA)"); \ + m.def("pad_transform_" #_name, \ + &padd_add_transform<_dtype>, \ + "DeepSpeed residual add with " #_name " (CUDA)"); \ + m.def("allocate_workspace_" #_name, \ + &allocate_workspace<_dtype>, \ + "DeepSpeed memory allocation for GPT inference with " #_name " (CUDA)"); \ + m.def("dequantize_" #_name, \ + &ds_dequantize<_dtype>, \ + "DeepSpeed dequantize with " #_name " (CUDA)") + + DEF_OPS(fp32, float); + DEF_OPS(fp16, __half); +#ifdef BF16_AVAILABLE + DEF_OPS(bf16, __nv_bfloat16); +#endif +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/relu.cu b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/relu.cu new file mode 100644 index 0000000000000000000000000000000000000000..40926b776cf2f0bd3c46f44c6eda2ed3ec6bfce5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/relu.cu @@ -0,0 +1,71 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "conversion_utils.h" +#include "inference_cuda_layers.h" +#include "memory_access_utils.h" + +namespace cg = cooperative_groups; +#define MAX_CAP 4 +#define MAX_SEQ 2048 + +inline __device__ float relu(const float x) { return x < 0 ? 
0 : x; } + +/* +In-place relu(biasAdd(x)) for channels last +*/ +template +__global__ void fused_bias_relu(T* input, const T* bias, int total_count, int intermediate_size) +{ + // Input restriction: intermediate_size % vals_per_access == 0 + constexpr int granularity = 16; + constexpr int values_per_access = granularity / sizeof(T); + const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * values_per_access; + + if (offset < total_count) { + T data[values_per_access]; + T data_bias[values_per_access]; + mem_access::load_global(data, input + offset); + mem_access::load_global( + data_bias, bias + (offset % intermediate_size), bias != nullptr); + +#pragma unroll + for (int i = 0; i < values_per_access; i++) { + float data_f = conversion::to(data[i]); + float bias_f = conversion::to(data_bias[i]); + data[i] = conversion::to(relu(data_f + bias_f)); + } + + mem_access::store_global(input + offset, data); + } +} + +template +void launch_bias_relu(T* input, + const T* bias, + int intermediate_size, + int batch_size, + cudaStream_t stream) +{ + constexpr int threads = 1024; + constexpr int granularity = 16; + + const int total_count = batch_size * intermediate_size; + const int elems_per_block = threads * (granularity / sizeof(T)); + dim3 block_dims(threads); + dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block); + + fused_bias_relu<<>>( + input, bias, total_count, intermediate_size); +} + +#define INSTANTIATE_LAUNCH_BIAS_RELU(T) \ + template void launch_bias_relu(T*, const T*, int, int, cudaStream_t); + +INSTANTIATE_LAUNCH_BIAS_RELU(float) +#ifdef BF16_AVAILABLE +INSTANTIATE_LAUNCH_BIAS_RELU(__nv_bfloat16) +#endif +INSTANTIATE_LAUNCH_BIAS_RELU(__half) diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/softmax.cu b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/softmax.cu new file mode 100644 index 0000000000000000000000000000000000000000..bb06cc149ef4fb29dae84a7b16173d1babfa5e50 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/softmax.cu @@ -0,0 +1,562 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include "conversion_utils.h" +#include "inference_cuda_layers.h" + +#ifndef __HIP_PLATFORM_AMD__ +#include +#endif +#include +#include +#include + +#define MAX_REG_SIZE 8 + +#define minus_infinity -10000.0 + +void CheckCudaErrorAux(const char* file, unsigned line) +{ + cudaError_t err = cudaGetLastError(); + if (err == cudaSuccess) return; + std::cerr << cudaGetErrorString(err) << "(" << err << ") at " << file << ":" << line + << std::endl; + throw std::runtime_error("CUDA ERROR!!!\n"); +} + +#define CUDA_CHECK_ERROR() CheckCudaErrorAux(__FILE__, __LINE__) + +namespace cg = cooperative_groups; + +template +__global__ void attn_softmax_v2(T* vals, + T* mask, + T* alibi, + float layer_scale, + bool triangular, + bool recompute, + bool local_attention, + int window_size, + int total_count, + int heads, + int sequence_length, + int num_seq, + int head_offset, + int mask_stride, + int mp_size, + int reduceWidth) +{ + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + float2 low_data[MAX_REG_SIZE]; + float2 high_data[MAX_REG_SIZE]; + const T zero_h = conversion::to(0.f); + + int wid = threadIdx.x >> 5; + int lane = threadIdx.x & 0x1f; + int warp_num = blockDim.x >> 5; + + int reduce_blocks = reduceWidth >> 5; + int seq_lane = threadIdx.x % reduceWidth; + + __shared__ float partialSum[MAX_WARP_NUM]; + + int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); + int batch_idx = iter_offset / (num_seq * heads); + int alibi_offset = batch_idx * heads * mp_size + head_offset; + int mask_offset = batch_idx * mask_stride + (iter_offset % mask_stride); + + if (iter_offset < total_count) { + vals += (iter_offset * sequence_length); + + alibi_offset = (alibi_offset + ((iter_offset / num_seq) % heads)) * sequence_length; + mask_offset = mask_offset * sequence_length; + int seq_id = iter_offset % num_seq; + + int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length); + int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2)) + ? (real_seq_id >> 2) - (window_size >> 2) + : 0; + int window_stride = + (local_attention && real_seq_id >= window_size) ? real_seq_id - window_size : -1; + + float max_val = minus_infinity; + // if (lane == 0) printf("%d, %d: %d \n", wid, blockIdx.x, mask_offset); + for (int i = 0; i < iterations; i++) { + int data_id = i * (reduceWidth << 2) + (seq_lane); + bool check = (data_id >> 2) >= window_stride4; + bool low_x_check = check && (data_id < sequence_length) && + (!triangular || (data_id <= seq_id)) && (data_id > window_stride); + bool low_y_check = check && ((data_id + reduceWidth) < sequence_length) && + (!triangular || ((data_id + reduceWidth) <= seq_id)) && + ((data_id + reduceWidth) > window_stride); + bool high_x_check = check && ((data_id + reduceWidth * 2) < sequence_length) && + (!triangular || ((data_id + reduceWidth * 2) <= seq_id)) && + ((data_id + reduceWidth * 2) > window_stride); + bool high_y_check = check && ((data_id + reduceWidth * 3) < sequence_length) && + (!triangular || ((data_id + reduceWidth * 3) <= seq_id)) && + ((data_id + reduceWidth * 3) > window_stride); + + if (mask && alibi) { + low_data[i].x = low_x_check + ? conversion::to(vals[data_id]) * layer_scale + + (conversion::to(alibi[data_id + alibi_offset])) + + (conversion::to(mask[data_id + mask_offset])) + : minus_infinity; + low_data[i].y = + low_y_check + ? 
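+ // Scores are scaled by layer_scale and offset by the ALiBi bias and the attention mask;
+ // positions that fail the bounds/causal/window checks are set to minus_infinity (-10000.f)
+ // so they contribute essentially nothing after the exponentiation below.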
conversion::to(vals[data_id + reduceWidth]) * layer_scale + + (conversion::to(alibi[data_id + alibi_offset + reduceWidth])) + + (conversion::to(mask[data_id + mask_offset + reduceWidth])) + : minus_infinity; + high_data[i].x = + high_x_check + ? conversion::to(vals[data_id + reduceWidth * 2]) * layer_scale + + (conversion::to( + alibi[data_id + alibi_offset + reduceWidth * 2])) + + (conversion::to(mask[data_id + mask_offset + reduceWidth * 2])) + : minus_infinity; + high_data[i].y = + high_y_check + ? conversion::to(vals[data_id + reduceWidth * 3]) * layer_scale + + (conversion::to( + alibi[data_id + alibi_offset + reduceWidth * 3])) + + (conversion::to(mask[data_id + mask_offset + reduceWidth * 3])) + : minus_infinity; + } else if (mask) { + low_data[i].x = low_x_check + ? conversion::to(vals[data_id]) * layer_scale + + (conversion::to(mask[data_id + mask_offset])) + : minus_infinity; + low_data[i].y = + low_y_check + ? conversion::to(vals[data_id + reduceWidth]) * layer_scale + + (conversion::to(mask[data_id + mask_offset + reduceWidth])) + : minus_infinity; + high_data[i].x = + high_x_check + ? conversion::to(vals[data_id + reduceWidth * 2]) * layer_scale + + (conversion::to(mask[data_id + mask_offset + reduceWidth * 2])) + : minus_infinity; + high_data[i].y = + high_y_check + ? conversion::to(vals[data_id + reduceWidth * 3]) * layer_scale + + (conversion::to(mask[data_id + mask_offset + reduceWidth * 3])) + : minus_infinity; + } else if (alibi) { + low_data[i].x = low_x_check + ? conversion::to(vals[data_id]) * layer_scale + + (conversion::to(alibi[data_id + alibi_offset])) + : minus_infinity; + low_data[i].y = + low_y_check + ? conversion::to(vals[data_id + reduceWidth]) * layer_scale + + (conversion::to(alibi[data_id + alibi_offset + reduceWidth])) + : minus_infinity; + high_data[i].x = + high_x_check + ? conversion::to(vals[data_id + reduceWidth * 2]) * layer_scale + + (conversion::to( + alibi[data_id + alibi_offset + reduceWidth * 2])) + : minus_infinity; + high_data[i].y = + high_y_check + ? conversion::to(vals[data_id + reduceWidth * 3]) * layer_scale + + (conversion::to( + alibi[data_id + alibi_offset + reduceWidth * 3])) + : minus_infinity; + } else { + low_data[i].x = low_x_check ? conversion::to(vals[data_id]) * layer_scale + : minus_infinity; + low_data[i].y = + low_y_check ? conversion::to(vals[data_id + reduceWidth]) * layer_scale + : minus_infinity; + high_data[i].x = + high_x_check + ? conversion::to(vals[data_id + reduceWidth * 2]) * layer_scale + : minus_infinity; + high_data[i].y = + high_y_check + ? conversion::to(vals[data_id + reduceWidth * 3]) * layer_scale + : minus_infinity; + } + + // if(lane == 0) printf("%f , %d, %d \n", low_data[i].x, data_id, seq_id); + max_val = (low_data[i].x > max_val ? low_data[i].x : max_val); + max_val = (low_data[i].y > max_val ? low_data[i].y : max_val); + max_val = (high_data[i].x > max_val ? high_data[i].x : max_val); + max_val = (high_data[i].y > max_val ? high_data[i].y : max_val); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { + auto temp = g.shfl_xor(max_val, i); + max_val = (temp > max_val ? temp : max_val); + } + + if (reduceWidth > WARP_SIZE) { + if (lane == 0) partialSum[wid] = max_val; + b.sync(); + + if (lane < warp_num) max_val = partialSum[lane]; + + b.sync(); + + for (int i = 1; i < reduce_blocks; i *= 2) { + auto temp = g.shfl_xor(max_val, i); + max_val = (temp > max_val ? 
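+ // Each warp's partial maximum was staged in partialSum; shfl_xor over strides 1, 2, 4, ...
+ // now folds them together (a butterfly all-reduce across the participating warps).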
temp : max_val); + } + + max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE); + } + float sum = 0; + for (int i = 0; i < iterations; i++) { + low_data[i].x = __expf(low_data[i].x - max_val); + low_data[i].y = __expf(low_data[i].y - max_val); + high_data[i].x = __expf(high_data[i].x - max_val); + high_data[i].y = __expf(high_data[i].y - max_val); + + sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i); + + if (reduceWidth > WARP_SIZE) { + if (lane == 0) partialSum[wid] = sum; + b.sync(); + + if (lane < warp_num) sum = partialSum[lane]; + + b.sync(); + + for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); } + + sum = g.shfl(sum, threadIdx.x / WARP_SIZE); + } + sum += 1e-6; + for (int i = 0; i < iterations; i++) { + int data_id = i * (reduceWidth << 2) + (seq_lane); + if (data_id < sequence_length) { + vals[data_id] = conversion::to(low_data[i].x / sum); + if ((data_id + reduceWidth) < sequence_length) + vals[data_id + reduceWidth] = conversion::to(low_data[i].y / sum); + if ((data_id + reduceWidth * 2) < sequence_length) + vals[data_id + reduceWidth * 2] = conversion::to(high_data[i].x / sum); + if ((data_id + reduceWidth * 3) < sequence_length) + vals[data_id + reduceWidth * 3] = conversion::to(high_data[i].y / sum); + } + } + } +} + +template +__global__ void attn_softmax_v2(float* vals, + float* attn_mask, + float* alibi, + float layer_scale, + bool triangular, + bool recompute, + bool local_attention, + int window_size, + int total_count, + int heads, + int sequence_length, + int num_seq, + int head_offset, + int mask_stride, + int mp_size, + int reduceWidth) +{ + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + float4 data[MAX_REG_SIZE]; + + int wid = threadIdx.x >> 5; + int lane = threadIdx.x & 0x1f; + int warp_num = blockDim.x >> 5; + + int reduce_blocks = reduceWidth >> 5; + int seq_lane = threadIdx.x % reduceWidth; + + __shared__ float partialSum[MAX_WARP_NUM]; + + int iter_offset = blockIdx.x * (warp_num / reduce_blocks) + (wid / reduce_blocks); + if (iter_offset < total_count) { + vals += (iter_offset * sequence_length); + + int batch_idx = iter_offset / (num_seq * heads); + int mask_offset = batch_idx * mask_stride + (iter_offset % mask_stride); + mask_offset = mask_offset * sequence_length; + int seq_id = iter_offset % num_seq; + + int real_seq_id = seq_id + (num_seq == sequence_length ? 0 : sequence_length); + int window_stride4 = (local_attention && (real_seq_id >> 2) > (window_size >> 2)) + ? (real_seq_id >> 2) - (window_size >> 2) + : 0; + int window_stride = + (local_attention && real_seq_id >= window_size) ? 
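+ // With local attention only the trailing window_size positions are attended: window_stride is
+ // the largest column index that must still be excluded, and -1 disables the lower bound.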
real_seq_id - window_size : -1; + + float max_val = minus_infinity; + + for (int i = 0; i < iterations; i++) { + int data_id = i * (reduceWidth << 2) + (seq_lane); + bool check = (data_id >> 2) >= window_stride4; + bool x_check = check && (data_id < sequence_length) && + (!triangular || (data_id <= seq_id)) && (data_id > window_stride); + bool y_check = check && ((data_id + reduceWidth) < sequence_length) && + (!triangular || ((data_id + reduceWidth) <= seq_id)) && + ((data_id + reduceWidth) > window_stride); + bool z_check = check && ((data_id + reduceWidth * 2) < sequence_length) && + (!triangular || ((data_id + reduceWidth * 2) <= seq_id)) && + ((data_id + reduceWidth * 2) > window_stride); + bool w_check = check && ((data_id + reduceWidth * 3) < sequence_length) && + (!triangular || ((data_id + reduceWidth * 3) <= seq_id)) && + ((data_id + reduceWidth * 3) > window_stride); + + if (attn_mask) { + data[i].x = x_check ? vals[data_id] + attn_mask[data_id + mask_offset] + : minus_infinity; + data[i].y = y_check ? vals[data_id + reduceWidth] + + attn_mask[data_id + mask_offset + reduceWidth] + : minus_infinity; + data[i].z = z_check ? vals[data_id + reduceWidth * 2] + + attn_mask[data_id + mask_offset + reduceWidth * 2] + : minus_infinity; + data[i].w = w_check ? vals[data_id + reduceWidth * 3] + + attn_mask[data_id + mask_offset + reduceWidth * 3] + : minus_infinity; + } else { + data[i].x = x_check ? vals[data_id] : minus_infinity; + data[i].y = y_check ? vals[data_id + reduceWidth] : minus_infinity; + data[i].z = z_check ? vals[data_id + reduceWidth * 2] : minus_infinity; + data[i].w = w_check ? vals[data_id + reduceWidth * 3] : minus_infinity; + } + + max_val = (data[i].x > max_val ? data[i].x : max_val); + max_val = (data[i].y > max_val ? data[i].y : max_val); + max_val = (data[i].z > max_val ? data[i].z : max_val); + max_val = (data[i].w > max_val ? data[i].w : max_val); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { + auto temp = g.shfl_xor(max_val, i); + max_val = (temp > max_val ? temp : max_val); + } + + if (reduceWidth > WARP_SIZE) { + if (lane == 0) partialSum[wid] = max_val; + b.sync(); + + if (lane < warp_num) max_val = partialSum[lane]; + + b.sync(); + + for (int i = 1; i < reduce_blocks; i *= 2) { + auto temp = g.shfl_xor(max_val, i); + max_val = (temp > max_val ? 
temp : max_val); + } + + max_val = g.shfl(max_val, threadIdx.x / WARP_SIZE); + } + + float sum = 0; + for (int i = 0; i < iterations; i++) { + data[i].x = __expf(data[i].x - max_val); + data[i].y = __expf(data[i].y - max_val); + data[i].z = __expf(data[i].z - max_val); + data[i].w = __expf(data[i].w - max_val); + + sum += (data[i].x + data[i].y + data[i].z + data[i].w); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) sum += g.shfl_xor(sum, i); + + if (reduceWidth > WARP_SIZE) { + if (lane == 0) partialSum[wid] = sum; + b.sync(); + + if (lane < warp_num) sum = partialSum[lane]; + + b.sync(); + + for (int i = 1; i < reduce_blocks; i *= 2) { sum += g.shfl_xor(sum, i); } + + sum = g.shfl(sum, threadIdx.x / WARP_SIZE); + } + sum += 1e-6; + + for (int i = 0; i < iterations; i++) { + int data_id = i * (reduceWidth << 2) + (seq_lane); + if (data_id < sequence_length) { + vals[data_id] = data[i].x / sum; + if ((data_id + reduceWidth) < sequence_length) + vals[data_id + reduceWidth] = data[i].y / sum; + if ((data_id + reduceWidth * 2) < sequence_length) + vals[data_id + reduceWidth * 2] = data[i].z / sum; + if ((data_id + reduceWidth * 3) < sequence_length) + vals[data_id + reduceWidth * 3] = data[i].w / sum; + } + } + } +} + +#define LAUNCH_ATTN_SOFTMAX_V2(iterations) \ + attn_softmax_v2<<>>(vals, \ + mask, \ + alibi, \ + layer_scale, \ + triangular, \ + recompute, \ + local_attention, \ + window_size, \ + total_count, \ + heads, \ + sequence_length, \ + num_seq, \ + head_offset, \ + mask_stride, \ + mp_size, \ + reduce_width); + +template +void launch_attn_softmax_v2(T* vals, + T* mask, + T* alibi, + float layer_scale, + bool triangular, + bool recompute, + bool local_attention, + int window_size, + int batch_size, + int heads, + int num_seq, + int sequence_length, + int head_offset, + int mask_stride, + int mp_size, + cudaStream_t stream) +{ + const int total_count = batch_size * heads * num_seq; + + // Scheduling Overview + // 4 element unroll with power of 2 `reduce_width` threads to a ceiling of `attn_threads` + // Each block should be partitioned into as many `reduce_width` blocks + // as can be fit. + constexpr int attn_threads = 256; + constexpr int min_reduce_width = hw_warp_size; + constexpr int internal_unroll = 4; + + // Handle internal unroll then round to next power of 2. Bump up to minimum granularity. + const int thread_steps_rounded = + next_pow2((sequence_length + internal_unroll - 1) / internal_unroll); + const int thread_steps_schedule = + (thread_steps_rounded < min_reduce_width) ? min_reduce_width : thread_steps_rounded; + // Bound reduce width to the number of threads + const int reduce_width = (thread_steps_schedule < attn_threads) ? 
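+ // Worked example (assuming hw_warp_size == 32): sequence_length == 128 rounds to
+ // thread_steps_schedule == 32, so reduce_width == 32, iterations == 1 and partitions == 8 rows
+ // per block; sequence_length == 2048 rounds to 512, so reduce_width == 256, iterations == 2
+ // and partitions == 1.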
thread_steps_schedule + : attn_threads; + // Scale for the excess + const int iterations = thread_steps_schedule / reduce_width; + // Should be safe since reduce_width is capped to attn_threads + const int partitions = attn_threads / reduce_width; + + // Launch params + dim3 grid((total_count + partitions - 1) / partitions); + dim3 block(attn_threads); + + if (sequence_length <= 32768) { + if (iterations == 1) { + LAUNCH_ATTN_SOFTMAX_V2(1); + } else if (iterations == 2) { + LAUNCH_ATTN_SOFTMAX_V2(2); + } else if (iterations == 4) { + LAUNCH_ATTN_SOFTMAX_V2(4); + } else if (iterations == 8) { + LAUNCH_ATTN_SOFTMAX_V2(8); + } else if (iterations == 16) { + LAUNCH_ATTN_SOFTMAX_V2(16); + } else if (iterations == 32) { + LAUNCH_ATTN_SOFTMAX_V2(32); + } else if (iterations == 64) { + LAUNCH_ATTN_SOFTMAX_V2(64); + } + } else + throw std::runtime_error("Unsupport Seq_Length!"); +} + +#define INSTANTIATE_LAUNCH_ATTN_SOFTMAX_V2(T) \ + template void launch_attn_softmax_v2(T* vals, \ + T* mask, \ + T* alibi, \ + float layer_scale, \ + bool triangular, \ + bool recompute, \ + bool local_attention, \ + int window_size, \ + int batch_size, \ + int heads, \ + int num_seq, \ + int sequence_length, \ + int head_offset, \ + int mask_stride, \ + int mp_size, \ + cudaStream_t stream); + +INSTANTIATE_LAUNCH_ATTN_SOFTMAX_V2(float); +#ifdef BF16_AVAILABLE +INSTANTIATE_LAUNCH_ATTN_SOFTMAX_V2(__nv_bfloat16); +#endif +INSTANTIATE_LAUNCH_ATTN_SOFTMAX_V2(__half); + +#define DEF_ATTN_SOFTMAX_V2_HALF(_iter) \ + template __global__ void attn_softmax_v2<__half, _iter>(__half * vals, \ + __half * mask, \ + __half * alibi, \ + float layer_scale, \ + bool triangular, \ + bool recompute, \ + bool local_attention, \ + int window_size, \ + int total_count, \ + int heads, \ + int sequence_length, \ + int num_seq, \ + int head_offset, \ + int mask_stride, \ + int mp_size, \ + int reduceWidth) + +#define DEF_ATTN_SOFTMAX_V2_BF16(_iter) \ + template __global__ void attn_softmax_v2<__nv_bfloat16, _iter>(__nv_bfloat16 * vals, \ + __nv_bfloat16 * mask, \ + __nv_bfloat16 * alibi, \ + float layer_scale, \ + bool triangular, \ + bool recompute, \ + bool local_attention, \ + int window_size, \ + int total_count, \ + int heads, \ + int sequence_length, \ + int num_seq, \ + int head_offset, \ + int mask_stride, \ + int mp_size, \ + int reduceWidth) + +#define FOREACH_ITERATIONS(cb) \ + cb(1); \ + cb(2); \ + cb(4); \ + cb(8); \ + cb(16); \ + cb(32); \ + cb(64) + +FOREACH_ITERATIONS(DEF_ATTN_SOFTMAX_V2_HALF); +#ifdef BF16_AVAILABLE +FOREACH_ITERATIONS(DEF_ATTN_SOFTMAX_V2_BF16); +#endif diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/includes/inference_context.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/includes/inference_context.h new file mode 100644 index 0000000000000000000000000000000000000000..378fd4e5e990d5be9b642e2092ab30370506b191 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/includes/inference_context.h @@ -0,0 +1,292 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include +#include +#include +#include +#include +#include "cublas_v2.h" +#include "cuda.h" + +#define MEGABYTE (1024 * 1024) +#define GIGABYTE (1024 * 1024 * 1024) + +// TODO: refactor out +#define WARP_SIZE 32 + +#define CUDA_CHECK(callstr) \ + { \ + cudaError_t error_code = callstr; \ + if (error_code != cudaSuccess) { \ + std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__; \ + assert(0); \ + } \ + } + +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) + +#define CUDA_2D_KERNEL_LOOP(i, n, j, m) \ + for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) \ + for (size_t j = blockIdx.y * blockDim.y + threadIdx.y; j < (m); j += blockDim.y * gridDim.y) + +#define DS_CUDA_NUM_THREADS 512 +#define DS_MAXIMUM_NUM_BLOCKS 262144 + +inline int DS_GET_BLOCKS(const int N) +{ + return std::max( + std::min((N + DS_CUDA_NUM_THREADS - 1) / DS_CUDA_NUM_THREADS, DS_MAXIMUM_NUM_BLOCKS), + // Use at least 1 block, since CUDA does not allow empty block + 1); +} + +class InferenceContext { +public: + InferenceContext() + : _workspace(nullptr), + _seed(42), + _curr_offset(0), + _stream(0), + _free_memory_size(0), + _num_tokens(1), + _attention_unfused_workspace_offset(0), + _workSpaceSize(0) + { + _workSpaceSize = 0; + _workspace = 0; + + cublasStatus_t stat = cublasCreate(&_cublasHandle); + if (stat != CUBLAS_STATUS_SUCCESS) { + // It would be nice to use cublasGetStatusName and + // cublasGetStatusString, but they were only added in CUDA 11.4.2. + auto message = std::string("Failed to create cublas handle: cublasStatus_t was ") + + std::to_string(stat); + std::cerr << message << std::endl; + throw std::runtime_error(message); + } +#ifndef __HIP_PLATFORM_AMD__ + cublasSetMathMode(_cublasHandle, CUBLAS_TENSOR_OP_MATH); +#endif + cudaEventCreate(&_comp1_event); + cudaEventCreate(&_comp2_event); + cudaEventCreate(&_comp_event); + cudaEventCreate(&_comm_event); + } + + virtual ~InferenceContext() + { + cublasDestroy(_cublasHandle); + cudaFree(_workspace); + cudaEventDestroy(_comp1_event); + cudaEventDestroy(_comp2_event); + cudaEventDestroy(_comp_event); + cudaEventDestroy(_comm_event); + } + + static InferenceContext& Instance() + { + static InferenceContext _ctx; + return _ctx; + } + + void GenWorkSpace(const unsigned& num_layers, + const unsigned& num_heads, + const size_t& batch_size, + const size_t& prompt_len, + const size_t& hidden_dim, + const unsigned& mp_size, + const bool& external_cache, + const size_t& elem_size, + const unsigned& rank, + unsigned max_out_tokens, + unsigned min_out_tokens) + { + size_t total_size; + if (!_free_memory_size) { cudaMemGetInfo(&_free_memory_size, &total_size); } + + // Flash attention requires padded heads and we'll conservatively allocate + // for that here. Flash attention is only enabled for head size <= 128 right now + const int head_size = hidden_dim / num_heads; + const int padded_head_size = head_size <= 32 ? 32 : (head_size <= 64 ? 64 : 128); + const int effective_head_size = (head_size > 128) ? 
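+ // For example, head_size 40 is padded to 64 and head_size 128 stays at 128, while head sizes
+ // above 128 (no flash-attention padding) are used as-is.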
head_size : padded_head_size; + + size_t activation_size = 10 * (num_heads * effective_head_size) * batch_size; + // Other sequence length dimension is added when the final workSpaceSize is calculated + size_t temp_size = batch_size * (num_heads / mp_size) * max_out_tokens; + size_t cache_size = + num_layers * batch_size * ((num_heads * effective_head_size) / mp_size) * 2; + size_t minimal_requirements = + temp_size + (_free_memory_size > GIGABYTE ? 500 : 100) * MEGABYTE; + if (_free_memory_size < minimal_requirements) { + printf("Requested:\t%lu\nFree:\t%lu\nTotal:\t%lu\n", + minimal_requirements, + _free_memory_size, + total_size); + throw std::runtime_error("Workspace can't be allocated, no enough memory."); + } + + _max_seq_len = ((_free_memory_size - minimal_requirements) / elem_size) / + (activation_size + temp_size + cache_size); + _max_seq_len = std::min((size_t)max_out_tokens, _max_seq_len); + size_t workSpaceSize = ((external_cache ? (activation_size + temp_size) + : (activation_size + temp_size + cache_size))) * + _max_seq_len * elem_size; + temp_size *= _max_seq_len * elem_size; + + if (_max_seq_len < min_out_tokens) { + printf( + "Allocatable workspace available (%ld tokens) is less than minimum requested " + "workspace (%d tokens)\n", + _max_seq_len, + min_out_tokens); + throw std::runtime_error("Workspace can't be allocated, not enough memory"); + } + + if (!_workspace) { + assert(_workspace == nullptr); + cudaMalloc(&_workspace, workSpaceSize); + } else if (_workSpaceSize < workSpaceSize) { + cudaFree(_workspace); + cudaMalloc(&_workspace, workSpaceSize); + } + if (rank == 0 && (!_workspace || _workSpaceSize < workSpaceSize)) + printf( + "------------------------------------------------------\n" + "Free memory : %f (GigaBytes) \n" + "Total memory: %f (GigaBytes) \n" + "Requested memory: %f (GigaBytes) \n" + "Setting maximum total tokens (input + output) to %lu \n" + "WorkSpace: %p \n" + "------------------------------------------------------\n", + (float)_free_memory_size / GIGABYTE, + (float)total_size / GIGABYTE, + (float)workSpaceSize / GIGABYTE, + _max_seq_len, + _workspace); + + if (!_workspace) { + printf("Requested:\t%lu\nFree:\t%lu\nTotal:\t%lu\n", + workSpaceSize, + _free_memory_size, + total_size); + throw std::runtime_error("Workspace is null."); + } + _workSpaceSize = workSpaceSize; + _attention_unfused_workspace_offset = workSpaceSize - temp_size; + } + inline size_t GetMaxTokenLength() const { return _max_seq_len; } + + cudaEvent_t GetCompEvent(int id) { return id == 1 ? _comp1_event : _comp2_event; } + + size_t get_workspace_size() const { return _workSpaceSize; } + void* GetWorkSpace() { return _workspace; } + void* GetAttentionUnfusedWorkspace() + { + return (char*)_workspace + _attention_unfused_workspace_offset; + } + + inline unsigned new_token(unsigned layer_id) + { + if (layer_id == 0) _token_length++; + return _token_length; + } + + inline void reset_tokens(unsigned initial_tokens = 1) + { + _num_tokens = initial_tokens; + } //_token_length = 0; } + + inline unsigned current_tokens() const { return _num_tokens; } + + inline void advance_tokens() { _num_tokens++; } + + cudaStream_t GetCommStream(bool async_op = false) + { + if (!_comm_stream) + _comm_stream = async_op ? at::cuda::getStreamFromPool(true) + : at::cuda::getCurrentCUDAStream(); + return _comm_stream; + } + cudaStream_t GetCurrentStream(bool other_stream = false) + { + // get current pytorch stream. 
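+ // When other_stream is requested, a stream is taken from the pool once and cached in _stream;
+ // otherwise the current PyTorch CUDA stream is returned on every call.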
+ if (other_stream) { + if (!_stream) _stream = at::cuda::getStreamFromPool(true); + return _stream; + } + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + return stream; + } + + void release_workspace() + { + cudaFree(_workspace); + _workspace = nullptr; + } + bool retake_workspace() + { + if (_workspace != nullptr || _workSpaceSize == 0) return true; + cudaMalloc(&_workspace, _workSpaceSize); + return _workspace != nullptr; + } + cublasHandle_t GetCublasHandle() { return _cublasHandle; } + + std::pair IncrementOffset(uint64_t offset_inc) + { + uint64_t offset = _curr_offset; + _curr_offset += offset_inc; + return std::pair(_seed, offset); + } + + void SetSeed(uint64_t new_seed) { _seed = new_seed; } + + const std::vector>& GetGemmAlgos() const { return _gemm_algos; } + + inline void SynchComp() + { + cudaEventRecord(_comp_event, _comp_stream); + cudaStreamWaitEvent(_comm_stream, _comp_event, 0); + } + inline void SynchComm() + { + cudaEventRecord(_comm_event, _comm_stream); + cudaStreamWaitEvent(_comp_stream, _comm_event, 0); + } + +private: + cublasHandle_t _cublasHandle; + + cudaEvent_t _comp_event; + cudaEvent_t _comm_event; + + void* _workspace; + // offset from _workspace for attention unfused memory + size_t _attention_unfused_workspace_offset; + uint64_t _seed; + uint64_t _curr_offset; + + size_t _workSpaceSize; + size_t _free_memory_size; + + size_t _max_seq_len; + + cudaEvent_t _comp1_event; + cudaEvent_t _comp2_event; + + cudaStream_t _stream; + + unsigned _token_length; + unsigned _num_tokens; + std::vector> _gemm_algos; + + cudaStream_t _comp_stream; + cudaStream_t _comm_stream; + + std::unordered_map _world_sizes; +}; diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/includes/inference_cublas_wrappers.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/includes/inference_cublas_wrappers.h new file mode 100644 index 0000000000000000000000000000000000000000..640751b12c8f1ea290e1803cac665f6bfbb72185 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/includes/inference_cublas_wrappers.h @@ -0,0 +1,435 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include +#include +#include +#ifdef BF16_AVAILABLE +#include +#endif +#include +#include +#ifndef __HIP_PLATFORM_AMD__ +#include +#endif +#include + +#ifdef __HIP_PLATFORM_AMD__ +int cublas_gemm_ex(rocblas_handle handle, + rocblas_operation transa, + rocblas_operation transb, + int m, + int n, + int k, + const float* alpha, + const float* beta, + const float* A, + const float* B, + float* C, + rocblas_gemm_algo algo, + int b_stride = -1) +#else +int cublas_gemm_ex(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const float* alpha, + const float* beta, + const float* A, + const float* B, + float* C, + cublasGemmAlgo_t algo, + int b_stride = -1) +#endif +{ + const int ldb = (b_stride == -1) ? ((transb == CUBLAS_OP_N) ? k : n) : b_stride; +#ifdef __HIP_PLATFORM_AMD__ + rocblas_status status = rocblas_gemm_ex(handle, + transa, + transb, + m, + n, + k, + (const void*)alpha, + (const void*)A, + rocblas_datatype_f32_r, + (transa == rocblas_operation_none) ? 
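+ // Leading dimension of column-major A: m rows when A is not transposed,
+ // otherwise k (A is then stored as k x m).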
m : k, + (const void*)B, + rocblas_datatype_f32_r, + ldb, + (const void*)beta, + C, + rocblas_datatype_f32_r, + m, + C, + rocblas_datatype_f32_r, + m, + rocblas_datatype_f32_r, + algo, + 0, + 0); +#else + cublasStatus_t status = cublasGemmEx(handle, + transa, + transb, + m, + n, + k, + (const void*)alpha, + (const void*)A, + CUDA_R_32F, + (transa == CUBLAS_OP_N) ? m : k, + (const void*)B, + CUDA_R_32F, + ldb, + (const void*)beta, + C, + CUDA_R_32F, + m, + CUDA_R_32F, + algo); +#endif + +#ifdef __HIP_PLATFORM_AMD__ + if (status != rocblas_status_success) { +#else + if (status != CUBLAS_STATUS_SUCCESS) { +#endif + fprintf(stderr, + "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", + m, + n, + k, + (int)status); + return EXIT_FAILURE; + } + return 0; +} + +template +#ifdef __HIP_PLATFORM_AMD__ +int cublas_gemm_ex(rocblas_handle handle, + rocblas_operation transa, + rocblas_operation transb, + int m, + int n, + int k, + const float* alpha, + const float* beta, + const T* A, + const T* B, + T* C, + rocblas_gemm_algo algo, + int b_stride = -1) +#else +int cublas_gemm_ex(cublasHandle_t handle, + cublasOperation_t transa, + cublasOperation_t transb, + int m, + int n, + int k, + const float* alpha, + const float* beta, + const T* A, + const T* B, + T* C, + cublasGemmAlgo_t algo, + int b_stride = -1) +#endif +{ + const int ldb = (b_stride == -1) ? ((transb == CUBLAS_OP_N) ? k : n) : b_stride; +#ifdef __HIP_PLATFORM_AMD__ + constexpr auto rocblas_dtype_16 = std::is_same::value ? rocblas_datatype_f16_r + : rocblas_datatype_bf16_r; + rocblas_status status = rocblas_gemm_ex(handle, + transa, + transb, + m, + n, + k, + (const void*)alpha, + (const void*)A, + rocblas_dtype_16, + (transa == rocblas_operation_none) ? m : k, + (const void*)B, + rocblas_dtype_16, + ldb, + (const void*)beta, + (void*)C, + rocblas_dtype_16, + m, + (void*)C, + rocblas_dtype_16, + m, + rocblas_datatype_f32_r, + algo, + 0, + 0); +#else + constexpr auto cublas_dtype_16 = std::is_same::value ? CUDA_R_16F : CUDA_R_16BF; + cublasStatus_t status = cublasGemmEx(handle, + transa, + transb, + m, + n, + k, + (const void*)alpha, + (const void*)A, + cublas_dtype_16, + (transa == CUBLAS_OP_N) ? m : k, + (const void*)B, + cublas_dtype_16, + ldb, + (const void*)beta, + (void*)C, + cublas_dtype_16, + m, + CUDA_R_32F, + algo); +#endif + +#ifdef __HIP_PLATFORM_AMD__ + if (status != rocblas_status_success) { +#else + if (status != CUBLAS_STATUS_SUCCESS) { +#endif + fprintf(stderr, + "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", + m, + n, + k, + (int)status); + return EXIT_FAILURE; + } + return 0; +} + +#ifdef __HIP_PLATFORM_AMD__ +int cublas_strided_batched_gemm(rocblas_handle handle, + int m, + int n, + int k, + const float* alpha, + const float* beta, + const float* A, + const float* B, + float* C, + rocblas_operation op_A, + rocblas_operation op_B, + int stride_A, + int stride_B, + int stride_C, + int batch, + rocblas_gemm_algo algo) +#else +int cublas_strided_batched_gemm(cublasHandle_t handle, + int m, + int n, + int k, + const float* alpha, + const float* beta, + const float* A, + const float* B, + float* C, + cublasOperation_t op_A, + cublasOperation_t op_B, + int stride_A, + int stride_B, + int stride_C, + int batch, + cublasGemmAlgo_t algo) +#endif +{ +#ifdef __HIP_PLATFORM_AMD__ + rocblas_status status = + rocblas_gemm_strided_batched_ex(handle, + op_A, + op_B, + m, + n, + k, + alpha, + A, + rocblas_datatype_f32_r, + (op_A == rocblas_operation_none) ? 
m : k, + stride_A, + B, + rocblas_datatype_f32_r, + (op_B == rocblas_operation_none) ? k : n, + stride_B, + beta, + C, + rocblas_datatype_f32_r, + m, + stride_C, + C, + rocblas_datatype_f32_r, + m, + stride_C, + batch, + rocblas_datatype_f32_r, + algo, + 0, + 0); +#else + cublasStatus_t status = cublasGemmStridedBatchedEx(handle, + op_A, + op_B, + m, + n, + k, + alpha, + A, + CUDA_R_32F, + (op_A == CUBLAS_OP_N) ? m : k, + stride_A, + B, + CUDA_R_32F, + (op_B == CUBLAS_OP_N) ? k : n, + stride_B, + beta, + C, + CUDA_R_32F, + m, + stride_C, + batch, + CUDA_R_32F, + algo); +#endif + +#ifdef __HIP_PLATFORM_AMD__ + if (status != rocblas_status_success) { +#else + if (status != CUBLAS_STATUS_SUCCESS) { +#endif + fprintf(stderr, + "!!!! kernel execution error. (batch: %d, m: %d, n: %d, k: %d, error: %d) \n", + batch, + m, + n, + k, + (int)status); + return EXIT_FAILURE; + } + return 0; +} + +template +#ifdef __HIP_PLATFORM_AMD__ +int cublas_strided_batched_gemm(rocblas_handle handle, + int m, + int n, + int k, + const float* alpha, + const float* beta, + const T* A, + const T* B, + T* C, + rocblas_operation op_A, + rocblas_operation op_B, + int stride_A, + int stride_B, + int stride_C, + int batch, + rocblas_gemm_algo algo) +#else +int cublas_strided_batched_gemm(cublasHandle_t handle, + int m, + int n, + int k, + const float* alpha, + const float* beta, + const T* A, + const T* B, + T* C, + cublasOperation_t op_A, + cublasOperation_t op_B, + int stride_A, + int stride_B, + int stride_C, + int batch, + cublasGemmAlgo_t algo) +#endif +{ +#ifdef __HIP_PLATFORM_AMD__ + constexpr auto rocblas_dtype_16 = std::is_same::value ? rocblas_datatype_f16_r + : rocblas_datatype_bf16_r; + rocblas_status status = + rocblas_gemm_strided_batched_ex(handle, + op_A, + op_B, + m, + n, + k, + alpha, + A, + rocblas_dtype_16, + (op_A == rocblas_operation_none) ? m : k, + stride_A, + B, + rocblas_dtype_16, + (op_B == rocblas_operation_none) ? k : n, + stride_B, + beta, + C, + rocblas_dtype_16, + m, + stride_C, + C, + rocblas_dtype_16, + m, + stride_C, + batch, + rocblas_datatype_f32_r, + algo, + 0, + 0); +#else + constexpr auto cublas_dtype_16 = std::is_same::value ? CUDA_R_16F : CUDA_R_16BF; + cublasStatus_t status = cublasGemmStridedBatchedEx(handle, + op_A, + op_B, + m, + n, + k, + alpha, + A, + cublas_dtype_16, + (op_A == CUBLAS_OP_N) ? m : k, + stride_A, + B, + cublas_dtype_16, + (op_B == CUBLAS_OP_N) ? k : n, + stride_B, + beta, + C, + cublas_dtype_16, + m, + stride_C, + batch, + CUDA_R_32F, + algo); +#endif + +#ifdef __HIP_PLATFORM_AMD__ + if (status != rocblas_status_success) { +#else + if (status != CUBLAS_STATUS_SUCCESS) { +#endif + fprintf(stderr, + "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", + m, + n, + k, + (int)status); + return EXIT_FAILURE; + } + + return 0; +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/includes/inference_cuda_layers.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/includes/inference_cuda_layers.h new file mode 100644 index 0000000000000000000000000000000000000000..dcc020483687f751f5997863307504449743fa35 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/includes/inference_cuda_layers.h @@ -0,0 +1,248 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include "ds_kernel_utils.h" + +#include +#ifdef BF16_AVAILABLE +#include +#endif +#include +#include +#include +#include +#include + +#define MAX_WARP_NUM 32 +#define WARP_SIZE 32 + +#define MAX_THREADS 1024 +#define SMs 80 + +#define MAX_REGISTERS 256 + +template +void launch_attn_softmax_v2(T* vals, + T* mask, + T* alibi, + float layer_scale, + bool triangular, + bool recompute, + bool local_attention, + int window_size, + int batch_size, + int heads, + int num_seq, + int sequence_length, + int offset, + int mask_stride, + int mp_size, + cudaStream_t stream); + +// Fused bias add with gelu activation +template +void launch_bias_gelu(T* input, + const T* bias, + int intermediate_size, + int batch_size, + cudaStream_t stream); + +template +void launch_gated_activation(T* output, + const T* activation, + const T* bias, + int rows, + int output_stride, + int elems_per_row, + bool use_gelu, + cudaStream_t stream); + +// Fused bias add with relu activation +template +void launch_bias_relu(T* input, + const T* bias, + int intermediate_size, + int batch_size, + cudaStream_t stream); + +template +void launch_bias_add(T* input, const T* bias, int hidden_size, int batch_size, cudaStream_t stream); + +template +void launch_bias_residual(T* input, + T* output, + T* attn, + T* bias, + T* attn_bias, + int batch, + int hidden_dim, + int mp_size, + bool preln, + cudaStream_t stream); + +template +void launch_fused_ln(T* output, + const T* vals, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream); + +template +void launch_fused_residual_ln(T* output, + const T* vals, + const T* residual, + const T* bias, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream); + +template +void launch_fused_residual_ln_store_pre_ln_res(T* norm_output, + T* res_output, + const T* vals, + const T* residual, + const T* bias, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream); + +template +void launch_rms_norm(T* norm_output, + T* res_output, + const T* vals, + const T* residual, + const T* gamma, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream); + +template +void launch_dequantize(T* output, + const int8_t* input, + const float* qscale, + unsigned output_size, + unsigned hidden_dim, + unsigned groups, + unsigned merge_count, + cudaStream_t stream); + +template +void launch_dequantize(T* output, + const int8_t* input, + const float* qscale, + unsigned output_size, + unsigned hidden_dim, + unsigned groups, + cudaStream_t stream); +template +void launch_gptj_residual_add(T* input, + T* output, + T* attn, + T* bias, + T* attn_bias, + int batch, + int head_size, + int mp_size, + cudaStream_t stream); + +template +void launch_apply_rotary_pos_emb(T* mixed_query, + T* key_layer, + unsigned head_size, + unsigned seq_len, + unsigned rotary_dim, + unsigned offset, + unsigned num_heads, + unsigned batch, + float rope_theta, + cudaStream_t stream, + int max_out_tokens); + +template +void launch_moe_res_matmul(T* residual, + T* coef, + T* mlp_out, + int seq_len, + int hidden_dim, + cudaStream_t stream); + +// 4D transform [0, 1, 2, 3] -> [0, 2, 1, 3] +template +void launch_transform4d_0213(T* out, + const T* in, + int batch_size, + int heads, + int seq_length, + int hidden_dim, + cudaStream_t stream, + int trans_count); +template +void launch_bias_add_transform_0213(T* 
outputs, + T* vals, + T* vals1, + const T* vals2, + const T* bias, + int batch_size, + int seq_length, + unsigned seq_offset, + int seq_length1, + int hidden_dim, + int heads, + int num_kv, + int rotary_dim, + bool rotate_half, + bool rotate_every_two, + cudaStream_t stream, + int trans_count, + int max_out_tokens, + float rope_theta); +template +void pad_data(T* padded_output, + T* output, + int bsz, + int head_size, + int padded_head_size, + cudaStream_t stream); + +template +void pad_head_seq(T* padded_output, + T* output, + int bsz, + int seq_len, + int padded_seq_len, + int head_size, + int padded_head_size, + cudaStream_t stream); + +template +void launch_pad_add_transform_0213(T* output, + const T* vals, + int batch_size, + int hidden_dim, + int seq_length, + int padded_seq_len, + int heads, + int padded_head_size, + cudaStream_t stream); + +template +void launch_vector_add(T* out, + const T* a, + const T* b, + float gamma, + int num_elems, + cudaStream_t stream); diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/normalize_kernels.cu b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/normalize_kernels.cu new file mode 100644 index 0000000000000000000000000000000000000000..8b9dbaea6c86d98ed7e431073684b89b8d1499ee --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/normalize_kernels.cu @@ -0,0 +1,2134 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "custom_cuda_layers.h" + +namespace cg = cooperative_groups; + +/* +Fused bias add, residual (elementwise) add, and normalization layer. + +For FP16, this kernel does not promote to FP32 in order to utilize the 2x throughput for +__half2 instructions, and avoid the conversion overhead (1/8 of __hal2 arithmetic). + +For specific launch constraints, see the launch functions. 
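+
+In short, each block handles one row and computes
+    y = gamma * (x - mean) * rsqrt(var + eps) + beta
+where mean and var are first reduced within each warp via shfl_down and then
+combined across warps through a small shared-memory buffer (see the kernels below).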
+*/ + +#define NORM_REG (MAX_REGISTERS / 4) + +__global__ void fused_bias_residual_layer_norm(float* vals, + const float* residual, + const float* gamma, + const float* beta, + float epsilon, + bool preLayerNorm, + bool training, + float* vars, + float* means, + int row_stride) +{ + int iteration_stride = blockDim.x; + int iterations = row_stride / iteration_stride; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int row = blockIdx.x; + int id = threadIdx.x; + int gid = id / WARP_SIZE; + + float vals_arr[NORM_REG]; + __shared__ float shr[MAX_WARP_NUM]; + + residual += (row * row_stride); + vals += (row * row_stride); + + float sum = 0.f; + int high_index = iterations * iteration_stride + id; +#pragma unroll + for (int i = 0; i < iterations; i++) { + vals_arr[i] = residual[i * iteration_stride + id]; + sum += vals_arr[i]; + } + if (high_index < row_stride) { + vals_arr[iterations] = residual[high_index]; + sum += vals_arr[iterations]; + iterations++; + } + + for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) shr[gid] = sum; + + b.sync(); + + if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()]; + +#if !defined(__STOCHASTIC_MODE__) || __CUDA_ARCH__ < 700 + b.sync(); +#endif + + for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { + sum += g.shfl_down(sum, i); + } + + sum = g.shfl(sum, 0); + float mean = sum / row_stride; + if (training) + if (threadIdx.x == 0) means[row] = mean; + float variance = 0.f; + for (int i = 0; i < iterations; i++) { + vals_arr[i] -= mean; + variance += vals_arr[i] * vals_arr[i]; + } + + for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); } + + if (g.thread_rank() == 0) shr[gid] = variance; + + b.sync(); + + if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + b.sync(); +#endif + + for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { + variance += g.shfl_down(variance, i); + } + variance = g.shfl(variance, 0); + variance /= row_stride; + variance += epsilon; + if (training) + if (threadIdx.x == 0) vars[row] = variance; + + iterations = row_stride / iteration_stride; + for (int i = 0; i < iterations; i++) { + vals_arr[i] = vals_arr[i] * rsqrtf(variance); + vals_arr[i] = + vals_arr[i] * gamma[i * iteration_stride + id] + beta[i * iteration_stride + id]; + vals[i * iteration_stride + id] = vals_arr[i]; + } + if ((high_index) < row_stride) { + vals_arr[iterations] = vals_arr[iterations] * rsqrtf(variance); + vals_arr[iterations] = vals_arr[iterations] * gamma[high_index] + beta[high_index]; + vals[high_index] = vals_arr[iterations]; + } +} + +__global__ void fused_bias_residual_layer_norm(__half* vals, + const __half* residual, + const __half* gamma, + const __half* beta, + float epsilon, + bool preLayerNorm, + bool training, + __half* vars, + __half* means, + int row_stride) +{ +#ifdef HALF_PRECISION_AVAILABLE + int iteration_stride = blockDim.x; + int iterations = row_stride / iteration_stride; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); + + int row = blockIdx.x; + int id = threadIdx.x; + int gid = id >> WARP_SIZE_BITS; + + float2 vals_f[NORM_REG]; + __shared__ float shr[MAX_WARP_NUM]; + + __half2* vals_cast = reinterpret_cast<__half2*>(vals); + const __half2* residual_cast = reinterpret_cast(residual); + + residual_cast += (row * row_stride); + 
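+    // row_stride counts __half2 elements here (the launcher passes hidden_dim / 2),
+    // so these offsets move residual_cast/vals_cast to the start of this block's packed row.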
vals_cast += (row * row_stride); + + float sum = 0.f; + int high_index = iterations * iteration_stride + id; +#pragma unroll + for (int i = 0; i < iterations; i++) { + vals_f[i] = __half22float2(residual_cast[i * iteration_stride + id]); + sum += vals_f[i].x; + sum += vals_f[i].y; + } + if ((high_index) < row_stride) { + vals_f[iterations] = __half22float2(residual_cast[high_index]); + sum += vals_f[iterations].x; + sum += vals_f[iterations].y; + iterations++; + } + + for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) shr[gid] = sum; + + b.sync(); + + if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + b.sync(); +#endif + + for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { + sum += g.shfl_down(sum, i); + } + sum = g.shfl(sum, 0); + float mean = sum / (row_stride * 2); + + float variance = 0.f; + for (int i = 0; i < iterations; i++) { + vals_f[i].x -= mean; + vals_f[i].y -= mean; + variance += vals_f[i].x * vals_f[i].x; + variance += vals_f[i].y * vals_f[i].y; + } + + for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); } + + if (g.thread_rank() == 0) shr[gid] = variance; + + b.sync(); + + if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + b.sync(); +#endif + + for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { + variance += g.shfl_down(variance, i); + } + variance = g.shfl(variance, 0); + variance /= (row_stride * 2); + variance += epsilon; + + __half2 variance_h = __float2half2_rn(variance); + const __half2* gamma_cast = reinterpret_cast(gamma); + const __half2* beta_cast = reinterpret_cast(beta); + + if (training && threadIdx.x == 0) { + vars[row] = __float2half(variance); + means[row] = __float2half(mean); + } + iterations = row_stride / iteration_stride; + for (int i = 0; i < iterations; i++) { + __half2 vals_arr = __float22half2_rn(vals_f[i]); + vals_arr = vals_arr * h2rsqrt(variance_h); + vals_arr = + vals_arr * gamma_cast[i * iteration_stride + id] + beta_cast[i * iteration_stride + id]; + vals_cast[i * iteration_stride + id] = vals_arr; + } + if ((high_index) < row_stride) { + __half2 vals_arr = __float22half2_rn(vals_f[iterations]); + vals_arr = vals_arr * h2rsqrt(variance_h); + vals_arr = vals_arr * gamma_cast[high_index] + beta_cast[high_index]; + vals_cast[high_index] = vals_arr; + } +#endif +} + +template +void launch_bias_residual_layer_norm(T* vals, + const T* residual, + const T* gamma, + const T* beta, + float epsilon, + int batch_size, + int hidden_dim, + cudaStream_t stream, + bool preLayerNorm, + bool training, + T* vars, + T* means); + +template <> +void launch_bias_residual_layer_norm(float* vals, + const float* residual, + const float* gamma, + const float* beta, + float epsilon, + int batch_size, + int hidden_dim, + cudaStream_t stream, + bool preLayerNorm, + bool training, + float* vars, + float* means) +{ + int threads = THREADS; + + dim3 grid_dim(batch_size); + + if (hidden_dim > 16384 && hidden_dim <= 32768) + threads <<= 1; + else if (hidden_dim > 32768 && hidden_dim <= 65536) + threads <<= 2; + else if (hidden_dim > 65536) + throw std::runtime_error("Unsupport hidden_dim."); + + dim3 block_dim(threads); + + fused_bias_residual_layer_norm<<>>( + vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, means, hidden_dim); +} + +template <> +void launch_bias_residual_layer_norm<__half>(__half* vals, + const __half* 
residual, + const __half* gamma, + const __half* beta, + float epsilon, + int batch_size, + int hidden_dim, + cudaStream_t stream, + bool preLayerNorm, + bool training, + __half* vars, + __half* means) +{ + int threads = 128; + + dim3 grid_dim(batch_size); + + if (hidden_dim > 8192 && hidden_dim <= 16384) + threads <<= 1; + else if (hidden_dim > 16384 && hidden_dim <= 32768) + threads <<= 2; + else if (hidden_dim > 32768 && hidden_dim <= 65536) + threads <<= 3; + else if (hidden_dim > 65536) + throw std::runtime_error("Unsupport hidden_dim."); + + dim3 block_dim(threads); + + fused_bias_residual_layer_norm<<>>( + vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, means, hidden_dim / 2); +} + +__global__ void fused_bias_residual_layer_norm(float* vals, + const float* residual, + const float* gamma, + const float* beta, + float epsilon, + bool preLayerNorm, + bool training, + float* vars, + int row_stride) +{ + int iteration_stride = blockDim.x; + int iterations = row_stride / iteration_stride; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); + + int row = blockIdx.x; + int id = threadIdx.x; + int gid = id / 32; + + float vals_arr[NORM_REG]; + __shared__ float shr[MAX_WARP_NUM]; + + residual += (row * row_stride); + vals += (row * row_stride); + + float sum = 0.f; + int high_index = iterations * iteration_stride + id; +#pragma unroll + for (int i = 0; i < iterations; i++) { + vals_arr[i] = residual[i * iteration_stride + id]; + sum += vals_arr[i]; + } + if ((high_index) < row_stride) { + vals_arr[iterations] = residual[high_index]; + sum += vals_arr[iterations]; + iterations++; + } + + for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) shr[gid] = sum; + + b.sync(); + + if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()]; + +#if !defined(__STOCHASTIC_MODE__) || __CUDA_ARCH__ < 700 + b.sync(); +#endif + + for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { + sum += g.shfl_down(sum, i); + } + + sum = g.shfl(sum, 0); + float mean = sum / row_stride; + float variance = 0.f; + for (int i = 0; i < iterations; i++) { + vals_arr[i] -= mean; + variance += vals_arr[i] * vals_arr[i]; + } + + for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); } + + if (g.thread_rank() == 0) shr[gid] = variance; + + b.sync(); + + if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + b.sync(); +#endif + + for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { + variance += g.shfl_down(variance, i); + } + variance = g.shfl(variance, 0); + variance /= row_stride; + variance += epsilon; + if (training) + if (threadIdx.x == 0) vars[row] = variance; + + iterations = row_stride / iteration_stride; + for (int i = 0; i < iterations; i++) { + vals_arr[i] = vals_arr[i] * rsqrtf(variance); + vals_arr[i] = + vals_arr[i] * gamma[i * iteration_stride + id] + beta[i * iteration_stride + id]; + vals[i * iteration_stride + id] = vals_arr[i]; + } + if ((high_index) < row_stride) { + vals_arr[iterations] = vals_arr[iterations] * rsqrtf(variance); + vals_arr[iterations] = vals_arr[iterations] * gamma[high_index] + beta[high_index]; + vals[high_index] = vals_arr[iterations]; + } +} + +__global__ void fused_bias_residual_layer_norm(__half* vals, + const __half* residual, + const __half* gamma, + const __half* beta, + float epsilon, + bool preLayerNorm, + bool training, + 
__half* vars, + int row_stride) +{ +#ifdef HALF_PRECISION_AVAILABLE + + int iteration_stride = blockDim.x; + int iterations = row_stride / iteration_stride; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile<32> g = cg::tiled_partition<32>(b); + + int row = blockIdx.x; + int id = threadIdx.x; + int gid = id >> WARP_SIZE_BITS; + + float2 vals_f[NORM_REG]; + __shared__ float shr[MAX_WARP_NUM]; + + __half2* vals_cast = reinterpret_cast<__half2*>(vals); + const __half2* residual_cast = reinterpret_cast(residual); + + residual_cast += (row * row_stride); + vals_cast += (row * row_stride); + + float sum = 0.f; + int high_index = iterations * iteration_stride + id; +#pragma unroll + for (int i = 0; i < iterations; i++) { + vals_f[i] = __half22float2(residual_cast[i * iteration_stride + id]); + sum += vals_f[i].x; + sum += vals_f[i].y; + } + if ((high_index) < row_stride) { + vals_f[iterations] = __half22float2(residual_cast[high_index]); + sum += vals_f[iterations].x; + sum += vals_f[iterations].y; + iterations++; + } + + for (int i = 1; i < 32; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) shr[gid] = sum; + + b.sync(); + + if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) sum = shr[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + b.sync(); +#endif + + for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { + sum += g.shfl_down(sum, i); + } + sum = g.shfl(sum, 0); + float mean = sum / (row_stride * 2); + + float variance = 0.f; + for (int i = 0; i < iterations; i++) { + vals_f[i].x -= mean; + vals_f[i].y -= mean; + variance += vals_f[i].x * vals_f[i].x; + variance += vals_f[i].y * vals_f[i].y; + } + + for (int i = 1; i < 32; i *= 2) { variance += g.shfl_down(variance, i); } + + if (g.thread_rank() == 0) shr[gid] = variance; + + b.sync(); + + if (g.thread_rank() < (iteration_stride >> WARP_SIZE_BITS)) variance = shr[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + b.sync(); +#endif + + for (int i = 1; i < (iteration_stride >> WARP_SIZE_BITS); i *= 2) { + variance += g.shfl_down(variance, i); + } + variance = g.shfl(variance, 0); + variance /= (row_stride * 2); + variance += epsilon; + + __half2 variance_h = __float2half2_rn(variance); + const __half2* gamma_cast = reinterpret_cast(gamma); + const __half2* beta_cast = reinterpret_cast(beta); + + if (training && threadIdx.x == 0) vars[row] = __float2half(variance); + + iterations = row_stride / iteration_stride; + for (int i = 0; i < iterations; i++) { + __half2 vals_arr = __float22half2_rn(vals_f[i]); + vals_arr = vals_arr * h2rsqrt(variance_h); + vals_arr = + vals_arr * gamma_cast[i * iteration_stride + id] + beta_cast[i * iteration_stride + id]; + vals_cast[i * iteration_stride + id] = vals_arr; + } + if ((high_index) < row_stride) { + __half2 vals_arr = __float22half2_rn(vals_f[iterations]); + vals_arr = vals_arr * h2rsqrt(variance_h); + vals_arr = vals_arr * gamma_cast[high_index] + beta_cast[high_index]; + vals_cast[high_index] = vals_arr; + } +#endif +} + +template +void launch_bias_residual_layer_norm(T* vals, + const T* residual, + const T* gamma, + const T* beta, + float epsilon, + int batch_size, + int hidden_dim, + cudaStream_t stream, + bool preLayerNorm, + bool training, + T* vars); + +/* +To tune this launch the following restrictions must be met: + +For float: +row_stride == hidden_size +threads * iterations == row_stride +threads is in [32, 64, 128, 256, 512, 1024] + +For half: +row_stride == hidden_size / 2 +threads * iterations == row_stride +threads is in 
[32, 64, 128, 256, 512, 1024] + +*/ + +template <> +void launch_bias_residual_layer_norm(float* vals, + const float* residual, + const float* gamma, + const float* beta, + float epsilon, + int batch_size, + int hidden_dim, + cudaStream_t stream, + bool preLayerNorm, + bool training, + float* vars) +{ + int threads = THREADS; + + dim3 grid_dim(batch_size); + + // There are some limitations to call below functions, now just enumerate the situations. + + if (hidden_dim > 16384 && hidden_dim <= 32768) + threads <<= 1; + else if (hidden_dim > 32768 && hidden_dim <= 65536) + threads <<= 2; + else if (hidden_dim > 65536) + throw std::runtime_error("Unsupport hidden_dim."); + + dim3 block_dim(threads); + + fused_bias_residual_layer_norm<<>>( + vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, hidden_dim); +} + +template <> +void launch_bias_residual_layer_norm<__half>(__half* vals, + const __half* residual, + const __half* gamma, + const __half* beta, + float epsilon, + int batch_size, + int hidden_dim, + cudaStream_t stream, + bool preLayerNorm, + bool training, + __half* vars) +{ + int threads = 128; + + dim3 grid_dim(batch_size); + + // There are some limitations to call below functions, now just enumerate the situations. + + if (hidden_dim > 8192 && hidden_dim <= 16384) + threads <<= 1; + else if (hidden_dim > 16384 && hidden_dim <= 32768) + threads <<= 2; + else if (hidden_dim > 32768 && hidden_dim <= 65536) + threads <<= 3; + else if (hidden_dim > 65536) + throw std::runtime_error("Unsupport hidden_dim."); + + dim3 block_dim(threads); + fused_bias_residual_layer_norm<<>>( + vals, residual, gamma, beta, epsilon, preLayerNorm, training, vars, hidden_dim / 2); +} + +/* Normalize Gamma & Betta gradients + * Compute gradients using either X_hat or + * normalize input (invertible). + * Combine transpose with gradients computation. + */ + +template +__global__ void LayerNormBackward1(const T* __restrict__ out_grad, + const T* __restrict__ vals_hat, + const T* __restrict__ gamma, + const T* __restrict__ betta, + T* __restrict__ gamma_grad, + T* __restrict__ betta_grad, + int rows, + int width, + bool invertible) +{ + __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1]; + __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1]; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int idx = blockDim.x * blockIdx.x + threadIdx.x; + int offset = threadIdx.y * width + idx; + int y_stride = width * TILE_DIM; + + float betta_reg = (invertible ? (float)betta[idx] : 0.0f); + float gamma_reg = (float)gamma[idx]; + + // Loop across matrix height + float betta_tmp = 0; + float gamma_tmp = 0; + for (int r = threadIdx.y; r < rows; r += TILE_DIM) { + float grad = (float)out_grad[offset]; + float val = (invertible ? ((float)vals_hat[offset] - betta_reg) / gamma_reg + : (float)vals_hat[offset]); + betta_tmp += grad; + gamma_tmp += (val * grad); + + offset += y_stride; + } + + betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp; + gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp; + + __syncthreads(); + + // Sum the shared buffer. 
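+    // The tile was written as [threadIdx.x][threadIdx.y] and is read back as
+    // [threadIdx.y][threadIdx.x], i.e. transposed, so each warp can finish the
+    // per-column reduction with shfl_down below; the +1 padding on the second
+    // dimension keeps the transposed reads free of shared-memory bank conflicts.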
+ float s1 = betta_buffer[threadIdx.y][threadIdx.x]; + float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < TILE_DIM; i <<= 1) { + s1 += g.shfl_down(s1, i); + s2 += g.shfl_down(s2, i); + } + + if (threadIdx.x == 0) { + int pos = blockIdx.x * TILE_DIM + threadIdx.y; + betta_grad[pos] = s1; + gamma_grad[pos] = s2; + } +} + +/* Normalize Gamma & Betta gradients + * Compute gradients using the input to + * the normalize. + * Combine transpose with gradients computation. + */ + +template +__global__ void LayerNormBackward1(const T* __restrict__ out_grad, + const T* __restrict__ X_data, + const T* __restrict__ vars, + const T* __restrict__ means, + T* __restrict__ gamma_grad, + T* __restrict__ betta_grad, + int rows, + int width) +{ + __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1]; + __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1]; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int idx = blockDim.x * blockIdx.x + threadIdx.x; + int offset = threadIdx.y * width + idx; + int y_stride = width * TILE_DIM; + + int pos = blockIdx.x * TILE_DIM + threadIdx.y; + // Loop across matrix height + + float betta_tmp = 0; + float gamma_tmp = 0; + for (int r = threadIdx.y; r < rows; r += TILE_DIM) { + float grad = (float)out_grad[offset]; + float val = (float)X_data[offset]; + val = (val - (float)means[r]) * rsqrtf((float)vars[r]); + betta_tmp += grad; + gamma_tmp += (val * grad); + + offset += y_stride; + } + + betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp; + gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp; + + __syncthreads(); + + // Sum the shared buffer. + float s1 = betta_buffer[threadIdx.y][threadIdx.x]; + float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < TILE_DIM; i <<= 1) { + s1 += g.shfl_down(s1, i); + s2 += g.shfl_down(s2, i); + } + + if (threadIdx.x == 0) { + betta_grad[pos] = s1; + gamma_grad[pos] = s2; + } +} +/* + +/* Backward Normalize (Input-Gradient) + * Using the means and variances from the input + * This type of backward is invertible! + * We do the backward using the X_hat (X - u) / sqrt(variance) or the output of Normalization. + */ + +__global__ void LayerNormBackward2(const float* out_grad, + const float* vals_hat, + const float* gamma, + const float* betta, + const float* vars, + float* inp_grad, + bool invertible, + int row_stride) +{ + int iteration_stride = blockDim.x; + int iterations = row_stride / iteration_stride; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int row = blockIdx.x; + int id = threadIdx.x; + int wid = id / WARP_SIZE; + int warp_num = iteration_stride >> WARP_SIZE_BITS; + __shared__ float partialSum[MAX_WARP_NUM]; + + out_grad += (row * row_stride); + vals_hat += (row * row_stride); + inp_grad += (row * row_stride); + + float vals_arr[NORM_REG]; + float vals_hat_arr[NORM_REG]; + int high_index = iterations * iteration_stride + id; +#pragma unroll + for (int i = 0; i < iterations; i++) { + float gamma_reg = gamma[i * iteration_stride + id]; + vals_arr[i] = out_grad[i * iteration_stride + id]; + vals_arr[i] *= gamma_reg; + vals_hat_arr[i] = + (invertible ? 
(vals_hat[i * iteration_stride + id] - betta[i * iteration_stride + id]) / + gamma_reg + : vals_hat[i * iteration_stride + id]); + } + if ((high_index) < row_stride) { + float gamma_reg = gamma[high_index]; + vals_arr[iterations] = out_grad[high_index]; + vals_arr[iterations] *= gamma_reg; + vals_hat_arr[iterations] = + (invertible ? (vals_hat[high_index] - betta[high_index]) / gamma_reg + : vals_hat[high_index]); + iterations++; + } + + float var_reg = vars[row]; + + float sum = 0; + for (int i = 0; i < iterations; i++) { + sum += vals_hat_arr[i] * vals_arr[i] * + sqrtf(var_reg); // dval_hat = gamma * (x - u) * out_grad + vals_arr[i] *= rsqrtf(var_reg); // dvar_inv = gamma * out_grad / sqrt(var) + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + + sum = g.shfl(sum, 0); + sum /= row_stride; + + for (int i = 0; i < iterations; i++) { vals_arr[i] += ((-sum * vals_hat_arr[i]) / var_reg); } + + sum = 0; + for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + sum = g.shfl(sum, 0); + sum /= row_stride; + + iterations = row_stride / iteration_stride; + for (int i = 0; i < iterations; i++) inp_grad[i * iteration_stride + id] = (vals_arr[i] - sum); + if ((high_index) < row_stride) inp_grad[high_index] = (vals_arr[iterations] - sum); +} + +__global__ void LayerNormBackward2(const __half* out_grad, + const __half* vals_hat, + const __half* gamma, + const __half* betta, + const __half* vars, + __half* inp_grad, + bool invertible, + int row_stride) +{ +#ifdef HALF_PRECISION_AVAILABLE + int iteration_stride = blockDim.x; + int iterations = row_stride / iteration_stride; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int row = blockIdx.x; + int id = threadIdx.x; + int wid = id / WARP_SIZE; + int warp_num = iteration_stride >> WARP_SIZE_BITS; + __shared__ float partialSum[MAX_WARP_NUM]; + + __half2 vals_arr[NORM_REG]; + float2 vals_arr_f[NORM_REG]; + __half2 vals_hat_arr[NORM_REG]; + + __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad); + const __half2* out_grad_h = reinterpret_cast(out_grad); + const __half2* vals_hat_h = reinterpret_cast(vals_hat); + + inp_grad_h += (row * row_stride); + out_grad_h += (row * row_stride); + vals_hat_h += (row * row_stride); + + const __half2* gamma_h = reinterpret_cast(gamma); + const __half2* betta_h = (invertible ? reinterpret_cast(betta) : nullptr); + int high_index = iterations * iteration_stride + id; +#pragma unroll + for (int i = 0; i < iterations; i++) { + __half2 gamma_reg = gamma_h[i * iteration_stride + id]; + vals_arr[i] = out_grad_h[i * iteration_stride + id]; + vals_arr[i] *= gamma_reg; + vals_hat_arr[i] = + (invertible + ? 
(vals_hat_h[i * iteration_stride + id] - betta_h[i * iteration_stride + id]) / + gamma_reg + : vals_hat_h[i * iteration_stride + id]); + } + if ((high_index) < row_stride) { + __half2 gamma_reg = gamma_h[high_index]; + vals_arr[iterations] = out_grad_h[high_index]; + vals_arr[iterations] *= gamma_reg; + vals_hat_arr[iterations] = + (invertible ? (vals_hat_h[high_index] - betta_h[high_index]) / gamma_reg + : vals_hat_h[high_index]); + iterations++; + } + __half var_h = vars[row]; + __half2 var_reg = __halves2half2(var_h, var_h); + + float sum = 0.f; + for (int i = 0; i < iterations; i++) { + __half2 result_h = (vals_hat_arr[i] * vals_arr[i] * h2sqrt(var_reg)); + float2 result_f = __half22float2(result_h); + sum += result_f.x; + sum += result_f.y; + vals_arr[i] *= h2rsqrt(var_reg); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + + sum = g.shfl(sum, 0); + sum /= (2 * row_stride); + __half2 sum_h = __float2half2_rn(sum); + + for (int i = 0; i < iterations; i++) { + __half2 temp = ((-sum_h * vals_hat_arr[i]) / (var_reg)); + vals_arr_f[i] = __half22float2(vals_arr[i]); + float2 temp_f = __half22float2(temp); + vals_arr_f[i].x += temp_f.x; + vals_arr_f[i].y += temp_f.y; + } + sum = 0.f; + + for (int i = 0; i < iterations; i++) { + sum += (vals_arr_f[i].x); + sum += (vals_arr_f[i].y); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + + sum = g.shfl(sum, 0); + sum /= (2 * row_stride); + + iterations = row_stride / iteration_stride; + for (int i = 0; i < iterations; i++) { + vals_arr_f[i].x -= sum; + vals_arr_f[i].y -= sum; + __half2 temp = __float22half2_rn(vals_arr_f[i]); + + inp_grad_h[i * iteration_stride + id] = temp; + } + if ((high_index) < row_stride) { + vals_arr_f[iterations].x -= sum; + vals_arr_f[iterations].y -= sum; + __half2 temp = __float22half2_rn(vals_arr_f[iterations]); + + inp_grad_h[high_index] = temp; + } +#endif +} + +template <> +void launch_layerNorm_backward(const float* out_grad, + const float* vals_hat, + const float* vars, + const float* gamma, + float* gamma_grad, + float* betta_grad, + float* inp_grad, + int batch, + int hidden_dim, + cudaStream_t stream[2], + bool invertible, + const float* betta) +{ + int threads = THREADS; + + dim3 grid_dim(hidden_dim / TILE_DIM); + dim3 block_dim(TILE_DIM, TILE_DIM); + + LayerNormBackward1<<>>( + out_grad, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible); + + dim3 grid_dim2(batch); + + if (hidden_dim > 16384 && hidden_dim <= 32768) + threads <<= 1; + else if (hidden_dim > 32768 && hidden_dim <= 65536) + threads <<= 2; + else if (hidden_dim > 65536) + throw std::runtime_error("Unsupport hidden_dim."); + + dim3 block_dim2(threads); + + LayerNormBackward2<<>>( + out_grad, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim); +} + +template <> +void launch_layerNorm_backward<__half>(const __half* out_grad, + const __half* vals_hat, + const __half* vars, + const __half* gamma, + __half* gamma_grad, 
+ __half* betta_grad, + __half* inp_grad, + int batch, + int hidden_dim, + cudaStream_t stream[2], + bool invertible, + const __half* betta) +{ + int threads = THREADS; + + dim3 grid_dim(hidden_dim / TILE_DIM); + dim3 block_dim(TILE_DIM, TILE_DIM); + + // LayerNormBackward1<__half><<>>( + // out_grad, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible); + + dim3 grid_dim2(batch); + + if (hidden_dim > 8192 && hidden_dim <= 16384) + threads <<= 1; + else if (hidden_dim > 16384 && hidden_dim <= 32768) + threads <<= 2; + else if (hidden_dim > 32768 && hidden_dim <= 65536) + threads <<= 3; + else if (hidden_dim > 65536) + throw std::runtime_error("Unsupport hidden_dim."); + + dim3 block_dim2(threads / 2); + + LayerNormBackward2<<>>( + out_grad, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim / 2); +} + +/* Backward Normalize (Input-Gradient) + * Using the means and variances from the input + * This type of backward is not invertible! + * We do the backward using the input (X) + */ + +__global__ void LayerNormBackward2(const float* out_grad, + const float* X_vals, + const float* gamma, + const float* vars, + const float* means, + float* inp_grad, + int row_stride) +{ + int iteration_stride = blockDim.x; + int iterations = row_stride / iteration_stride; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int row = blockIdx.x; + int id = threadIdx.x; + int wid = id >> WARP_SIZE_BITS; + int warp_num = iteration_stride >> WARP_SIZE_BITS; + __shared__ float partialSum[MAX_WARP_NUM]; + + out_grad += (row * row_stride); + X_vals += (row * row_stride); + inp_grad += (row * row_stride); + + float vals_arr[NORM_REG]; + int high_index = iterations * iteration_stride + id; +#pragma unroll + for (int i = 0; i < iterations; i++) { + float gamma_reg = gamma[i * iteration_stride + id]; + vals_arr[i] = out_grad[i * iteration_stride + id]; + vals_arr[i] *= gamma_reg; + } + if ((high_index) < row_stride) { + float gamma_reg = gamma[high_index]; + vals_arr[iterations] = out_grad[high_index]; + vals_arr[iterations] *= gamma_reg; + iterations++; + } + + float var_reg = vars[row]; + float mean_reg = means[row]; + + float sum = 0; + float xu[NORM_REG]; + for (int i = 0; i < iterations; i++) { + xu[i] = (X_vals[i * iteration_stride + id] - mean_reg); + sum += vals_arr[i] * xu[i]; + vals_arr[i] *= rsqrtf(var_reg); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + + sum = g.shfl(sum, 0); + sum /= row_stride; + + for (int i = 0; i < iterations; i++) { + vals_arr[i] += (-sum * xu[i] * rsqrtf(var_reg) / (var_reg)); + } + + sum = 0; + for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + sum = g.shfl(sum, 0); + sum /= row_stride; + + iterations = row_stride / iteration_stride; + for (int i = 0; i < iterations; i++) inp_grad[i * iteration_stride + id] = (vals_arr[i] - sum); + if 
((high_index) < row_stride) inp_grad[high_index] = (vals_arr[iterations] - sum); +} + +__global__ void LayerNormBackward2(const __half* out_grad, + const __half* X_vals, + const __half* gamma, + const __half* vars, + const __half* means, + __half* inp_grad, + int row_stride) +{ +#ifdef HALF_PRECISION_AVAILABLE + int iteration_stride = blockDim.x; + int iterations = row_stride / iteration_stride; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int row = blockIdx.x; + int id = threadIdx.x; + int wid = id >> WARP_SIZE_BITS; + int warp_num = iteration_stride >> WARP_SIZE_BITS; + + __shared__ float partialSum[MAX_WARP_NUM]; + + __half2 vals_arr[NORM_REG]; + float2 vals_arr_f[NORM_REG]; + __half2 xu[NORM_REG]; + + __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad); + const __half2* out_grad_h = reinterpret_cast(out_grad); + const __half2* vals_hat_h = reinterpret_cast(X_vals); + + inp_grad_h += (row * row_stride); + out_grad_h += (row * row_stride); + vals_hat_h += (row * row_stride); + + const __half2* gamma_h = reinterpret_cast(gamma); + int high_index = iterations * iteration_stride + id; + + __half mean_h = means[row]; + __half2 mean_reg = __halves2half2(mean_h, mean_h); +#pragma unroll + for (int i = 0; i < iterations; i++) { + __half2 gamma_reg = gamma_h[i * iteration_stride + id]; + vals_arr[i] = out_grad_h[i * iteration_stride + id]; + vals_arr[i] *= gamma_reg; // out_grad * gamma + xu[i] = (vals_hat_h[i * iteration_stride + id] - mean_reg); + } + if ((high_index) < row_stride) { + __half2 gamma_reg = gamma_h[high_index]; + vals_arr[iterations] = out_grad_h[high_index]; + vals_arr[iterations] *= gamma_reg; // out_grad * gamma + xu[iterations] = (vals_hat_h[high_index] - mean_reg); + iterations++; + } + __half var_h = vars[row]; + __half2 var_reg = __halves2half2(var_h, var_h); + + float sum = 0.f; + for (int i = 0; i < iterations; i++) { + __half2 result_h = (xu[i] * vals_arr[i]); + float2 result_f = __half22float2(result_h); + sum += result_f.x; + sum += result_f.y; + vals_arr[i] *= h2rsqrt(var_reg); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + + sum = g.shfl(sum, 0); + sum /= (2 * row_stride); + __half2 sum_h = __float2half2_rn(sum); + + for (int i = 0; i < iterations; i++) { + __half2 xu_grad = ((-sum_h * xu[i] * h2rsqrt(var_reg)) / (var_reg)); + vals_arr_f[i] = __half22float2(vals_arr[i]); + float2 xu_grad_f = __half22float2(xu_grad); + vals_arr_f[i].x += xu_grad_f.x; + vals_arr_f[i].y += xu_grad_f.y; + } + + sum = 0.f; + for (int i = 0; i < iterations; i++) { + sum += (vals_arr_f[i].x); + sum += (vals_arr_f[i].y); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + + sum = g.shfl(sum, 0); + sum /= (2 * row_stride); + + iterations = row_stride / iteration_stride; + for (int i = 0; i < iterations; i++) { + vals_arr_f[i].x -= sum; + vals_arr_f[i].y -= sum; + __half2 temp = __float22half2_rn(vals_arr_f[i]); + inp_grad_h[i 
* iteration_stride + id] = temp; + } + if ((high_index) < row_stride) { + vals_arr_f[iterations].x -= sum; + vals_arr_f[iterations].y -= sum; + __half2 temp = __float22half2_rn(vals_arr_f[iterations]); + inp_grad_h[high_index] = temp; + } +#endif +} + +template <> +void launch_layerNorm_backward(const float* out_grad, + const float* X_data, + const float* vars, + const float* means, + const float* gamma, + float* gamma_grad, + float* betta_grad, + float* inp_grad, + int batch, + int hidden_dim, + cudaStream_t stream[2]) +{ + int threads = THREADS; + + dim3 grid_dim(hidden_dim / TILE_DIM); + dim3 block_dim(TILE_DIM, TILE_DIM); + + LayerNormBackward1<<>>( + out_grad, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim); + + dim3 grid_dim2(batch); + + if (hidden_dim > 16384 && hidden_dim <= 32768) + threads <<= 1; + else if (hidden_dim > 32768 && hidden_dim <= 65536) + threads <<= 2; + else if (hidden_dim > 65536) + throw std::runtime_error("Unsupport hidden_dim."); + + dim3 block_dim2(threads); + LayerNormBackward2<<>>( + out_grad, X_data, gamma, vars, means, inp_grad, hidden_dim); +} + +template <> +void launch_layerNorm_backward<__half>(const __half* out_grad, + const __half* X_data, + const __half* vars, + const __half* means, + const __half* gamma, + __half* gamma_grad, + __half* betta_grad, + __half* inp_grad, + int batch, + int hidden_dim, + cudaStream_t stream[2]) +{ + int threads = THREADS; + + dim3 grid_dim(hidden_dim / TILE_DIM); + dim3 block_dim(TILE_DIM, TILE_DIM); + + LayerNormBackward1<__half><<>>( + out_grad, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim); + + dim3 grid_dim2(batch); + + if (hidden_dim > 8192 && hidden_dim <= 16384) + threads <<= 1; + else if (hidden_dim > 16384 && hidden_dim <= 32768) + threads <<= 2; + else if (hidden_dim > 32768 && hidden_dim <= 65536) + threads <<= 3; + else if (hidden_dim > 65536) + throw std::runtime_error("Unsupport hidden_dim."); + + dim3 block_dim2(threads / 2); + LayerNormBackward2<<>>( + out_grad, X_data, gamma, vars, means, inp_grad, hidden_dim / 2); +} + +template +__global__ void LayerNormBackward1_fused_add(const T* __restrict__ out_grad1, + const T* __restrict__ out_grad2, + const T* __restrict__ vals_hat, + const T* __restrict__ gamma, + const T* __restrict__ betta, + T* __restrict__ gamma_grad, + T* __restrict__ betta_grad, + int rows, + int width, + bool invertible) +{ + __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1]; + __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1]; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int idx = blockDim.x * blockIdx.x + threadIdx.x; + int offset = threadIdx.y * width + idx; + int y_stride = width * TILE_DIM; + + float betta_reg = (invertible ? (float)betta[idx] : 0.0f); + float gamma_reg = (float)gamma[idx]; + + // Loop across matrix height + float betta_tmp = 0; + float gamma_tmp = 0; + for (int r = threadIdx.y; r < rows; r += TILE_DIM) { + float grad = (float)out_grad1[offset] + (float)out_grad2[offset]; + float val = (invertible ? ((float)vals_hat[offset] - betta_reg) / gamma_reg + : (float)vals_hat[offset]); + betta_tmp += grad; + gamma_tmp += (val * grad); + + offset += y_stride; + } + + betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp; + gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp; + + __syncthreads(); + + // Sum the shared buffer. 
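+    // Same transpose-and-reduce as LayerNormBackward1 above; the only difference in
+    // this fused variant is that the upstream gradient accumulated into
+    // betta_tmp/gamma_tmp is the elementwise sum out_grad1 + out_grad2.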
+ float s1 = betta_buffer[threadIdx.y][threadIdx.x]; + float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < TILE_DIM; i <<= 1) { + s1 += g.shfl_down(s1, i); + s2 += g.shfl_down(s2, i); + } + + if (threadIdx.x == 0) { + int pos = blockIdx.x * TILE_DIM + threadIdx.y; + betta_grad[pos] = s1; + gamma_grad[pos] = s2; + } +} + +template +__global__ void LayerNormBackward1_fused_add(const T* __restrict__ out_grad1, + const T* __restrict__ out_grad2, + const T* __restrict__ X_data, + const T* __restrict__ vars, + const T* __restrict__ means, + T* __restrict__ gamma_grad, + T* __restrict__ betta_grad, + int rows, + int width) +{ + __shared__ float betta_buffer[TILE_DIM][TILE_DIM + 1]; + __shared__ float gamma_buffer[TILE_DIM][TILE_DIM + 1]; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int idx = blockDim.x * blockIdx.x + threadIdx.x; + int offset = threadIdx.y * width + idx; + int y_stride = width * TILE_DIM; + + int pos = blockIdx.x * TILE_DIM + threadIdx.y; + // Loop across matrix height + + float betta_tmp = 0; + float gamma_tmp = 0; + for (int r = threadIdx.y; r < rows; r += TILE_DIM) { + float grad = (float)out_grad1[offset] + (float)out_grad2[offset]; + float val = (float)X_data[offset]; + val = (val - (float)means[r]) * rsqrtf((float)vars[r]); + betta_tmp += grad; + gamma_tmp += (val * grad); + + offset += y_stride; + } + + betta_buffer[threadIdx.x][threadIdx.y] = betta_tmp; + gamma_buffer[threadIdx.x][threadIdx.y] = gamma_tmp; + + __syncthreads(); + + // Sum the shared buffer. + float s1 = betta_buffer[threadIdx.y][threadIdx.x]; + float s2 = gamma_buffer[threadIdx.y][threadIdx.x]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < TILE_DIM; i <<= 1) { + s1 += g.shfl_down(s1, i); + s2 += g.shfl_down(s2, i); + } + + if (threadIdx.x == 0) { + betta_grad[pos] = s1; + gamma_grad[pos] = s2; + } +} + +__global__ void LayerNormBackward2_fused_add(const float* out_grad1, + const float* out_grad2, + const float* vals_hat, + const float* gamma, + const float* betta, + const float* vars, + float* inp_grad, + bool invertible, + int row_stride) +{ + int iteration_stride = blockDim.x; + int iterations = row_stride / iteration_stride; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int row = blockIdx.x; + int id = threadIdx.x; + int wid = id / WARP_SIZE; + int warp_num = iteration_stride >> WARP_SIZE_BITS; + __shared__ float partialSum[MAX_WARP_NUM]; + + out_grad1 += (row * row_stride); + out_grad2 += (row * row_stride); + vals_hat += (row * row_stride); + inp_grad += (row * row_stride); + + float vals_arr[NORM_REG]; + float vals_hat_arr[NORM_REG]; + int high_index = iterations * iteration_stride + id; +#pragma unroll + for (int i = 0; i < iterations; i++) { + float gamma_reg = gamma[i * iteration_stride + id]; + vals_arr[i] = out_grad1[i * iteration_stride + id]; + vals_arr[i] *= gamma_reg; + vals_hat_arr[i] = + (invertible ? (vals_hat[i * iteration_stride + id] - betta[i * iteration_stride + id]) / + gamma_reg + : vals_hat[i * iteration_stride + id]); + } + if ((high_index) < row_stride) { + float gamma_reg = gamma[high_index]; + vals_arr[iterations] = out_grad1[high_index]; + vals_arr[iterations] *= gamma_reg; + vals_hat_arr[iterations] = + (invertible ? 
(vals_hat[high_index] - betta[high_index]) / gamma_reg + : vals_hat[high_index]); + iterations++; + } + + float var_reg = vars[row]; + + float sum = 0; + for (int i = 0; i < iterations; i++) { + sum += vals_hat_arr[i] * vals_arr[i] * sqrtf(var_reg); + vals_arr[i] *= rsqrtf(var_reg); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + + sum = g.shfl(sum, 0); + sum /= row_stride; + + for (int i = 0; i < iterations; i++) { vals_arr[i] += ((-sum * vals_hat_arr[i]) / var_reg); } + + sum = 0; + for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + sum = g.shfl(sum, 0); + sum /= row_stride; + + iterations = row_stride / iteration_stride; + for (int i = 0; i < iterations; i++) + inp_grad[i * iteration_stride + id] = + (vals_arr[i] - sum) + out_grad2[i * iteration_stride + id]; + if ((high_index) < row_stride) + inp_grad[high_index] = (vals_arr[iterations] - sum) + out_grad2[high_index]; +} + +__global__ void LayerNormBackward2_fused_add(const __half* out_grad1, + const __half* out_grad2, + const __half* vals_hat, + const __half* gamma, + const __half* betta, + const __half* vars, + __half* inp_grad, + bool invertible, + int row_stride) +{ +#ifdef HALF_PRECISION_AVAILABLE + int iteration_stride = blockDim.x; + int iterations = row_stride / iteration_stride; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int row = blockIdx.x; + int id = threadIdx.x; + int wid = id / WARP_SIZE; + int warp_num = iteration_stride >> WARP_SIZE_BITS; + __shared__ float partialSum[MAX_WARP_NUM]; + + __half2 vals_arr[NORM_REG]; + float2 vals_arr_f[NORM_REG]; + __half2 vals_hat_arr[NORM_REG]; + + // float2 result[iterations]; + + __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad); + const __half2* out_grad_h1 = reinterpret_cast(out_grad1); + const __half2* out_grad_h2 = reinterpret_cast(out_grad2); + const __half2* vals_hat_h = reinterpret_cast(vals_hat); + + inp_grad_h += (row * row_stride); + out_grad_h1 += (row * row_stride); + out_grad_h2 += (row * row_stride); + vals_hat_h += (row * row_stride); + + const __half2* gamma_h = reinterpret_cast(gamma); + const __half2* betta_h = (invertible ? reinterpret_cast(betta) : nullptr); + int high_index = iterations * iteration_stride + id; +#pragma unroll + for (int i = 0; i < iterations; i++) { + __half2 gamma_reg = gamma_h[i * iteration_stride + id]; + vals_arr[i] = out_grad_h1[i * iteration_stride + id]; + vals_arr[i] *= gamma_reg; // out_grad * gamma + vals_hat_arr[i] = + (invertible + ? (vals_hat_h[i * iteration_stride + id] - betta_h[i * iteration_stride + id]) / + gamma_reg + : vals_hat_h[i * iteration_stride + id]); + } + if ((high_index) < row_stride) { + __half2 gamma_reg = gamma_h[high_index]; + vals_arr[iterations] = out_grad_h1[high_index]; + vals_arr[iterations] *= gamma_reg; // out_grad * gamma + vals_hat_arr[iterations] = + (invertible ? 
(vals_hat_h[high_index] - betta_h[high_index]) / gamma_reg + : vals_hat_h[high_index]); + iterations++; + } + __half var_h = vars[row]; + __half2 var_reg = __halves2half2(var_h, var_h); + + float sum = 0.f; + for (int i = 0; i < iterations; i++) { + __half2 result_h = (vals_hat_arr[i] * vals_arr[i] * h2sqrt(var_reg)); + float2 result_f = __half22float2(result_h); + sum += result_f.x; + sum += result_f.y; + vals_arr[i] *= h2rsqrt(var_reg); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + + sum = g.shfl(sum, 0); + sum /= (2 * row_stride); + __half2 sum_h = __float2half2_rn(sum); + + for (int i = 0; i < iterations; i++) { + __half2 temp = ((-sum_h * vals_hat_arr[i]) / (var_reg)); + vals_arr_f[i] = __half22float2(vals_arr[i]); + float2 temp_f = __half22float2(temp); + vals_arr_f[i].x += temp_f.x; + vals_arr_f[i].y += temp_f.y; + } + sum = 0.f; + for (int i = 0; i < iterations; i++) { + sum += (vals_arr_f[i].x); + sum += (vals_arr_f[i].y); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + + sum = g.shfl(sum, 0); + sum /= (2 * row_stride); + + iterations = row_stride / iteration_stride; + for (int i = 0; i < iterations; i++) { + vals_arr_f[i].x -= sum; + vals_arr_f[i].y -= sum; + __half2 temp = __float22half2_rn(vals_arr_f[i]); + + inp_grad_h[i * iteration_stride + id] = temp + out_grad_h2[i * iteration_stride + id]; + } + if ((high_index) < row_stride) { + vals_arr_f[iterations].x -= sum; + vals_arr_f[iterations].y -= sum; + __half2 temp = __float22half2_rn(vals_arr_f[iterations]); + + inp_grad_h[high_index] = temp + out_grad_h2[high_index]; + } +#endif +} + +template <> +void launch_layerNorm_backward_fused_add(const float* out_grad1, + const float* out_grad2, + const float* vals_hat, + const float* vars, + const float* gamma, + float* gamma_grad, + float* betta_grad, + float* inp_grad, + int batch, + int hidden_dim, + cudaStream_t stream[2], + bool invertible, + const float* betta) +{ + int threads = THREADS; + + dim3 grid_dim(hidden_dim / TILE_DIM); + dim3 block_dim(TILE_DIM, TILE_DIM); + LayerNormBackward1<<>>( + out_grad1, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible); + + dim3 grid_dim2(batch); + + if (hidden_dim > 16384 && hidden_dim <= 32768) + threads <<= 1; + else if (hidden_dim > 32768 && hidden_dim <= 65536) + threads <<= 2; + else if (hidden_dim > 65536) + throw std::runtime_error("Unsupport hidden_dim."); + + dim3 block_dim2(threads); + LayerNormBackward2_fused_add<<>>( + out_grad1, out_grad2, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim); +} + +template <> +void launch_layerNorm_backward_fused_add<__half>(const __half* out_grad1, + const __half* out_grad2, + const __half* vals_hat, + const __half* vars, + const __half* gamma, + __half* gamma_grad, + __half* betta_grad, + __half* inp_grad, + int batch, + int hidden_dim, + cudaStream_t stream[2], + bool invertible, + const __half* betta) +{ + int threads = THREADS; + + dim3 
grid_dim(hidden_dim / TILE_DIM); + dim3 block_dim(TILE_DIM, TILE_DIM); + + LayerNormBackward1<__half><<>>( + out_grad1, vals_hat, gamma, betta, gamma_grad, betta_grad, batch, hidden_dim, invertible); + + dim3 grid_dim2(batch); + + if (hidden_dim > 8192 && hidden_dim <= 16384) + threads <<= 1; + else if (hidden_dim > 16384 && hidden_dim <= 32768) + threads <<= 2; + else if (hidden_dim > 32768 && hidden_dim <= 65536) + threads <<= 3; + else if (hidden_dim > 65536) + throw std::runtime_error("Unsupport hidden_dim."); + + dim3 block_dim2(threads / 2); + LayerNormBackward2_fused_add<<>>( + out_grad1, out_grad2, vals_hat, gamma, betta, vars, inp_grad, invertible, hidden_dim / 2); +} + +/* Backward Normalize (Input-Gradient) + * Using the means and variances from the input + * This type of backward is not invertible! + * We do the backward using the input (X) + */ + +__global__ void LayerNormBackward2_fused_add(const float* out_grad1, + const float* out_grad2, + const float* X_vals, + const float* gamma, + const float* vars, + const float* means, + float* inp_grad, + int row_stride) +{ + int iteration_stride = blockDim.x; + int iterations = row_stride / iteration_stride; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int row = blockIdx.x; + int id = threadIdx.x; + int wid = id / WARP_SIZE; + int warp_num = iteration_stride >> WARP_SIZE_BITS; + __shared__ float partialSum[MAX_WARP_NUM]; + + float vals_arr[NORM_REG]; + float vals_hat_arr[NORM_REG]; + + out_grad1 += (row * row_stride); + out_grad2 += (row * row_stride); + X_vals += (row * row_stride); + inp_grad += (row * row_stride); + int high_index = iterations * iteration_stride + id; +#pragma unroll + for (int i = 0; i < iterations; i++) { + float gamma_reg = gamma[i * iteration_stride + id]; + vals_arr[i] = out_grad1[i * iteration_stride + id]; + vals_arr[i] *= gamma_reg; + vals_hat_arr[i] = X_vals[i * iteration_stride + id]; + } + if ((high_index) < row_stride) { + float gamma_reg = gamma[high_index]; + vals_arr[iterations] = out_grad1[high_index]; + vals_arr[iterations] *= gamma_reg; + vals_hat_arr[iterations] = X_vals[high_index]; + iterations++; + } + + float var_reg = vars[row]; + float mean_reg = means[row]; + + float sum = 0; + float xu[NORM_REG]; + for (int i = 0; i < iterations; i++) { + xu[i] = (vals_hat_arr[i] - mean_reg); + sum += vals_arr[i] * xu[i]; + vals_arr[i] *= rsqrtf(var_reg); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + + sum = g.shfl(sum, 0); + sum /= row_stride; + + for (int i = 0; i < iterations; i++) { + vals_arr[i] += (-sum * xu[i] * rsqrtf(var_reg) / (var_reg)); + } + + sum = 0; + for (int i = 0; i < iterations; i++) { sum += vals_arr[i]; } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + sum = g.shfl(sum, 0); + sum /= row_stride; + + iterations = row_stride / iteration_stride; + for (int i = 0; i < iterations; i++) + inp_grad[i * iteration_stride + id] = + 
(vals_arr[i] - sum) + out_grad2[i * iteration_stride + id]; + if ((high_index) < row_stride) + inp_grad[high_index] = (vals_arr[iterations] - sum) + out_grad2[high_index]; +} + +__global__ void LayerNormBackward2_fused_add(const __half* out_grad1, + const __half* out_grad2, + const __half* X_vals, + const __half* gamma, + const __half* vars, + const __half* means, + __half* inp_grad, + int row_stride) +{ +#ifdef HALF_PRECISION_AVAILABLE + int iteration_stride = blockDim.x; + int iterations = row_stride / iteration_stride; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int row = blockIdx.x; + int id = threadIdx.x; + int wid = id / WARP_SIZE; + int warp_num = iteration_stride >> WARP_SIZE_BITS; + + __shared__ float partialSum[MAX_WARP_NUM]; + + __half2 vals_arr[NORM_REG]; + float2 vals_arr_f[NORM_REG]; + __half2 vals_hat_arr[NORM_REG]; + + __half2* inp_grad_h = reinterpret_cast<__half2*>(inp_grad); + const __half2* out_grad_h1 = reinterpret_cast(out_grad1); + const __half2* out_grad_h2 = reinterpret_cast(out_grad2); + const __half2* vals_hat_h = reinterpret_cast(X_vals); + + out_grad_h1 += (row * row_stride); + out_grad_h2 += (row * row_stride); + inp_grad_h += (row * row_stride); + vals_hat_h += (row * row_stride); + + const __half2* gamma_h = reinterpret_cast(gamma); + int high_index = iterations * iteration_stride + id; +#pragma unroll + for (int i = 0; i < iterations; i++) { + __half2 gamma_reg = gamma_h[i * iteration_stride + id]; + vals_arr[i] = out_grad_h1[i * iteration_stride + id]; + vals_arr[i] *= gamma_reg; // out_grad * gamma + vals_hat_arr[i] = vals_hat_h[i * iteration_stride + id]; + } + if ((high_index) < row_stride) { + __half2 gamma_reg = gamma_h[high_index]; + vals_arr[iterations] = out_grad_h1[high_index]; + vals_arr[iterations] *= gamma_reg; // out_grad * gamma + vals_hat_arr[iterations] = vals_hat_h[high_index]; + iterations++; + } + + __half mean_h = means[row]; + __half var_h = vars[row]; + __half2 var_reg = __halves2half2(var_h, var_h); + __half2 mean_reg = __halves2half2(mean_h, mean_h); + __half2 xu[NORM_REG]; + + float sum = 0.f; + for (int i = 0; i < iterations; i++) { + xu[i] = (vals_hat_arr[i] - mean_reg); + __half2 result_h = (xu[i] * vals_arr[i]); + float2 result_f = __half22float2(result_h); + sum += result_f.x; + sum += result_f.y; + vals_arr[i] *= h2rsqrt(var_reg); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i *= 2) sum += g.shfl_down(sum, i); + + sum = g.shfl(sum, 0); + sum /= (2 * row_stride); + __half2 sum_h = __float2half2_rn(sum); + + for (int i = 0; i < iterations; i++) { + __half2 xu_grad = ((-sum_h * xu[i] * h2rsqrt(var_reg)) / (var_reg)); + vals_arr_f[i] = __half22float2(vals_arr[i]); + float2 xu_grad_f = __half22float2(xu_grad); + vals_arr_f[i].x += xu_grad_f.x; + vals_arr_f[i].y += xu_grad_f.y; + } + + sum = 0.f; + for (int i = 0; i < iterations; i++) { + sum += (vals_arr_f[i].x); + sum += (vals_arr_f[i].y); + } + + for (int i = 1; i < WARP_SIZE; i *= 2) { sum += g.shfl_down(sum, i); } + + if (g.thread_rank() == 0) partialSum[wid] = sum; + + __syncthreads(); + + if (g.thread_rank() < warp_num) sum = partialSum[g.thread_rank()]; + +#ifndef __STOCHASTIC_MODE__ + __syncthreads(); +#endif + + for (int i = 1; i < warp_num; i 
*= 2) sum += g.shfl_down(sum, i); + + sum = g.shfl(sum, 0); + sum /= (2 * row_stride); + + iterations = row_stride / iteration_stride; + for (int i = 0; i < iterations; i++) { + vals_arr_f[i].x -= sum; + vals_arr_f[i].y -= sum; + __half2 temp = __float22half2_rn(vals_arr_f[i]); + inp_grad_h[i * iteration_stride + id] = temp + out_grad_h2[i * iteration_stride + id]; + } + if ((high_index) < row_stride) { + vals_arr_f[iterations].x -= sum; + vals_arr_f[iterations].y -= sum; + __half2 temp = __float22half2_rn(vals_arr_f[iterations]); + inp_grad_h[high_index] = temp + out_grad_h2[high_index]; + } +#endif +} + +template <> +void launch_layerNorm_backward_fused_add(const float* out_grad1, + const float* out_grad2, + const float* X_data, + const float* vars, + const float* means, + const float* gamma, + float* gamma_grad, + float* betta_grad, + float* inp_grad, + int batch, + int hidden_dim, + cudaStream_t stream[2]) +{ + int threads = THREADS; + + dim3 grid_dim(hidden_dim / TILE_DIM); + dim3 block_dim(TILE_DIM, TILE_DIM); + + LayerNormBackward1<<>>( + out_grad1, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim); + + dim3 grid_dim2(batch); + + if (hidden_dim > 16384 && hidden_dim <= 32768) + threads <<= 1; + else if (hidden_dim > 32768 && hidden_dim <= 65536) + threads <<= 2; + else if (hidden_dim > 65536) + throw std::runtime_error("Unsupport hidden_dim."); + + dim3 block_dim2(threads); + LayerNormBackward2_fused_add<<>>( + out_grad1, out_grad2, X_data, gamma, vars, means, inp_grad, hidden_dim); +} + +template <> +void launch_layerNorm_backward_fused_add<__half>(const __half* out_grad1, + const __half* out_grad2, + const __half* X_data, + const __half* vars, + const __half* means, + const __half* gamma, + __half* gamma_grad, + __half* betta_grad, + __half* inp_grad, + int batch, + int hidden_dim, + cudaStream_t stream[2]) +{ + int threads = THREADS; + + dim3 grid_dim(hidden_dim / TILE_DIM); + dim3 block_dim(TILE_DIM, TILE_DIM); + + LayerNormBackward1<__half><<>>( + out_grad1, X_data, vars, means, gamma_grad, betta_grad, batch, hidden_dim); + + dim3 grid_dim2(batch); + + if (hidden_dim > 8192 && hidden_dim <= 16384) + threads <<= 1; + else if (hidden_dim > 16384 && hidden_dim <= 32768) + threads <<= 2; + else if (hidden_dim > 32768 && hidden_dim <= 65536) + threads <<= 3; + else if (hidden_dim > 65536) + throw std::runtime_error("Unsupport hidden_dim."); + + dim3 block_dim2(threads / 2); + LayerNormBackward2_fused_add<<>>( + out_grad1, out_grad2, X_data, gamma, vars, means, inp_grad, hidden_dim / 2); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/softmax_kernels.cu b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/softmax_kernels.cu new file mode 100644 index 0000000000000000000000000000000000000000..d9efed286462f1be971922c2345efb8d2049e227 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/softmax_kernels.cu @@ -0,0 +1,701 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include "custom_cuda_layers.h" +#include "general_kernels.h" + +namespace cg = cooperative_groups; + +dim3 get_attn_softmax_grid(int batch_size, int heads, int sequence_length, int threads) +{ + int seq_length4 = sequence_length / 4; + int block_compute_size = + (seq_length4 < threads ? 
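+ // block_compute_size = number of attention rows packed into one thread block:
+ // when a single row (seq_length4 float4 elements) needs fewer than `threads`
+ // lanes, the largest power-of-two row count is used, otherwise 1. For example,
+ // threads = 128 and sequence_length = 128 give seq_length4 = 32 and
+ // block_compute_size = 4, so gridDim.x = heads * 128 / 4 per batch.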
(int)pow(2.0, floor(log2((float)(threads / seq_length4)))) : 1); + // Note that the Y and Z dimensions are limited to 65535, while X is basically unlimited: + // https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#features-and-technical-specifications + // The batch size is typically relatively small, while the sequence length could potentially be + // arbitrarily large. We therefore place the batch size second to avoid hitting the Y limit. + unsigned x = heads * sequence_length / block_compute_size; + unsigned y = batch_size; + return {x, y}; +} + +// Fused attention + softmax +template +__global__ void attn_softmax(float* vals, + const float* attn_mask, + int heads, + int seq_length, + int iterations) +{ + __shared__ float partialSum[MAX_WARP_NUM]; + + int warp_num = blockDim.x >> WARP_SIZE_BITS; + + int iteration_stride = blockDim.x; + int block_width = blockStride * seq_length; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int batch = blockIdx.y; + int row = blockIdx.x; + int max_threads_in_sequence = std::max(seq_length, tbSeq); + int seq_lane = threadIdx.x % max_threads_in_sequence; + + int data_offset = batch * (gridDim.x * block_width) + row * block_width + + (threadIdx.x / max_threads_in_sequence) * seq_length; + int mask_offset = batch * seq_length; + + int wid = threadIdx.x >> WARP_SIZE_BITS; + int lane = threadIdx.x & 0x1f; + + float4* val_cast = reinterpret_cast(vals); + const float4* attn_mask_cast = reinterpret_cast(attn_mask); + + float4 data[MAX_THREAD_ITERATIONS]; + + float max_val = minus_infinity; + + for (int i = 0; i < iterations; i++) { + int data_id = i * iteration_stride + seq_lane; + if (data_id < seq_length) { + float4 mask = attn_mask_cast[mask_offset + data_id]; + data[i] = val_cast[data_offset + data_id]; + + data[i].x += mask.x; + data[i].y += mask.y; + data[i].z += mask.z; + data[i].w += mask.w; + + max_val = (data[i].x > max_val ? data[i].x : max_val); + max_val = (data[i].y > max_val ? data[i].y : max_val); + max_val = (data[i].z > max_val ? data[i].z : max_val); + max_val = (data[i].w > max_val ? data[i].w : max_val); + } else { + data[i].x = minus_infinity; + data[i].y = minus_infinity; + data[i].z = minus_infinity; + data[i].w = minus_infinity; + } + } + + for (int i = 1; i < tbSize; i *= 2) { + auto temp = g.shfl_xor(max_val, i); + max_val = (temp > max_val ? temp : max_val); + } + + if (seq_length > tbSize) { + if (lane == 0) partialSum[wid] = max_val; + b.sync(); + + if (lane < warp_num) max_val = partialSum[lane]; + +#ifndef __STOCHASTIC_MODE__ + b.sync(); +#endif + + int iters = warp_num; + if (seq_length < iteration_stride) + iters = warp_num / (iteration_stride / max_threads_in_sequence); + + for (int i = 1; i < iters; i *= 2) { + auto temp = g.shfl_xor(max_val, i); + max_val = (temp > max_val ? 
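+ // Cross-warp stage of the row-max reduction: warp leaders parked their partial
+ // maxima in partialSum, the low lanes combine them with shfl_xor, and the
+ // result is then redistributed with g.shfl so every thread sees its row's
+ // maximum before the exp/sum pass.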
temp : max_val); + } + + max_val = g.shfl(max_val, threadIdx.x / tbSize); + } + + float sum = 0; + for (int i = 0; i < iterations; i++) { + data[i].x = __expf(data[i].x - max_val); + data[i].y = __expf(data[i].y - max_val); + data[i].z = __expf(data[i].z - max_val); + data[i].w = __expf(data[i].w - max_val); + + sum += (data[i].x + data[i].y + data[i].z + data[i].w); + } + + for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); } + + if (seq_length > tbSize) { + if (lane == 0) partialSum[wid] = sum; + b.sync(); + + if (lane < warp_num) sum = partialSum[lane]; + +#ifndef __STOCHASTIC_MODE__ + b.sync(); +#endif + + int iters = warp_num; + if (seq_length < iteration_stride) + iters = warp_num / (iteration_stride / max_threads_in_sequence); + + for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); } + + sum = g.shfl(sum, threadIdx.x / tbSize); + } + + sum += 1e-6; + + for (int i = 0; i < iterations; i++) { + data[i].x /= sum; + data[i].y /= sum; + data[i].z /= sum; + data[i].w /= sum; + + int data_id = i * iteration_stride + seq_lane; + if (data_id < seq_length) val_cast[data_offset + data_id] = data[i]; + } +} + +template +__global__ void attn_softmax(__half* vals, + const __half* attn_mask, + int heads, + int seq_length, + int iterations) +{ +#ifdef HALF_PRECISION_AVAILABLE + __shared__ float partialSum[MAX_WARP_NUM]; + + int warp_num = blockDim.x >> WARP_SIZE_BITS; + + int iteration_stride = blockDim.x; + int block_width = blockStride * seq_length; + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int batch = blockIdx.y; + int row = blockIdx.x; + int max_threads_in_sequence = std::max(seq_length, tbSeq); + int seq_lane = threadIdx.x % max_threads_in_sequence; + + int data_offset = batch * (gridDim.x * block_width) + row * block_width + + (threadIdx.x / max_threads_in_sequence) * seq_length; + int mask_offset = batch * seq_length; + + int wid = threadIdx.x >> WARP_SIZE_BITS; + int lane = threadIdx.x & 0x1f; + + float2* val_cast = reinterpret_cast(vals); + const float2* attn_mask_cast = reinterpret_cast(attn_mask); + + val_cast += data_offset; + attn_mask_cast += mask_offset; + + float2 low_data[MAX_THREAD_ITERATIONS]; + float2 high_data[MAX_THREAD_ITERATIONS]; + + float max_val = minus_infinity; + + for (int i = 0; i < iterations; i++) { + int data_id = i * iteration_stride + seq_lane; + if (data_id < seq_length) { + float2 data = val_cast[data_id]; + float2 mask = attn_mask_cast[data_id]; + + __half2* data_arr = reinterpret_cast<__half2*>(&data); + __half2* mask_arr = reinterpret_cast<__half2*>(&mask); + + low_data[i] = __half22float2(data_arr[0]); + high_data[i] = __half22float2(data_arr[1]); + float2 low_mask = __half22float2(mask_arr[0]); + float2 high_mask = __half22float2(mask_arr[1]); + + low_data[i].x += low_mask.x; + low_data[i].y += low_mask.y; + high_data[i].x += high_mask.x; + high_data[i].y += high_mask.y; + + max_val = (low_data[i].x > max_val ? low_data[i].x : max_val); + max_val = (low_data[i].y > max_val ? low_data[i].y : max_val); + max_val = (high_data[i].x > max_val ? high_data[i].x : max_val); + max_val = (high_data[i].y > max_val ? high_data[i].y : max_val); + } + } + + for (int i = 1; i < tbSize; i *= 2) { + auto temp = g.shfl_xor(max_val, i); + max_val = (temp > max_val ? 
temp : max_val); + } + + if (seq_length > tbSize) { + if (lane == 0) partialSum[wid] = max_val; + b.sync(); + + if (lane < warp_num) max_val = partialSum[lane]; + +#ifndef __STOCHASTIC_MODE__ + b.sync(); +#endif + + int iters = warp_num; + if (seq_length < iteration_stride) + iters = warp_num / (iteration_stride / max_threads_in_sequence); + + for (int i = 1; i < iters; i *= 2) { + auto temp = g.shfl_xor(max_val, i); + max_val = (temp > max_val ? temp : max_val); + } + + max_val = g.shfl(max_val, threadIdx.x / tbSize); + } + + float sum = 0; + for (int i = 0; i < iterations; i++) { + int data_id = i * iteration_stride + seq_lane; + if (data_id < seq_length) { + low_data[i].x = __expf(low_data[i].x - max_val); + low_data[i].y = __expf(low_data[i].y - max_val); + high_data[i].x = __expf(high_data[i].x - max_val); + high_data[i].y = __expf(high_data[i].y - max_val); + + sum += (low_data[i].x + low_data[i].y + high_data[i].x + high_data[i].y); + } + } + + for (int i = 1; i < tbSize; i *= 2) { sum += g.shfl_xor(sum, i); } + + if (seq_length > tbSize) { + if (lane == 0) partialSum[wid] = sum; + b.sync(); + + if (lane < warp_num) sum = partialSum[lane]; + +#ifndef __STOCHASTIC_MODE__ + b.sync(); +#endif + + int iters = warp_num; + if (seq_length < iteration_stride) + iters = warp_num / (iteration_stride / max_threads_in_sequence); + + for (int i = 1; i < iters; i *= 2) { sum += g.shfl_xor(sum, i); } + + sum = g.shfl(sum, threadIdx.x / tbSize); + } + + sum += 1e-6; + + for (int i = 0; i < iterations; i++) { + int data_id = i * iteration_stride + seq_lane; + if (data_id < seq_length) { + float2 result_f; + __half2* result_h = reinterpret_cast<__half2*>(&result_f); + + low_data[i].x /= sum; + low_data[i].y /= sum; + high_data[i].x /= sum; + high_data[i].y /= sum; + + result_h[0] = __float22half2_rn(low_data[i]); + result_h[1] = __float22half2_rn(high_data[i]); + + val_cast[data_id] = result_f; + } + } + +#endif +} + +template +void launch_attn_softmax(T*, const T*, int, int, int, cudaStream_t); + +template <> +void launch_attn_softmax(float* vals, + const float* attn_mask, + int batch_size, + int heads, + int sequence_length, + cudaStream_t stream) +{ + const int threads = 128; + int seq_length4 = sequence_length / 4; + + dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads); + + int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads; + + dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) / + subblock_max_workload * threads) + : threads); + int iterations = + (sequence_length < subblock_max_workload ? 
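+ // Per-thread iteration count for the 128-thread float path: each thread steps
+ // through its row in float4 chunks, so a row that fits in one sub-block's
+ // workload needs ceil(seq_length4 / threads) passes, and anything larger is
+ // capped at MAX_THREAD_ITERATIONS.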
(seq_length4 + threads - 1) / threads + : MAX_THREAD_ITERATIONS); + + if (sequence_length <= 8) + attn_softmax<2, (threads / 2), 2> + <<>>(vals, attn_mask, heads, seq_length4, iterations); + else if (sequence_length <= 16) + attn_softmax<4, (threads / 4), 4> + <<>>(vals, attn_mask, heads, seq_length4, iterations); + else if (sequence_length <= 32) + attn_softmax<8, (threads / 8), 8> + <<>>(vals, attn_mask, heads, seq_length4, iterations); + else if (sequence_length <= 64) + attn_softmax<16, (threads / 16), 16> + <<>>(vals, attn_mask, heads, seq_length4, iterations); + else if (sequence_length <= 128) + attn_softmax<32, (threads / 32), 32> + <<>>(vals, attn_mask, heads, seq_length4, iterations); + else if (sequence_length <= 256) + attn_softmax<32, (threads / 64), 64> + <<>>(vals, attn_mask, heads, seq_length4, iterations); + else { + const int threads = 256; + dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads); + + int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads; + + dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) / + subblock_max_workload * threads) + : threads); + iterations = + (sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads + : MAX_THREAD_ITERATIONS); + if (sequence_length <= 512) + attn_softmax<32, (threads / 128), 128><<>>( + vals, attn_mask, heads, seq_length4, iterations); + else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4)) + attn_softmax<32, 1, 128><<>>( + vals, attn_mask, heads, seq_length4, iterations); + else + throw std::runtime_error( + "Unsupport Seq_Length! Check the restriction of the max_threads and " + "max_thread_iterations!"); + } +} + +template <> +void launch_attn_softmax<__half>(__half* vals, + const __half* attn_mask, + int batch_size, + int heads, + int sequence_length, + cudaStream_t stream) +{ + const int threads = 128; + int seq_length4 = sequence_length / 4; + + dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads); + + int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads; + + dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) / + subblock_max_workload * threads) + : threads); + + int iterations = + (sequence_length < subblock_max_workload ? (seq_length4 + threads - 1) / threads + : MAX_THREAD_ITERATIONS); + + if (sequence_length <= 8) + attn_softmax<2, (threads / 2), 2> + <<>>(vals, attn_mask, heads, seq_length4, iterations); + else if (sequence_length <= 16) + attn_softmax<4, (threads / 4), 4> + <<>>(vals, attn_mask, heads, seq_length4, iterations); + else if (sequence_length <= 32) + attn_softmax<8, (threads / 8), 8> + <<>>(vals, attn_mask, heads, seq_length4, iterations); + else if (sequence_length <= 64) + attn_softmax<16, (threads / 16), 16> + <<>>(vals, attn_mask, heads, seq_length4, iterations); + else if (sequence_length <= 128) + attn_softmax<32, (threads / 32), 32> + <<>>(vals, attn_mask, heads, seq_length4, iterations); + else if (sequence_length <= 256) + attn_softmax<32, (threads / 64), 64> + <<>>(vals, attn_mask, heads, seq_length4, iterations); + else { + const int threads = 256; + dim3 grid_dim = get_attn_softmax_grid(batch_size, heads, sequence_length, threads); + + int subblock_max_workload = MAX_THREAD_ITERATIONS * 4 * threads; + + dim3 block_dim(seq_length4 > threads ? ((sequence_length + subblock_max_workload - 1) / + subblock_max_workload * threads) + : threads); + iterations = + (sequence_length < subblock_max_workload ? 
(seq_length4 + threads - 1) / threads + : MAX_THREAD_ITERATIONS); + if (sequence_length <= 512) + attn_softmax<32, (threads / 128), 128><<>>( + vals, attn_mask, heads, seq_length4, iterations); + else if (sequence_length < (MAX_THREADS * MAX_THREAD_ITERATIONS * 4)) + attn_softmax<32, 1, 128><<>>( + vals, attn_mask, heads, seq_length4, iterations); + else + throw std::runtime_error( + "Unsupport Seq_Length! Check the restriction of the max_threads and " + "max_thread_iterations!"); + } +} + +template +__global__ void softmax_backward_kernel(T* out_grad, const T* soft_inp, int seq_length) +{ + __shared__ float partialSum[MAX_WARP_NUM]; + + int warp_num = blockDim.x >> WARP_SIZE_BITS; // warp-count = num_threads / WARP_SIZE (32) + + int iteration_stride = blockDim.x; + int block_width = blockStride * seq_length; + + int iterations = (seq_length < (MAX_THREAD_ITERATIONS * iteration_stride) + ? (seq_length + iteration_stride - 1) / iteration_stride + : MAX_THREAD_ITERATIONS); + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + int row = blockIdx.x; + int id = threadIdx.x; + + int wid = id >> WARP_SIZE_BITS; + int lane = id & 0x1f; + + T val_reg[MAX_THREAD_ITERATIONS]; + T soft_reg[MAX_THREAD_ITERATIONS]; + float grad_reg = 0.0f; + +#pragma unroll + for (int i = 0; i < iterations; i++) { + int data_id = i * iteration_stride + id; + if (data_id < block_width) { + val_reg[i] = out_grad[row * block_width + data_id]; + soft_reg[i] = soft_inp[row * block_width + data_id]; + + grad_reg += ((float)val_reg[i] * + (float)soft_reg[i]); // if done in half, the multiplication, we may lose + // 2% of accuracy in computation!! + } + } + for (int i = 1; i < tbSize; i *= 2) grad_reg += g.shfl_xor(grad_reg, i); + + if (seq_length > tbSize) { + if (lane == 0) partialSum[wid] = grad_reg; + b.sync(); + + if (lane < warp_num) grad_reg = partialSum[lane]; + + int iters = warp_num; + if (seq_length < iteration_stride) iters = warp_num / (iteration_stride / seq_length); + + for (int i = 1; i < iters; i *= 2) grad_reg += g.shfl_xor(grad_reg, i); + + grad_reg = g.shfl(grad_reg, id / tbSize); + } + + for (int i = 0; i < iterations; i++) { + int data_id = i * iteration_stride + id; + if (data_id < block_width) { + float temp = (float)soft_reg[i] * ((float)val_reg[i] - grad_reg); + out_grad[row * block_width + data_id] = (T)temp; + } + } +} + +template +__global__ void softmax_backward_kernel_v2(T* grad /* input & output*/, + const T* output, + int softmax_length) +{ + int batch_idx = blockIdx.x * blockDim.y + threadIdx.y; + int offset = batch_idx * softmax_length + threadIdx.x; + + grad += offset; + output += offset; + + T grad_reg[ITERATIONS]; + T output_reg[ITERATIONS]; + float sum = 0.0; + +#pragma unroll + for (int i = 0; i < ITERATIONS; ++i) { + int curr_idx = threadIdx.x + i * WARP_SIZE; + if (curr_idx < softmax_length) { + grad_reg[i] = grad[i * WARP_SIZE]; + output_reg[i] = output[i * WARP_SIZE]; + sum += (float)grad_reg[i] * (float)output_reg[i]; + } + } + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + + for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_xor(sum, i); + +#pragma unroll + for (int i = 0; i < ITERATIONS; ++i) { + int curr_idx = threadIdx.x + i * WARP_SIZE; + if (curr_idx < softmax_length) + grad[i * WARP_SIZE] = (float)output_reg[i] * ((float)grad_reg[i] - sum); + } +} + +__global__ void softmax_backward_kernel_arbitrary_length(__half* grad /* input & output*/, + const __half* output, + int 
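+ // Fallback kernel for rows longer than the templated v2 variants: one warp owns
+ // each softmax row, strides over it in float4 (8 x __half) chunks, accumulates
+ // sum(dY * Y) with a shfl_xor reduction, then rewrites dY as Y * (dY - sum).
+ // The softmax_length passed in is already divided by the vector width.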
softmax_length) +{ + int batch_idx = blockIdx.x * blockDim.y + threadIdx.y; + int offset = batch_idx * softmax_length + threadIdx.x; + + const float4* output_cast = reinterpret_cast(output); + float4* grad_cast = reinterpret_cast(grad); + + grad_cast += offset; + output_cast += offset; + + float sum = 0.0; + int curr_idx = threadIdx.x; + while (curr_idx < softmax_length) { + float4 out_reg = output_cast[curr_idx]; + float4 grad_reg = grad_cast[curr_idx]; + __half2* out_h = reinterpret_cast<__half2*>(&out_reg); + __half2* grad_h = reinterpret_cast<__half2*>(&grad_reg); +#pragma unroll + for (int m = 0; m < 4; m++) grad_h[m] *= out_h[m]; + sum += ((float)grad_h[0].x + (float)grad_h[0].y + (float)grad_h[1].x + (float)grad_h[1].y) + + ((float)grad_h[2].x + (float)grad_h[2].y + (float)grad_h[3].x + (float)grad_h[3].y); + curr_idx += WARP_SIZE; + } + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + +#pragma unroll + for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_xor(sum, i); + + curr_idx = threadIdx.x; + while (curr_idx < softmax_length) { + float4 out_reg = output_cast[curr_idx]; + float4 grad_reg = grad_cast[curr_idx]; + __half* grad_h = reinterpret_cast<__half*>(&grad_reg); + __half* out_h = reinterpret_cast<__half*>(&out_reg); + +#pragma unroll + for (int m = 0; m < 8; m++) grad_h[m] = (float)out_h[m] * ((float)grad_h[m] - sum); + + grad_cast[curr_idx] = grad_reg; + curr_idx += WARP_SIZE; + } +} + +__global__ void softmax_backward_kernel_arbitrary_length(float* grad /* input & output*/, + const float* output, + int softmax_length) +{ + int batch_idx = blockIdx.x * blockDim.y + threadIdx.y; + int offset = batch_idx * softmax_length + threadIdx.x; + + const float4* output_cast = reinterpret_cast(output); + float4* grad_cast = reinterpret_cast(grad); + + grad_cast += offset; + output_cast += offset; + + float sum = 0.0; + int curr_idx = threadIdx.x; + while (curr_idx < softmax_length) { + float4 out_reg = output_cast[curr_idx]; + float4 grad_reg = grad_cast[curr_idx]; + + grad_reg.x *= out_reg.x; + grad_reg.y *= out_reg.y; + grad_reg.z *= out_reg.z; + grad_reg.w *= out_reg.w; + sum += (grad_reg.x + grad_reg.y + grad_reg.z + grad_reg.w); + + curr_idx += WARP_SIZE; + } + + cg::thread_block b = cg::this_thread_block(); + cg::thread_block_tile g = cg::tiled_partition(b); + +#pragma unroll + for (int i = 1; i < WARP_SIZE; i <<= 1) sum += g.shfl_xor(sum, i); + + curr_idx = threadIdx.x; + while (curr_idx < softmax_length) { + float4 out_reg = output_cast[curr_idx]; + float4 grad_reg = grad_cast[curr_idx]; + grad_reg.x = out_reg.x * (grad_reg.x - sum); + grad_reg.y = out_reg.y * (grad_reg.y - sum); + grad_reg.z = out_reg.z * (grad_reg.z - sum); + grad_reg.w = out_reg.w * (grad_reg.w - sum); + + grad_cast[curr_idx] = grad_reg; + curr_idx += WARP_SIZE; + } +} + +template +void launch_attn_softmax_backward_v2(T* out_grad, + const T* soft_inp, + int batch_size, + int heads, + int seq_length, + cudaStream_t stream) +{ + const int warps_per_block = 4; + dim3 grid_dim(batch_size * heads * seq_length / warps_per_block); + dim3 block_dim(WARP_SIZE, warps_per_block); + + if (seq_length <= 32) + softmax_backward_kernel_v2 + <<>>(out_grad, soft_inp, seq_length); + else if (seq_length <= 64) + softmax_backward_kernel_v2 + <<>>(out_grad, soft_inp, seq_length); + else if (seq_length <= 128) + softmax_backward_kernel_v2 + <<>>(out_grad, soft_inp, seq_length); + else if (seq_length <= 256) + softmax_backward_kernel_v2 + <<>>(out_grad, soft_inp, seq_length); + 
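+ // Dispatch: blocks are WARP_SIZE x 4 threads with one warp per softmax row, and
+ // the ITERATIONS template argument is ceil(seq_length / WARP_SIZE) in each
+ // branch of this chain; rows beyond 8192 fall through to the arbitrary-length
+ // kernel, which takes the vectorized row length instead.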
else if (seq_length <= 384) + softmax_backward_kernel_v2 + <<>>(out_grad, soft_inp, seq_length); + else if (seq_length <= 512) + softmax_backward_kernel_v2 + <<>>(out_grad, soft_inp, seq_length); + else if (seq_length <= 768) + softmax_backward_kernel_v2 + <<>>(out_grad, soft_inp, seq_length); + else if (seq_length <= 1024) + softmax_backward_kernel_v2 + <<>>(out_grad, soft_inp, seq_length); + else if (seq_length <= 2048) + softmax_backward_kernel_v2 + <<>>(out_grad, soft_inp, seq_length); + else if (seq_length <= 4096) + softmax_backward_kernel_v2 + <<>>(out_grad, soft_inp, seq_length); + else if (seq_length <= 8192) + softmax_backward_kernel_v2 + <<>>(out_grad, soft_inp, seq_length); + else + softmax_backward_kernel_arbitrary_length<<>>( + out_grad, soft_inp, seq_length / (4 << ((sizeof(T) & 2) >> 1))); +} + +template void launch_attn_softmax_backward_v2<__half>(__half* out_grad, + const __half* soft_inp, + int batch_size, + int heads, + int seq_length, + cudaStream_t stream); +template void launch_attn_softmax_backward_v2(float* out_grad, + const float* soft_inp, + int batch_size, + int heads, + int seq_length, + cudaStream_t stream); diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/transform_kernels.cu b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/transform_kernels.cu new file mode 100644 index 0000000000000000000000000000000000000000..dbecc31536c6c9355c4a3e243f2a0219bc476e6e --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/transform_kernels.cu @@ -0,0 +1,580 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "custom_cuda_layers.h" + +#define rows_trans 16 +#define cols_trans 16 + +template +__global__ void Transpose_Kernel(const T* inp, T* out, int row_width, int col_width) +{ + __shared__ T data_block[rows_trans * (cols_trans + 1)]; + + int r = threadIdx.x / cols_trans; + int c = threadIdx.x % cols_trans; + + int m = row_width / cols_trans; + + int i = blockIdx.x / m * rows_trans + r; + int j = blockIdx.x % m * cols_trans + c; + + int row_stride = rows_trans / ((rows_trans * cols_trans + THREADS - 1) / THREADS); + + for (int k = 0; k < rows_trans; k += row_stride) + data_block[(k + r) * cols_trans + c] = inp[(i + k) * row_width + j]; + + __syncthreads(); + + i = blockIdx.x % m * rows_trans + r; + j = blockIdx.x / m * cols_trans + c; + + for (int k = 0; k < rows_trans; k += row_stride) + out[(i + k) * col_width + j] = data_block[c * cols_trans + r + k]; +} + +template <> +void Transpose<__half>(const __half* inp_mat, + __half* out_mat, + int rows, + int cols, + cudaStream_t stream) +{ + int threads = THREADS; + + Transpose_Kernel<__half><<<(rows * cols + threads - 1) / threads, threads, 0, stream>>>( + inp_mat, out_mat, cols, rows); +} + +template <> +void Transpose(const float* inp_mat, float* out_mat, int rows, int cols, cudaStream_t stream) +{ + int threads = THREADS; + + Transpose_Kernel<<<(rows * cols + threads - 1) / threads, threads, 0, stream>>>( + inp_mat, out_mat, cols, rows); +} + +template +__global__ void transform_0213(T* output, + const T* vals, + int hidden_dim, + int seq_length, + int heads, + int head_ext); + +template <> +__global__ void transform_0213(float* output, + const float* vals, + int hidden_dim, + int seq_length, + int heads, + int head_ext) +{ + int d0_stride = hidden_dim * seq_length; + int d1_stride = hidden_dim; + int d2_stride = hidden_dim / heads; + + int d0_out_stride = d0_stride; + int d1_out_stride 
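+ // transform_0213 permutes activations from [batch, seq, heads, head_dim] to
+ // [batch, heads, seq, head_dim]; each thread moves one float4, so the
+ // hidden_dim argument arrives pre-divided by 4 from the launcher.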
= d2_stride; + int d2_out_stride = d2_stride * seq_length; + + int d0 = blockIdx.x; // Batch + int d1 = blockIdx.y / head_ext; // Sequence ID (0-127) + int d2 = threadIdx.y + (blockIdx.y % head_ext) * (heads / head_ext); // Head (0-11) + int d3 = threadIdx.x; // Values (groups of 4) + + const float4* vals_vec = reinterpret_cast(vals); + float4* output_vec = reinterpret_cast(output); + + float4 inputs = vals_vec[d0 * d0_stride + d1 * d1_stride + d2 * d2_stride + d3]; + output_vec[d0 * d0_out_stride + d1 * d1_out_stride + d2 * d2_out_stride + d3] = inputs; +} + +template <> +__global__ void transform_0213<__half>(__half* output, + const __half* vals, + int hidden_dim, + int seq_length, + int heads, + int head_ext) +{ +#ifdef HALF_PRECISION_AVAILABLE + + int d0_stride = hidden_dim * seq_length; + int d1_stride = hidden_dim; + int d2_stride = hidden_dim / heads; + + int d0_out_stride = d0_stride; + int d1_out_stride = d2_stride; + int d2_out_stride = d2_stride * seq_length; + + int d0 = blockIdx.x; // Batch + int d1 = blockIdx.y / head_ext; // Sequence ID (0-127) + int d2 = threadIdx.y + (blockIdx.y % head_ext) * (heads / head_ext); // Head (0-11) + int d3 = threadIdx.x; // Values (groups of 4) + + float4 vals_arr[1]; + + const float4* vals_vec = reinterpret_cast(vals); + float4* output_vec = reinterpret_cast(output); + + vals_arr[0] = vals_vec[d0 * d0_stride + d1 * d1_stride + d2 * d2_stride + d3]; + output_vec[d0 * d0_out_stride + d1 * d1_out_stride + d2 * d2_out_stride + d3] = vals_arr[0]; +#endif +} + +template <> +void launch_transform_0213(float* output, + const float* vals, + int batch_size, + int seq_length, + int hidden_dim, + int heads, + cudaStream_t stream) +{ + hidden_dim >>= 2; + int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; + dim3 block_dim(hidden_dim / heads, (heads / head_ext)); + dim3 grid_dim(batch_size, (seq_length * head_ext)); + + transform_0213 + <<>>(output, vals, hidden_dim, seq_length, heads, head_ext); +} + +template <> +void launch_transform_0213<__half>(__half* output, + const __half* vals, + int batch_size, + int seq_length, + int hidden_dim, + int heads, + cudaStream_t stream) +{ + hidden_dim >>= 3; + int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; + dim3 block_dim(hidden_dim / heads, (heads / head_ext)); + dim3 grid_dim(batch_size, (seq_length * head_ext)); + transform_0213<__half> + <<>>(output, vals, hidden_dim, seq_length, heads, head_ext); +} + +// Bias add +template +__global__ void bias_add_transform_0213(T* output, + const T* vals, + const T* bias, + int hidden_dim, + int seq_length, + int heads, + int head_ext); + +template <> +__global__ void bias_add_transform_0213(float* output, + const float* vals, + const float* bias, + int hidden_dim, + int seq_length, + int heads, + int head_ext) +{ + int d0_stride = hidden_dim * seq_length; + int d1_stride = hidden_dim; + int d2_stride = hidden_dim / heads; + + int d0_out_stride = d0_stride; + int d1_out_stride = d2_stride; + int d2_out_stride = d2_stride * seq_length; + + int d0 = blockIdx.x; // Batch + int d1 = blockIdx.y; // Sequence ID (0-127) + int cnt = blockIdx.z / head_ext; // Hidden count + int d2 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head (0-11) + int d3 = threadIdx.x; // Values (groups of 4) + + const float4* vals_vec = reinterpret_cast(vals); + const float4* bias_vec = reinterpret_cast(bias); + float4* output_vec = reinterpret_cast(output); + + float4 inputs = vals_vec[d0 * d0_stride * (gridDim.z / head_ext) + cnt * d1_stride + + d1 * d1_stride * (gridDim.z / 
head_ext) + d2 * d2_stride + d3]; + float4 biases = bias_vec[cnt * d1_stride + d2 * d2_stride + d3]; + + float4 outputs; + outputs.x = inputs.x + biases.x; + outputs.y = inputs.y + biases.y; + outputs.z = inputs.z + biases.z; + outputs.w = inputs.w + biases.w; + + output_vec[cnt * d0_out_stride * gridDim.x + d0 * d0_out_stride + d1 * d1_out_stride + + d2 * d2_out_stride + d3] = outputs; +} + +#define ATTN_H 3 +#define MAX_SEQ_LINE 10 + +template <> +__global__ void bias_add_transform_0213<__half>(__half* output, + const __half* vals, + const __half* bias, + int hidden_dim, + int seq_length, + int heads, + int head_ext) +{ +#ifdef HALF_PRECISION_AVAILABLE + + int d0_stride = hidden_dim * seq_length; + int d1_stride = hidden_dim; + int d2_stride = hidden_dim / heads; + + int d2_out_stride = d2_stride * seq_length; + + int d0 = blockIdx.x; // Batch + int d1 = blockIdx.y; // Sequence ID (0-127) + int cnt = blockIdx.z / head_ext; // Hidden count + int d2 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head (0-11) + int d3 = threadIdx.x; // Values (groups of 4) + + float4 vals_arr; + float4 bias_arr; + float4 output_arr; + __half2* vals_half = reinterpret_cast<__half2*>(&vals_arr); + __half2* bias_half = reinterpret_cast<__half2*>(&bias_arr); + __half2* output_half = reinterpret_cast<__half2*>(&output_arr); + + const float4* vals_vec = reinterpret_cast(vals); + const float4* bias_vec = reinterpret_cast(bias); + float4* output_vec = reinterpret_cast(output); + + vals_vec += (d0 * d0_stride * (gridDim.z / head_ext)); + vals_vec += (d1 * d1_stride * (gridDim.z / head_ext)); + vals_vec += (cnt * d1_stride); + vals_vec += (d2 * d2_stride); + + bias_vec += (cnt * d1_stride); + bias_vec += (d2 * d2_stride); + + output_vec += (cnt * d0_stride * gridDim.x); + output_vec += (d1 * d2_stride); + output_vec += (d0 * d0_stride); + output_vec += (d2 * d2_out_stride); + + bias_arr = bias_vec[d3]; + vals_arr = vals_vec[d3]; + +#if defined(__ACC_HALF__) + output_half[0] = vals_half[0] + bias_half[0]; + output_half[1] = vals_half[1] + bias_half[1]; + output_half[2] = vals_half[2] + bias_half[2]; + output_half[3] = vals_half[3] + bias_half[3]; +#else + float2 bias_arr_f[4]; + float2 vals_arr_f[4]; +#pragma unroll + for (int l = 0; l < 4; l++) { + bias_arr_f[l] = __half22float2(bias_half[l]); + vals_arr_f[l] = __half22float2(vals_half[l]); + vals_arr_f[l].x += bias_arr_f[l].x; + vals_arr_f[l].y += bias_arr_f[l].y; + output_half[l] = __float22half2_rn(vals_arr_f[l]); + } +#endif + output_vec[d3] = output_arr; + +#endif +} + +__global__ void bias_add_transform_0213_v2(__half* output, + const __half* vals, + const __half* bias, + int hidden_dim, + int seq_length, + int heads) +{ +#ifdef HALF_PRECISION_AVAILABLE + __shared__ float4 in_data[3072]; + + int d0_stride = hidden_dim * seq_length; + int d1_stride = hidden_dim; + int d2_stride = hidden_dim / heads; + int iteration_stride = d1_stride * blockDim.z; // Hidden * 3 / 8 + int batch_stride = d0_stride * blockDim.z; // Hidden * S * 3 / 8 + + int d0_out_stride = d0_stride; + int d1_out_stride = d2_stride; + int d2_out_stride = d2_stride * seq_length; + + int d0 = blockIdx.x; // Batch + int d1 = blockIdx.y; // Sequence ID (0-127) + int cnt = threadIdx.z; // blockIdx.z; // Hidden count + int d2 = threadIdx.y; // Head (0-11) + int d3 = threadIdx.x; // Values (groups of 4) + + float4 vals_arr[1]; + float4 bias_arr[1]; + float4 output_arr[1]; + __half2* vals_half = reinterpret_cast<__half2*>(vals_arr); + __half2* bias_half = 
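+ // v2 path for small hidden sizes: each block covers two consecutive sequence
+ // positions of the fused projections, adds the bias in __half2 pairs, stages
+ // the float4 results in the shared in_data tile, and then writes them out so
+ // each output matrix is contiguous in [batch, heads, seq, head_dim] order.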
reinterpret_cast<__half2*>(bias_arr); + __half2* output_half = reinterpret_cast<__half2*>(output_arr); + + const float4* vals_vec = reinterpret_cast(vals); + const float4* bias_vec = reinterpret_cast(bias); + float4* output_vec = reinterpret_cast(output); + + int iter_index = cnt * d1_stride + d2 * d2_stride + d3; + int input_offset = d0 * batch_stride + d1 * (iteration_stride << 1); + bias_arr[0] = bias_vec[iter_index]; + +#pragma unroll + for (int iter = 0; iter < 2; iter++) { + int iter_id = iter * iteration_stride + iter_index; + vals_arr[0] = vals_vec[input_offset + iter_id]; + + output_half[0] = vals_half[0] + bias_half[0]; + output_half[1] = vals_half[1] + bias_half[1]; + output_half[2] = vals_half[2] + bias_half[2]; + output_half[3] = vals_half[3] + bias_half[3]; + + in_data[iter_id] = output_arr[0]; + } + __syncthreads(); + + iteration_stride = blockDim.z * (blockDim.y >> 1); + int matrix_stride = (d0_out_stride * gridDim.x); + int head_count = (d2 >> 1) + cnt * (blockDim.y >> 1); + + int out_index = d0 * d0_out_stride + d1 * (d1_out_stride << 1) + d3 + (d2 % 2) * d2_stride; + +#pragma unroll + for (int iter = 0; iter < 2; iter++) { + int iter_row = (iter * iteration_stride) + head_count; + int iter_offset = + (iter_row % blockDim.y) * d2_out_stride + (iter_row / blockDim.y) * matrix_stride; + output_vec[out_index + iter_offset] = + in_data[iter_row * d2_stride + d3 + (d2 % 2) * (d1_stride * blockDim.z)]; + } +#endif +} + +// [B S C*H] - > C * [B A S N] +template <> +void launch_bias_add_transform_0213(float* output, + const float* vals, + const float* bias, + int batch_size, + int seq_length, + int hidden_dim, + int heads, + cudaStream_t stream, + int trans_count) +{ + hidden_dim >>= 2; + int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; + + dim3 block_dim(hidden_dim / heads, (heads / head_ext)); + dim3 grid_dim(batch_size, seq_length, (trans_count * head_ext)); + + bias_add_transform_0213<<>>( + output, vals, bias, hidden_dim, seq_length, heads, head_ext); +} + +template <> +void launch_bias_add_transform_0213<__half>(__half* output, + const __half* vals, + const __half* bias, + int batch_size, + int seq_length, + int hidden_dim, + int heads, + cudaStream_t stream, + int trans_count) +{ + hidden_dim >>= 3; + if (hidden_dim > 128 || hidden_dim < 16) { + int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; + dim3 block_dim(hidden_dim / heads, (heads / head_ext)); + dim3 grid_dim(batch_size, seq_length, (trans_count * head_ext)); + bias_add_transform_0213<__half><<>>( + output, vals, bias, hidden_dim, seq_length, heads, head_ext); + } else { + dim3 block_dim(hidden_dim / heads, heads, trans_count); + dim3 grid_dim(batch_size, seq_length / 2); + bias_add_transform_0213_v2<<>>( + output, vals, bias, hidden_dim, seq_length, heads); + } +} + +template +__global__ void transform4d_0213(T* out, + const T* in, + int heads, + int seq_length, + int hidden_dim, + int head_ext); + +template <> +__global__ void transform4d_0213(float* out, + const float* in, + int heads, + int seq_length, + int hidden_dim, + int head_ext) +{ + int d0_stride = hidden_dim * seq_length; + int d1_stride = d0_stride / heads; + int d2_stride = hidden_dim / heads; + + int d0_out_stride = d0_stride; + int d1_out_stride = d2_stride; + int d2_out_stride = hidden_dim; + + int d0 = blockIdx.x; // Batch + int d1 = blockIdx.y / ((seq_length - 1) / blockDim.y + 1); // Head + int d2 = (threadIdx.y + blockDim.y * blockIdx.y) % seq_length; + int cnt = blockIdx.z; + int d3 = threadIdx.x; // Values (groups of 8) + + if (d2 < 
seq_length) { + const float4* in_vec = reinterpret_cast(in); + float4* out_vec = reinterpret_cast(out); + + float4 vals_vec = in_vec[cnt * d0_stride * gridDim.x + d0 * d0_stride + d1 * d1_stride + + d2 * d2_stride + d3]; + out_vec[d0 * d0_out_stride * gridDim.z + cnt * d2_out_stride + d1 * d1_out_stride + + d2 * d2_out_stride * gridDim.z + d3] = vals_vec; + } +} + +template <> +__global__ void transform4d_0213<__half>(__half* out, + const __half* in, + int heads, + int seq_length, + int hidden_dim, + int head_ext) +{ +#ifdef HALF_PRECISION_AVAILABLE + + int d0_stride = hidden_dim * (seq_length / head_ext); + int d1_stride = hidden_dim; + int d2_stride = hidden_dim / heads; + + int d0 = blockIdx.x; // Batch + int d1 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head + int d2 = blockIdx.z / head_ext; // Sequence + int cnt = blockIdx.y; // Hidden count + int d3 = threadIdx.x; // Values (groups of 8) + + const float4* in_vec = reinterpret_cast(in); + float4* out_vec = reinterpret_cast(out); + + in_vec += (cnt * d0_stride * gridDim.x); + in_vec += (d0 * d0_stride); + in_vec += (d2 * d2_stride); + in_vec += (d1 * d2_stride * seq_length); + + out_vec += (cnt * d1_stride); + out_vec += (d1 * d2_stride); + out_vec += (d0 * d0_stride * gridDim.y); + out_vec += (d2 * d1_stride * gridDim.y); + + out_vec[d3] = in_vec[d3]; + +#endif +} + +__global__ void transform4d_0213_v2(__half* out, + const __half* in, + int heads, + int seq_length, + int hidden_dim) +{ +#ifdef HALF_PRECISION_AVAILABLE + __shared__ float4 in_data[3072]; + + int d0_stride = hidden_dim * seq_length; + int d1_stride = hidden_dim; + int d2_stride = hidden_dim / heads; + + int d0 = blockIdx.x; // Batch + int d1 = threadIdx.y; // Head + int d2 = blockIdx.y; // Sequence + int cnt = threadIdx.z; // Hidden count + int d3 = threadIdx.x; // Values (groups of 8) + + const float4* in_vec = reinterpret_cast(in); + float4* out_vec = reinterpret_cast(out); + + int input_offset = d0 * d0_stride + d2 * (d2_stride << 1) + d3 + (d1 % 2) * d2_stride; + int head_count = (d1 >> 1) + cnt * (blockDim.y >> 1); + int iteration_stride = blockDim.z * (blockDim.y >> 1); + int matrix_stride = (d0_stride * gridDim.x); + +#pragma unroll + for (int iter = 0; iter < 2; iter++) { + int iter_row = iter * iteration_stride + head_count; + int iter_offset = (iter_row % blockDim.y) * d2_stride; + + in_data[d3 + iter_offset + (iter_row / blockDim.y + (d1 % 2) * blockDim.z) * d1_stride] = + in_vec[input_offset + iter_offset * seq_length + + (iter_row / blockDim.y) * matrix_stride]; + } + __syncthreads(); + + iteration_stride = d1_stride * blockDim.z; + int iter_index = cnt * d1_stride + d1 * d2_stride + d3; + int output_offset = d0 * d0_stride * blockDim.z + d2 * (iteration_stride << 1); + +#pragma unroll + for (int iter = 0; iter < 2; iter++) { + int iter_id = iter * iteration_stride + iter_index; + out_vec[output_offset + iter_id] = in_data[iter_id]; + } +#endif +} + +// 3 * [B A S N] - > [B S C*H] +template <> +void launch_transform4d_0213(float* out, + const float* in, + int batch_size, + int heads, + int seq_length, + int hidden_dim, + cudaStream_t stream, + int trans_count) +{ + hidden_dim >>= 2; + dim3 grid_dims(batch_size, heads * ((seq_length - 1) / 8 + 1), trans_count); + dim3 block_dims(hidden_dim / heads, 8); + transform4d_0213 + <<>>(out, in, heads, seq_length, hidden_dim, 1); +} + +template <> +void launch_transform4d_0213<__half>(__half* out, + const __half* in, + int batch_size, + int heads, + int seq_length, + int hidden_dim, + 
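+ // Half launcher: after the >>= 3 below, hidden_dim counts float4 (8 x __half)
+ // units; the shared-memory v2 kernel is only used when that packed width lies
+ // in [16, 128], otherwise the generic kernel is launched with head_ext
+ // splitting the heads across blocks.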
cudaStream_t stream, + int trans_count) +{ + hidden_dim >>= 3; + if (hidden_dim > 128 || hidden_dim < 16) { + int head_ext = (hidden_dim - 1) / MAX_THREADS + 1; + dim3 grid_dims(batch_size, trans_count, (seq_length * head_ext)); + dim3 block_dims(hidden_dim / heads, (heads / head_ext)); + transform4d_0213<__half><<>>( + out, in, heads, seq_length, hidden_dim, head_ext); + } else { + dim3 grid_dims(batch_size, seq_length / 2); + dim3 block_dims(hidden_dim / heads, heads, trans_count); + transform4d_0213_v2<<>>( + out, in, heads, seq_length, hidden_dim); + } +} diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/api-v1-jd-1119.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/api-v1-jd-1119.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..8e23c4a4051b50c2a5dbe0b93f4619bbed92b9f3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/api-v1-jd-1119.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c41e5fbb3e59cd4de881ed7c8f88f9b03a750d537ba63581cafde6aafd77adc1 +size 711 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/data-v1-dl-54002.arff.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/data-v1-dl-54002.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..8f610044b5cc550df4d4ef18cd2131306dba05be --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1119/data-v1-dl-54002.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6931af256195fcdd2e47dd8b0f9edf16fbf03b198e77b70e3dfd9877cdf09515 +size 1190 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/api-v1-jd-3.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/api-v1-jd-3.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..65982d59860e015a25a42d8bb57f72bf327c9e0b --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/api-v1-jd-3.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:066a216679b197cc51946e17ee9a2e28215425991b0ceb7f10988c14f7f3f869 +size 2473 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/api-v1-jdf-3.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/api-v1-jdf-3.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..faf70da9cea25d998883721d679ca9f0030d9575 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/api-v1-jdf-3.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec4f2d6bc4df3882b08bba01571e0792a56f79e0a922d984897773acd284b426 +size 535 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/api-v1-jdq-3.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/api-v1-jdq-3.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..24537ca9b1e5187b37136b19898ab370dec315d7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_3/api-v1-jdq-3.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09ef19cfad25c5de487ddbaef3c4d068ca3063777730a288dfd6f5096a0c6f46 +size 1407 diff --git 
a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/api-v1-jdl-dn-glass2-l-2-dv-1.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/api-v1-jdl-dn-glass2-l-2-dv-1.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..577840cd46f47e22c75975d855fe21c9b997ee22 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40675/api-v1-jdl-dn-glass2-l-2-dv-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ad0a4a5477605380f8819ce840dbb928a3d084267c512f6cb50d5be2f7c76bc2 +size 85 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/api-v1-jd-40945.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/api-v1-jd-40945.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..06446ec67eeede9b6d48f044d8ae402fe11bb90e --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/api-v1-jd-40945.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:02882c6b02c4e068ef2b16f37f33ae3d5e9dd17ca29d01662c6924e16427eb5d +size 437 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/api-v1-jdf-40945.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/api-v1-jdf-40945.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..24e0e87d484661242d46a4cf18e2e6695736fa26 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/api-v1-jdf-40945.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:95f0938dfdf1b87d0ffc4d526f2c91e097ef7689480b693970126d908f291030 +size 320 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/api-v1-jdq-40945.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/api-v1-jdq-40945.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..adb1b0a58ae958ab00a906b0287f416a4ab48ace --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/api-v1-jdq-40945.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9c7e5a46554ab6a8121832dc0cd9f7a60f5034cef1a5a7d61346bbd912516b54 +size 1042 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/data-v1-dl-16826755.arff.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/data-v1-dl-16826755.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..5d8d5ae4fd5692b26e281928f6a1baad008f2008 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40945/data-v1-dl-16826755.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:516e961f519876e5f89b339a0364a08dd64160ac3a4d76d5ec62955bfd6d6ce5 +size 32243