diff --git a/.gitattributes b/.gitattributes index 307ea240cc444a1ecc687b9c045ff6022ffcf1d0..1a32496470ba954b28750c02fa8506398d3b65c9 100644 --- a/.gitattributes +++ b/.gitattributes @@ -59,3 +59,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.webm filter=lfs diff=lfs merge=lfs -text venv/bin/python3.10 filter=lfs diff=lfs merge=lfs -text venv/bin/python3 filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/scipy/linalg/_flapack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/numexpr/interpreter.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/sklearn/_loss/_loss.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1d16b484a56031ff83cc731082af7d512edbce85 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .bias_activations import * +from .blas_kernels import * +from .cuda_layer_norm import * +from .cuda_rms_norm import * +from .gated_activations import * +from .cuda_linear import * diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ffcce5aa740d18728d40afc4d2d9a060b22d1cde Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ea7f8a7d19965caf6d622d226fde6e4e5d0fd9ff --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .bias_activation import * diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec9eacb2fb712ec933fcb0cf4d647962057f7990 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/__pycache__/bias_activation.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/__pycache__/bias_activation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5754b2ba9ae5a1ea65be0f087dcd1e8169329a86 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/__pycache__/bias_activation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/bias_activation.cpp b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/bias_activation.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4f0cc9cbd77c123f7183741c34c0d865009d2cbf --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/bias_activation.cpp @@ -0,0 +1,68 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "bias_activation.h" +#include +#include "ds_kernel_utils.h" + +#ifdef BF16_AVAILABLE +#define DTYPE_SWITCH(DTYPE, ...) \ + [&] { \ + if (DTYPE == torch::kFloat16) { \ + using scalar_t = __half; \ + return __VA_ARGS__(); \ + } else if (DTYPE == torch::kBFloat16) { \ + using scalar_t = __nv_bfloat16; \ + return __VA_ARGS__(); \ + } else { \ + TORCH_CHECK(false, "Unsupported dtype for BiasActivation"); \ + } \ + }() +#else +#define DTYPE_SWITCH(DTYPE, ...) \ + [&] { \ + if (DTYPE == torch::kFloat16) { \ + using scalar_t = __half; \ + return __VA_ARGS__(); \ + } else { \ + TORCH_CHECK(false, "Unsupported dtype for BiasActivation"); \ + } \ + }() +#endif + +/* +In-place bias and activation fusion kernel. 
+*/ +void bias_activation(torch::Tensor& activation, + c10::optional& bias, + const int32_t act_type) +{ + const ActivationType atype = static_cast(act_type); + const int32_t rows = activation.size(0); + const int32_t cols = activation.size(1); + + TORCH_CHECK(atype == ActivationType::GELU || atype == ActivationType::RELU || + atype == ActivationType::SILU || atype == ActivationType::IDENTITY, + "Unsupported activation type for BiasActivation"); + TORCH_CHECK(activation.dim() == 2, "BiasActivation only supports 2D activation tensors"); + + DTYPE_SWITCH(activation.scalar_type(), [&] { + scalar_t* activation_ptr = reinterpret_cast(activation.data_ptr()); + + const scalar_t* bias_ptr; + if (bias.has_value()) { + TORCH_CHECK(activation.scalar_type() == bias.value().scalar_type(), + "BiasActivation activation and bias must have same dtype"); + bias_ptr = reinterpret_cast(bias.value().data_ptr()); + } else { + bias_ptr = nullptr; + } + + if (atype == ActivationType::IDENTITY && bias_ptr == nullptr) { return; } + + launch_bias_activation( + activation_ptr, bias_ptr, rows, cols, atype, c10::cuda::getCurrentCUDAStream()); + }); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/bias_activation.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/bias_activation.h new file mode 100644 index 0000000000000000000000000000000000000000..db6174633a092aeb81fb86dd47c1036e3a9c5fcb --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/bias_activation.h @@ -0,0 +1,22 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include +#include +#include "activation_type.h" + +template +void launch_bias_activation(T* activation, + const T* bias, + const int32_t n_rows, + const int32_t n_cols, + const ActivationType activation_type, + cudaStream_t stream); + +void bias_activation(torch::Tensor& activation, + c10::optional& bias, + const int32_t activation_type); diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/bias_activation.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/bias_activation.py new file mode 100644 index 0000000000000000000000000000000000000000..436d7f8805d5c4df552d239a5a64b620bd57eea9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/bias_activation.py @@ -0,0 +1,62 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Optional + +import torch + +from ....inference_utils import ActivationType, DtypeEnum +from deepspeed.ops.op_builder import InferenceCoreBuilder +from ... import DSKernelBase + + +class CUDABiasActivation(DSKernelBase): + """ + CUDA implementation of bias activation kernel. This kernel should be deprecated once + we are fusing the bias activation into the linear kernel in all scenarios. + """ + + supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16] + supported_act_fns = [ActivationType.IDENTITY, ActivationType.GELU, ActivationType.RELU, ActivationType.SILU] + + def __init__(self, channels: int, dtype: DtypeEnum, act_fn: ActivationType) -> None: + """ + Compile and validate for the fused bias-activation kernel. + + Parameters: + channels (int): Number of channels to expect in the activation. 
+ dtype (torch.dtype): Data type for the input/output. Supported values + are DtypeEnum.fp16 and DtypeEnum.bf16. + act_fn (ActivationType): Activation function to use. Only IDENTITY, GELU, RELU, and SILU are supported. + """ + + if channels % 8 != 0: + raise ValueError("channels must be divisible by 8") + + if DtypeEnum(dtype) not in CUDABiasActivation.supported_dtypes: + raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format( + dtype, CUDABiasActivation.supported_dtypes)) + + act_fn = ActivationType(act_fn) + if act_fn not in CUDABiasActivation.supported_act_fns: + raise ValueError("Unsupported activation function: {}, supported_act_fns are {}".format( + act_fn, CUDABiasActivation.supported_act_fns)) + + inf_module = InferenceCoreBuilder().load() + self.kernel = inf_module.bias_activation + self.act_fn = act_fn + + def __call__(self, activation: torch.Tensor, bias: Optional[torch.Tensor] = None) -> torch.Tensor: + """ + Add an optional bias and perform the non-linear activation function. + + Parameters: + activation (torch.Tensor): Input tensor of shape [tokens, channels] + bias (torch.Tensor): Optional bias tensor of shape [channels] + + Returns: + activation that has been updated in-place + """ + self.kernel(activation, bias, self.act_fn.value) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/bias_activation_cuda.cu b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/bias_activation_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..66bca0c175c3721a924174474ead932b4c1d09ef --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/bias_activation_cuda.cu @@ -0,0 +1,140 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include "activation_type.h" +#include "conversion_utils.h" +#include "ds_kernel_utils.h" +#include "memory_access_utils.h" + +// Default activation function will error out +template +DS_D_INLINE float act_fn(float val); + +template <> +DS_D_INLINE float act_fn(float val) +{ + return val; +} + +template <> +DS_D_INLINE float act_fn(float val) +{ + return val > 0.0f ? 
val : 0.0f; +} + +template <> +DS_D_INLINE float act_fn(float val) +{ + constexpr float sqrt_param = 0.79788456080286535587989211986876f; + constexpr float mul_param = 0.044715f; + return val * 0.5f * (1.0f + tanhf(sqrt_param * (val + mul_param * val * val * val))); +} + +template <> +DS_D_INLINE float act_fn(float val) +{ + return val / (1.0f + expf(-val)); +} + +namespace bias_act { + +constexpr int access_size = 16; +constexpr int threads = 512; +constexpr int unroll = 4; + +} // namespace bias_act + +template +__global__ void bias_activation_kernel(T* activation, + const T* bias, + const int32_t rows, + const int32_t cols) +{ + constexpr int vector_T = bias_act::access_size / sizeof(T); + + const int32_t thread_offset = threadIdx.x * vector_T; + const int32_t block_offset = blockIdx.x * vector_T * bias_act::unroll * bias_act::threads; + const int32_t base_offset = block_offset + thread_offset; + + const int32_t thread_stride = bias_act::threads * vector_T; + +#pragma unroll + for (int i = 0; i < bias_act::unroll; i++) { + const int32_t iter_offset = base_offset + i * thread_stride; + + const int32_t row = iter_offset / cols; + + T buffer[vector_T]; + T bias_buffer[vector_T]; + + if (row < rows) { + const int32_t col = iter_offset % cols; + + mem_access::load_global(buffer, activation + iter_offset); + mem_access::load_global( + bias_buffer, bias + col, bias != nullptr); + +#pragma unroll + for (int j = 0; j < vector_T; j++) { + float val = + conversion::to(buffer[j]) + conversion::to(bias_buffer[j]); + buffer[j] = conversion::to(act_fn(val)); + } + + mem_access::store_global(activation + iter_offset, buffer); + } + } +} + +#define ACT_TYPE_SWITCH(ACT_TYPE, ...) \ + if (ACT_TYPE == ActivationType::IDENTITY) { \ + constexpr ActivationType act_fn_t = ActivationType::IDENTITY; \ + return __VA_ARGS__(); \ + } else if (ACT_TYPE == ActivationType::RELU) { \ + constexpr ActivationType act_fn_t = ActivationType::RELU; \ + return __VA_ARGS__(); \ + } else if (ACT_TYPE == ActivationType::GELU) { \ + constexpr ActivationType act_fn_t = ActivationType::GELU; \ + return __VA_ARGS__(); \ + } else if (ACT_TYPE == ActivationType::SILU) { \ + constexpr ActivationType act_fn_t = ActivationType::SILU; \ + return __VA_ARGS__(); \ + } else { \ + assert(false); \ + } + +template +void launch_bias_activation(T* activation, + const T* bias, + const int32_t n_rows, + const int32_t n_cols, + const ActivationType activation_type, + cudaStream_t stream) +{ + constexpr int32_t elems_per_block = + bias_act::threads * bias_act::unroll * bias_act::access_size / sizeof(T); + const int32_t total_elems = n_rows * n_cols; + + const int32_t blocks = (total_elems + elems_per_block - 1) / elems_per_block; + + const dim3 grid(blocks); + const dim3 block(bias_act::threads); + + ACT_TYPE_SWITCH(activation_type, [&] { + bias_activation_kernel + <<>>(activation, bias, n_rows, n_cols); + }); +} + +#define INSTANTIATE_FOR_T(T) \ + template void launch_bias_activation( \ + T*, const T*, const int32_t, const int32_t, const ActivationType, cudaStream_t); + +INSTANTIATE_FOR_T(__half); + +#ifdef BF16_AVAILABLE +INSTANTIATE_FOR_T(__nv_bfloat16); +#endif diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4af5a579ca1bb253b044c36c44b72e8cc0caf931 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .blas_linear import * diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc8edd12443135b93a24affef1d95d5b0cf22a3e Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/__pycache__/blas_linear.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/__pycache__/blas_linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63e9ed66982c664584674d5e6fda11a3f01499f1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/__pycache__/blas_linear.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/blas.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/blas.h new file mode 100644 index 0000000000000000000000000000000000000000..1854e40a227d3fd8ed0e279435e78b887db5bf5e --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/blas.h @@ -0,0 +1,138 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include +#include +#include +#include "blas_utils.h" + +#define DISPATCH_BLAS_MATMUL(T_TYPE, C_TYPE) \ + if (output.options().dtype() == torch::T_TYPE) { \ + blas_gemm_ex(output.data_ptr(), \ + (const void*)weights.data_ptr(), \ + (const void*)hidden_states.data_ptr(), \ + m, \ + n, \ + k, \ + lda, \ + ldb, \ + ldc, \ + trans_a, \ + trans_b, \ + &alpha, \ + &beta, \ + C_TYPE); \ + } + +void blas_linear(at::Tensor& output, at::Tensor& hidden_states, at::Tensor& weights) +{ + /* + Expected shape: output([total_tokens_across_dims], out_neurons) + hidden_states([total_tokens_across_dims], in_neurons) + weights(out_neurons, in_neurons) + + We are going to assume contiguous for the above shapes. + + The shapes are going to get messed with a little internally to handle column-major + GEMMs. + */ + + // Number of tokens is N (since the GEMM output is column-major but our Tensor + // is row-major, we need to transpose the shapes) + const int n = output.numel() / output.size(-1); + const int k = weights.size(1); + const int m = weights.size(0); + + // A strides + const bool trans_a = weights.stride(1) == 1; + const int lda = (trans_a) ? weights.stride(0) : weights.stride(1); + + // B strides + const bool trans_b = hidden_states.stride(-1) != 1; + const int ldb = (trans_b) ? 
hidden_states.stride(-1) : hidden_states.stride(-2); + + // C strides + const int ldc = output.stride(-2); + + const float alpha = 1.0f; + const float beta = 0.0f; + + TORCH_CHECK(output.scalar_type() == hidden_states.scalar_type(), + "Output and hidden states must have the same scalar type"); + TORCH_CHECK(output.scalar_type() == weights.scalar_type(), + "Output and weights must have the same scalar type"); + + // Dispatch the datatypes + DISPATCH_BLAS_MATMUL(kFloat, BlasType::FP32); + DISPATCH_BLAS_MATMUL(kHalf, BlasType::FP16); +#ifdef BF16_AVAILABLE + DISPATCH_BLAS_MATMUL(kBFloat16, BlasType::BF16); +#endif +} + +#define DISPATCH_4D_BLAS(T_TYPE, C_TYPE) \ + if (C.options().dtype() == torch::T_TYPE) { \ + blas_strided_batched_gemm(C.data_ptr(), \ + (const void*)A.data_ptr(), \ + (const void*)B.data_ptr(), \ + m, \ + n, \ + k, \ + lda, \ + ldb, \ + ldc, \ + trans_a, \ + trans_b, \ + &alpha, \ + &beta, \ + stride_a, \ + stride_b, \ + stride_c, \ + batch, \ + C_TYPE); \ + } + +void blas_4d_matmul(at::Tensor& C, at::Tensor& B, at::Tensor& A) +{ + /* + C shape: (batch_size, N, M) + A shape: (batch_size, N, K) + B shape: (batch_size, K, M) + */ + + const int n = C.size(-2); + const int k = C.size(-1); + const int m = B.size(-1); + + // A strides + const bool trans_a = A.stride(-1) == 1; + const int lda = (trans_a) ? A.stride(-2) : A.stride(-1); + const int stride_a = A.stride(-3); + + // B strides + const bool trans_b = B.stride(-1) != 1; + const int ldb = (trans_b) ? B.stride(-1) : B.stride(-2); + const int stride_b = B.stride(-3); + + // C strides + const int ldc = C.stride(-2); + const int stride_c = C.stride(-3); + + const float alpha = 1.0f; + const float beta = 0.0f; + + const int batch = C.numel() / (n * m); + + // Dispatch the datatypes + DISPATCH_4D_BLAS(kFloat, BlasType::FP32); + DISPATCH_4D_BLAS(kHalf, BlasType::FP16); +#ifdef BF16_AVAILABLE + DISPATCH_4D_BLAS(kBFloat16, BlasType::BF16); +#endif +} + +void create_handle() { BlasContext::getInstance().get_handle(); } diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/blas_linear.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/blas_linear.py new file mode 100644 index 0000000000000000000000000000000000000000..9a151ce36dc49b4bf92b8fe2656eb69a0a7256cf --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/blas_linear.py @@ -0,0 +1,55 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from ....inference_utils import DtypeEnum +from deepspeed.ops.op_builder import InferenceCoreBuilder +from ... import DSKernelBase + + +class BlasLibLinear(DSKernelBase): + """ + Wrapper around the BLAS matmul kernel for FP16/BF16/FP32 for CUDA/RoCM. + + Performs z = x @ y + """ + + supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16, DtypeEnum.fp32] + + def __init__(self, fp_dtype: DtypeEnum): + """ + Parameters: + fp_dtype (torch.dtype): Data type for the input/output. Supported values + are torch.float16, torch.bfloat16, and torch.float32. 
+ """ + fp_dtype = DtypeEnum(fp_dtype) + if fp_dtype not in BlasLibLinear.supported_dtypes: + raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format( + fp_dtype, BlasLibLinear.supported_dtypes)) + + self.inf_module = InferenceCoreBuilder().load() + self.inf_module.create_handle() + self.kernel = self.inf_module.blas_linear + + def __call__(self, output: torch.Tensor, hidden_states: torch.Tensor, weights: torch.Tensor) -> torch.Tensor: + """ + Matmul kernel as implemented by platform BLAS library. The input must be 2D or larger. If + n-dimensional, the leading dimensions are folded into each other: + 2D: m = x.size(0) + 3D: m = x.size(0) * x.size(1) + 4D: m = x.size(0) * x.size(1) * x.size(2) (etc...) + All inputs should be contiguous. + + Parameters: + output (torch.Tensor): Output tensor. Shape is of [*, out_features] + hidden_states (torch.Tensor): Input tensor. Shape is of [*, in_features] + weights (torch.Tensor): Input tensor. Shape is of [out_features, in_features] + + Returns: + z (torch.Tensor): Output tensor. Shape is of [m, n] + """ + self.kernel(output, hidden_states, weights) + return output diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/blas_utils.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/blas_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..c02cc76905e03d027ab27d67a1ea83ce7c98a468 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/blas_utils.h @@ -0,0 +1,275 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include +#include +#include +#ifdef BF16_AVAILABLE +#include +#endif +#include +#include +#ifndef __HIP_PLATFORM_AMD__ +#include +#endif +#include +#include +#include + +class BlasContext { + /* + Slim wrapper for managing the lifetime of the platform's BLAS handle. This should + be hipified for ROCm. + */ +public: + BlasContext() + { + if (cublasCreate(&_handle) != CUBLAS_STATUS_SUCCESS) { + auto message = std::string("Fail to create cublas handle."); + std::cerr << message << std::endl; + throw std::runtime_error(message); + } +#ifndef __HIP_PLATFORM_AMD__ + cublasSetMathMode(_handle, CUBLAS_TENSOR_OP_MATH); +#endif + } + + virtual ~BlasContext() { cublasDestroy(_handle); } + + static BlasContext& getInstance() + { + // Should always access the singleton through this function. + static BlasContext _instance; + return _instance; + } + + cublasHandle_t get_handle() const { return _handle; } + +private: + cublasHandle_t _handle; +}; + +enum class BlasType { FP32, FP16, BF16 }; + +#ifdef __HIP_PLATFORM_AMD__ +rocblas_operation get_trans_op(bool do_trans) +{ + return (do_trans) ? rocblas_operation_transpose : rocblas_operation_none; +} + +rocblas_datatype get_datatype(BlasType type) +{ + switch (type) { + case BlasType::FP32: return rocblas_datatype_f32_r; + case BlasType::FP16: return rocblas_datatype_f16_r; + case BlasType::BF16: return rocblas_datatype_bf16_r; + default: throw std::runtime_error("Unsupported BlasType"); + } +} +#else +cublasOperation_t get_trans_op(bool do_trans) { return (do_trans) ? 
CUBLAS_OP_T : CUBLAS_OP_N; } + +cublasDataType_t get_datatype(BlasType type) +{ + switch (type) { + case BlasType::FP32: return CUDA_R_32F; + case BlasType::FP16: return CUDA_R_16F; + case BlasType::BF16: return CUDA_R_16BF; + default: throw std::runtime_error("Unsupported BlasType"); + } +} +#endif + +int blas_gemm_ex(void* C, + const void* A, + const void* B, + int m, + int n, + int k, + int lda, + int ldb, + int ldc, + bool transa, + bool transb, + const float* alpha, + const float* beta, + BlasType type) +{ +#ifdef __HIP_PLATFORM_AMD__ + rocblas_operation_t transa_op = get_trans_op(transa); + rocblas_operation_t transb_op = get_trans_op(transb); + + rocblas_datatype_t abc_type = get_datatype(type); + + rocblas_status status = rocblas_gemm_ex(BlasContext::getInstance().get_handle(), + transa_op, + transb_op, + m, + n, + k, + (const void*)alpha, + A, + abc_type, + lda, + B, + abc_type, + ldb, + (const void*)beta, + C, + abc_type, + ldc, + C, + abc_type, + ldc, + rocblas_datatype_f32_r, + rocblas_gemm_algo_standard, + 0, + 0); +#else + cublasOperation_t transa_op = get_trans_op(transa); + cublasOperation_t transb_op = get_trans_op(transb); + + cublasDataType_t abc_type = get_datatype(type); + cublasStatus_t status = cublasGemmEx(BlasContext::getInstance().get_handle(), + transa_op, + transb_op, + m, + n, + k, + (const void*)alpha, + A, + abc_type, + lda, + B, + abc_type, + ldb, + (const void*)beta, + C, + abc_type, + ldc, + CUDA_R_32F, + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + +#ifdef __HIP_PLATFORM_AMD__ + if (status != rocblas_status_success) { +#else + if (status != CUBLAS_STATUS_SUCCESS) { +#endif + fprintf(stderr, + "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n", + m, + n, + k, + (int)status); + return EXIT_FAILURE; + } + return 0; +} + +int blas_strided_batched_gemm(void* C, + const void* A, + const void* B, + int m, + int n, + int k, + int lda, + int ldb, + int ldc, + bool transa, + bool transb, + const float* alpha, + const float* beta, + int stride_A, + int stride_B, + int stride_C, + int batch, + BlasType type) +{ +#ifdef __HIP_PLATFORM_AMD__ + rocblas_operation_t transa_op = get_trans_op(transa); + rocblas_operation_t transb_op = get_trans_op(transb); + + rocblas_datatype_t abc_type = get_datatype(type); + + rocblas_status status = + rocblas_gemm_strided_batched_ex(BlasContext::getInstance()::get_handle(), + transa_op, + transb_op, + m, + n, + k, + (const void*)alpha, + A, + abc_type, + lda, + stride_A, + B, + abc_type, + ldb, + stride_B, + (const void*)beta, + C, + abc_type, + ldc, + stride_C, + C, + abc_type, + ldc, + stride_C, + batch, + rocblas_datatype_f32_r, + rocblas_gemm_algo_standard, + 0, + 0); +#else + cublasOperation_t transa_op = get_trans_op(transa); + cublasOperation_t transb_op = get_trans_op(transb); + + cublasDataType_t abc_type = get_datatype(type); + + cublasStatus_t status = cublasGemmStridedBatchedEx(BlasContext::getInstance().get_handle(), + transa_op, + transb_op, + m, + n, + k, + (const void*)alpha, + A, + abc_type, + lda, + stride_A, + B, + abc_type, + ldb, + stride_B, + (const void*)beta, + C, + abc_type, + ldc, + stride_C, + batch, + CUDA_R_32F, + CUBLAS_GEMM_DEFAULT_TENSOR_OP); +#endif + +#ifdef __HIP_PLATFORM_AMD__ + if (status != rocblas_status_success) { +#else + if (status != CUBLAS_STATUS_SUCCESS) { +#endif + fprintf(stderr, + "!!!! kernel execution error. 
(batch: %d, m: %d, n: %d, k: %d, error: %d) \n", + batch, + m, + n, + k, + (int)status); + return EXIT_FAILURE; + } + return 0; +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/core_ops.cpp b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/core_ops.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3f36a6bf01cb01773b1584a1bb8a23b318705584 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/core_ops.cpp @@ -0,0 +1,42 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include + +#include "bias_activation.h" +#include "blas.h" +#include "gated_activation_kernels.h" +#include "layer_norm.h" +#include "linear_kernels.h" +#include "rms_norm.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + // bias_activation.h + m.def("bias_activation", &bias_activation, "DeepSpeed bias activation in CUDA"); + + // layer_norm.h + m.def("layer_norm", &ds_layer_norm, "DeepSpeed layer norm in CUDA"); + m.def("pre_layer_norm", &ds_pre_layer_norm, "DeepSpeed pre layer norm in CUDA"); + m.def("post_layer_norm", &ds_post_layer_norm, "DeepSpeed pre layer norm in CUDA"); + + // blas.h + m.def("blas_linear", &blas_linear, "Linear implemented by vendor BLAS"); + m.def("blas_4d_matmul", &blas_4d_matmul, "4D matmul implemented by vendor BLAS"); + m.def("create_handle", &create_handle, "Create a handle for vendor BLAS"); + + // gated_activation_kernels.h + m.def("gated_activation", &ds_gated_activation, "DeepSpeed gated activation in CUDA"); + + // rms_norm.h + m.def("rms_norm", &rms_norm, "DeepSpeed rms norm in CUDA"); + m.def("rms_pre_norm", &rms_pre_norm, "DeepSpeed rms pre norm in CUDA"); + + // linear_kernels.h + m.def("cuda_wf6af16_linear", &cuda_wf6af16_linear, "DeepSpeed Wf6Af16 linear in CUDA"); + m.def( + "preprocess_weight", &preprocess_weight, "preprocess the FP16 weight to be 2bit and 4 bit"); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..bed7688b15d2674d65cf653b4375db97a3851cbe --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .cuda_ln import * +from .cuda_post_ln import * +from .cuda_pre_ln import * diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e8c8640ffbdf6bc0d16ac6a3e1f0081c21991b9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_fp_ln_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_fp_ln_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9a1f1315dc588c0e4b82691bd5b9828cc6bde06 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_fp_ln_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_ln.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_ln.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf78394a90311c59d435c6b49bc63ee779234888 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_ln.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_post_ln.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_post_ln.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5ba1db3397d8487e7d0ed843c0788b916f2bdd4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_post_ln.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_pre_ln.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_pre_ln.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b0f52c823c66dc76916289c0701409a4d167f8a0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_pre_ln.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_fp_ln_base.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_fp_ln_base.py new file mode 100644 index 0000000000000000000000000000000000000000..3c2aa5cb5eb4553010bede7c777268757bd897d4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_fp_ln_base.py @@ -0,0 +1,37 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from ... 
import DSKernelBase +from ....inference_utils import elem_size +from deepspeed.ops.op_builder import InferenceCoreBuilder + + +class CUDAFPLNBase(DSKernelBase): + """ + Base class for CUDA LN kernels. They all same the same validation logic, + so we can share it here. + """ + + supported_dtypes = [torch.float16, torch.bfloat16, torch.float32] + + def __init__(self, channels: int, fp_dtype: torch.dtype, epsilon: float = 1e-5): + """ + Parameters: + channels (int): Number of channels in the input tensor. Must be divisible to align + to 16 bytes. + fp_dtype (torch.dtype): Data type for the input/output/gamma. Supported values + are torch.float16, torch.bfloat16, and torch.float32. + """ + if fp_dtype not in CUDAFPLNBase.supported_dtypes: + raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format( + fp_dtype, CUDAFPLNBase.supported_dtypes)) + + if elem_size(fp_dtype) * channels % 16 != 0: + raise ValueError("channels must be divisible by 16 bytes") + + self.inf_module = InferenceCoreBuilder().load() + self.epsilon = epsilon diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_ln.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_ln.py new file mode 100644 index 0000000000000000000000000000000000000000..583736fb8bbc0ebcd07a63452ce029b002d6d937 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_ln.py @@ -0,0 +1,30 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from .cuda_fp_ln_base import CUDAFPLNBase + + +class CUDAFPLN(CUDAFPLNBase): + """ + Floating point layer norm kernel for CUDA/RoCM. + + Performs: z = ln(x) + """ + + def __call__(self, output_z: torch.Tensor, input_x: torch.Tensor, gamma: torch.Tensor, + beta: torch.Tensor) -> torch.Tensor: + """ + output_z may alias input_x directly. All Tensors should have the same shape. + + Parameters: + output_z (torch.Tensor): Output tensor. + input_x (torch.Tensor): Input tensor. + gamma (torch.Tensor): Gamma tensor. + beta (torch.Tensor): Beta tensor. + """ + self.inf_module.layer_norm(output_z, input_x, gamma, beta, self.epsilon) + return output_z diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_post_ln.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_post_ln.py new file mode 100644 index 0000000000000000000000000000000000000000..0ced1ecf207e80157e30fcb9f0b1893597d899be --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_post_ln.py @@ -0,0 +1,34 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from .cuda_fp_ln_base import CUDAFPLNBase + + +class CUDAFPPostLN(CUDAFPLNBase): + """ + Floating point post-LayerNorm kernel for CUDA/RoCM. + + Performs: z = ln(x + y) + """ + + def __call__(self, output_z: torch.Tensor, input_x: torch.Tensor, input_y: torch.Tensor, gamma: torch.Tensor, + beta: torch.Tensor) -> torch.Tensor: + """ + Either input_x or input_y can alias output_z. + + Parameters: + output_z (torch.Tensor): Output tensor. + input_x (torch.Tensor): Input tensor. + input_y (torch.Tensor): Input tensor. + gamma (torch.Tensor): Gamma tensor. + beta (torch.Tensor): Beta tensor. + + Returns: + output (torch.Tensor): Output tensor. 
+ """ + self.inf_module.post_layer_norm(output_z, input_x, input_y, gamma, beta, self.epsilon) + return output_z diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_pre_ln.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_pre_ln.py new file mode 100644 index 0000000000000000000000000000000000000000..74b2d9cf58801c6273ce1647091752ebae3e147d --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_pre_ln.py @@ -0,0 +1,39 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Tuple + +import torch + +from .cuda_fp_ln_base import CUDAFPLNBase + + +class CUDAFPPreLN(CUDAFPLNBase): + """ + Floating point pre-LayerNorm kernel for CUDA/RoCM. + + Performs: z_res = x_res + y_hid + z_hid = ln(z_hid) + """ + + def __call__(self, z_res: torch.Tensor, z_hid: torch.Tensor, x_res: torch.Tensor, y_hid: torch.Tensor, + gamma: torch.Tensor, beta: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + z_res can alias x_res. All non-parameter input/output tensors + must have the same shape. z_hid can alias y_hid. + + Parameters: + z_res (torch.Tensor): Output residual. + z_hid (torch.Tensor): Output hidden states. + x_res (torch.Tensor): Input residual. + y_hid (torch.Tensor): Input hidden states. + gamma (torch.Tensor): Gamma tensor. + beta (torch.Tensor): Beta tensor. + + Returns: + output (torch.Tensor): Output tensor. + """ + self.inf_module.pre_layer_norm(z_res, z_hid, x_res, y_hid, gamma, beta, self.epsilon) + return z_res, z_hid diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.cpp b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b2c95d410a1f2354a45724b44064ff26e2321ccf --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.cpp @@ -0,0 +1,102 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "layer_norm.h" + +#define DISPATCH_LAYER_NORM(T_TYPE, C_TYPE) \ + if (input.options().dtype() == torch::T_TYPE) { \ + launch_fused_ln((C_TYPE*)output.data_ptr(), \ + (const C_TYPE*)input.data_ptr(), \ + (const C_TYPE*)gamma.data_ptr(), \ + (const C_TYPE*)beta.data_ptr(), \ + epsilon, \ + rows, \ + elems_per_row, \ + at::cuda::getCurrentCUDAStream()); \ + } + +void ds_layer_norm(at::Tensor& output, + at::Tensor& input, + at::Tensor& gamma, + at::Tensor& beta, + float epsilon) +{ + bool ragged_input = input.dim() == 2; + + const int rows = ragged_input ? input.size(0) : input.size(0) * input.size(1); + const int elems_per_row = ragged_input ? 
input.size(1) : input.size(2); + + DISPATCH_LAYER_NORM(kFloat, float); + DISPATCH_LAYER_NORM(kHalf, __half); +#ifdef BF16_AVAILABLE + DISPATCH_LAYER_NORM(kBFloat16, __nv_bfloat16); +#endif +} + +#define DISPATCH_LAYER_NORM_RESIDUAL(T_TYPE, C_TYPE) \ + if (input.options().dtype() == torch::T_TYPE) { \ + launch_fused_post_ln((C_TYPE*)output.data_ptr(), \ + (const C_TYPE*)input.data_ptr(), \ + (const C_TYPE*)residual.data_ptr(), \ + (const C_TYPE*)gamma.data_ptr(), \ + (const C_TYPE*)beta.data_ptr(), \ + epsilon, \ + rows, \ + elems_per_row, \ + at::cuda::getCurrentCUDAStream()); \ + } + +void ds_post_layer_norm(at::Tensor& output, + at::Tensor& input, + at::Tensor& residual, + at::Tensor& gamma, + at::Tensor& beta, + float epsilon) +{ + bool ragged_input = input.dim() == 2; + + const int rows = ragged_input ? input.size(0) : input.size(0) * input.size(1); + const int elems_per_row = ragged_input ? input.size(1) : input.size(2); + + DISPATCH_LAYER_NORM_RESIDUAL(kFloat, float); + DISPATCH_LAYER_NORM_RESIDUAL(kHalf, __half); +#ifdef BF16_AVAILABLE + DISPATCH_LAYER_NORM_RESIDUAL(kBFloat16, __nv_bfloat16); +#endif +} + +#define DISPATCH_PRE_LAYER_NORM_RESIDUAL(T_TYPE, C_TYPE) \ + if (input.options().dtype() == torch::T_TYPE) { \ + launch_fused_pre_ln((C_TYPE*)norm_output.data_ptr(), \ + (C_TYPE*)res_output.data_ptr(), \ + (const C_TYPE*)input.data_ptr(), \ + (const C_TYPE*)residual.data_ptr(), \ + (const C_TYPE*)gamma.data_ptr(), \ + (const C_TYPE*)beta.data_ptr(), \ + epsilon, \ + rows, \ + elems_per_row, \ + at::cuda::getCurrentCUDAStream()); \ + } + +void ds_pre_layer_norm(at::Tensor& res_output, + at::Tensor& norm_output, + at::Tensor& input, + at::Tensor& residual, + at::Tensor& gamma, + at::Tensor& beta, + float epsilon) +{ + bool ragged_input = input.dim() == 2; + + const int rows = ragged_input ? input.size(0) : input.size(0) * input.size(1); + const int elems_per_row = ragged_input ? input.size(1) : input.size(2); + + DISPATCH_PRE_LAYER_NORM_RESIDUAL(kFloat, float); + DISPATCH_PRE_LAYER_NORM_RESIDUAL(kHalf, __half); +#ifdef BF16_AVAILABLE + DISPATCH_PRE_LAYER_NORM_RESIDUAL(kBFloat16, __nv_bfloat16); +#endif +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.h new file mode 100644 index 0000000000000000000000000000000000000000..9ea3a8c4252415953b1f30208257fdc52075063a --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.h @@ -0,0 +1,67 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include +#include +#include "ds_kernel_utils.h" + +/* +Kernel launch methods for layer norm variants. 
+*/ + +template +void launch_fused_ln(T* output, + const T* vals, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream); + +template +void launch_fused_post_ln(T* output, + const T* vals, + const T* residual, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream); +template +void launch_fused_pre_ln(T* norm_output, + T* res_output, + const T* vals, + const T* residual, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream); + +void ds_layer_norm(at::Tensor& output, + at::Tensor& input, + at::Tensor& gamma, + at::Tensor& beta, + float epsilon); + +void ds_post_layer_norm(at::Tensor& output, + at::Tensor& input, + at::Tensor& residual, + at::Tensor& gamma, + at::Tensor& beta, + float epsilon); + +void ds_pre_layer_norm(at::Tensor& res_output, + at::Tensor& norm_output, + at::Tensor& input, + at::Tensor& residual, + at::Tensor& gamma, + at::Tensor& beta, + float epsilon); diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm_cuda.cu b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..15f52c46622b5f4aca534dc8e6cb749819846d92 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm_cuda.cu @@ -0,0 +1,490 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "conversion_utils.h" +#include "ds_kernel_utils.h" +#include "memory_access_utils.h" +#include "reduction_utils.h" + +namespace cg = cooperative_groups; +using rop = reduce::ROpType; + +namespace ln { +constexpr int granularity = 16; +} // namespace ln + +/* +Regular layer norm implementation. Assumes elems_per_row % 8 +is equal to 0. 
+ +Args: + output: buffer for output data + vals: buffer for input data + gamma: gain for normalization + beta: bias for normalization + epsilon: numeric stability + elems_per_row: number of elements each block will normalize +*/ +template +__global__ void fused_ln(T* output, + const T* vals, + const T* gamma, + const T* beta, + float epsilon, + int elems_per_row) +{ + constexpr int T_per_load = ln::granularity / sizeof(T); + + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + + // X-dimension of the block + const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) + + (tb.thread_index().y * elems_per_row); + const int thread_offset = tb.thread_index().x * T_per_load; + const int base_offset = block_offset + thread_offset; + const int stride = blockDim.x * T_per_load; + + float sum = reduce::init(); + + const T* input_base = vals + base_offset; + + T local_buffer[unRoll * T_per_load]; + +#pragma unRoll + for (int i = 0; i < unRoll; i++) { + T* iteration_buffer = local_buffer + i * T_per_load; + + mem_access::load_global( + iteration_buffer, input_base + i * stride, thread_offset + i * stride < elems_per_row); + +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + float vals_up_cast = conversion::to(iteration_buffer[j]); + sum = reduce::element(sum, vals_up_cast); + } + } + + reduce::partitioned_block(tb, warp, sum); + const float mean = sum / elems_per_row; + + float mean_diff = reduce::init(); + +#pragma unRoll + for (int i = 0; i < unRoll; i++) { +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + // Using a 0 value here skews the variance, have to if-guard + if (thread_offset + i * stride < elems_per_row) { + float diff = (conversion::to(local_buffer[i * T_per_load + j]) - mean); + mean_diff = reduce::element(mean_diff, diff * diff); + } + } + } + + reduce::partitioned_block(tb, warp, mean_diff); + const float variance = mean_diff / elems_per_row; + const float denom = __frsqrt_rn(variance + epsilon); + + T* block_output = output + block_offset; + +#pragma unRoll + for (int i = 0; i < unRoll; i++) { + T* iteration_buffer = local_buffer + i * T_per_load; + const int iter_idx = i * stride + thread_offset; + const bool do_loads = iter_idx < elems_per_row; + + T gamma_local[T_per_load], beta_local[T_per_load]; + + mem_access::load_global(gamma_local, gamma + iter_idx, do_loads); + mem_access::load_global(beta_local, beta + iter_idx, do_loads); + +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + float val = conversion::to(iteration_buffer[j]); + val = (val - mean) * denom; + val = + val * conversion::to(gamma_local[j]) + conversion::to(beta_local[j]); + iteration_buffer[j] = conversion::to(val); + } + + if (do_loads) { + mem_access::store_global(block_output + iter_idx, iteration_buffer); + } + } +} + +#define LAUNCH_FUSED_LN(unRollFactor, threadsPerGroup, maxThreads) \ + fused_ln \ + <<>>(output, vals, gamma, beta, epsilon, elems_per_row); + +template +void launch_fused_ln(T* output, + const T* vals, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream) +{ + // 8 for __half, 4 for float + constexpr int T_per_load = ln::granularity / sizeof(T); + + constexpr int maxThreads = 256; + + // For Flaoat, unRoll 4, for __half, unRoll 2 + constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2; + + const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false; + const int h_per_step = is_subblock_schedule ? 
T_per_load : T_per_load * internal_unRoll; + + // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of + // warp-sized blocks rather than stepping up to 64/96 threads + const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step); + const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads; + + const int groups_per_block_max = + is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1; + const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max; + const int groups_launch = (groups_per_block + rows - 1) / groups_per_block; + + dim3 block(threadsPerGroup, groups_per_block); + dim3 grid(groups_launch); + + const int elems_per_step = threadsPerGroup * h_per_step; + const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step; + + if (is_subblock_schedule) { + // <=128 + if (threadsPerGroup == 1) { + LAUNCH_FUSED_LN(1, 1, maxThreads); + } else if (threadsPerGroup == 2) { + LAUNCH_FUSED_LN(1, 2, maxThreads); + } else if (threadsPerGroup == 4) { + LAUNCH_FUSED_LN(1, 4, maxThreads); + } else if (threadsPerGroup == 8) { + LAUNCH_FUSED_LN(1, 8, maxThreads); + } else if (threadsPerGroup == 16) { + LAUNCH_FUSED_LN(1, 16, maxThreads); + } + } else if (external_unRoll == 1) { + // 129 - 4096 elems + // (this can launch with 1-7 warps as well) + LAUNCH_FUSED_LN(1 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 2) { + // 4097 - 8192 elems + LAUNCH_FUSED_LN(2 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 3) { + // 8193 - 12288 elems + LAUNCH_FUSED_LN(3 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 4) { + // 12289 - 16384 elems + LAUNCH_FUSED_LN(4 * internal_unRoll, maxThreads, maxThreads); + } +} + +#define INSTANTIATE_FUSED_LN(T) \ + template void launch_fused_ln(T*, const T*, const T*, const T*, float, int, int, cudaStream_t); + +INSTANTIATE_FUSED_LN(__half); +#ifdef BF16_AVAILABLE +INSTANTIATE_FUSED_LN(__nv_bfloat16); +#endif +INSTANTIATE_FUSED_LN(float); + +/* +Fused resiual + bias + layer norm implementation. Assumes elems_per_row % 8 +is equal to 0. + +TODO(cmikeh2): Goal is to deprecate this implementation. The bias + residual +need to be fused into compute-bound producer operations. + +Args: + output: buffer for output data + res_output: output of residual addition + vals: buffer for input data + residual: residual data + bias: bias of of input data + gamma: gain for normalization + beta: bias for normalization + epsilon: numeric stability + elems_per_row: number of elements each block will normalize +Template arg: + StoreResidual: controls whether the residual calculation is stored + or not. When set to false, the input `res_output` is unused. 
+*/ +template +__global__ void fused_residual_ln(T* output, + T* res_output, + const T* vals, + const T* residual, + const T* gamma, + const T* beta, + float epsilon, + int elems_per_row) +{ + constexpr int T_per_load = ln::granularity / sizeof(T); + + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + + // X-dimension of the block + const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) + + (tb.thread_index().y * elems_per_row); + const int thread_offset = tb.thread_index().x * T_per_load; + const int base_offset = block_offset + thread_offset; + const int stride = tb.size() * T_per_load; + + float sum = reduce::init(); + + const T* input_base = vals + base_offset; + const T* residual_base = residual + base_offset; + + T local_buffer[unRoll * T_per_load]; + + // Unlike a vanilla layernorm, since we're fusing the two adds as well + // an inner unRoll seems to be less valuable. If anything, a double unRoll + // makes the most sense if we find we are having performance issues. +#pragma unRoll + for (int i = 0; i < unRoll; i++) { + T* iteration_buffer = local_buffer + i * T_per_load; + T residual_buffer[T_per_load]; + T bias_buffer[T_per_load]; + + mem_access::load_global( + iteration_buffer, input_base + i * stride, thread_offset + i * stride < elems_per_row); + mem_access::load_global(residual_buffer, + residual_base + i * stride, + thread_offset + i * stride < elems_per_row); + +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + float vals_up_cast = conversion::to(iteration_buffer[j]); + float res_up_cast = conversion::to(residual_buffer[j]); + vals_up_cast += res_up_cast; + sum = reduce::element(sum, vals_up_cast); + iteration_buffer[j] = conversion::to(vals_up_cast); + } + + if (preLnResidual && (thread_offset + i * stride < elems_per_row)) { + mem_access::store_global(res_output + base_offset + i * stride, + iteration_buffer); + } + } + + reduce::partitioned_block(tb, warp, sum); + const float mean = sum / elems_per_row; + + float mean_diff = reduce::init(); +#pragma unRoll + for (int i = 0; i < unRoll; i++) { +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + // Using a 0 value here skews the variance, have to if-guard + if (thread_offset + i * stride < elems_per_row) { + float diff = (conversion::to(local_buffer[i * T_per_load + j]) - mean); + mean_diff = reduce::element(mean_diff, diff * diff); + } + } + } + + reduce::partitioned_block(tb, warp, mean_diff); + const float variance = mean_diff / elems_per_row; + const float denom = __frsqrt_rn(variance + epsilon); + + T* block_output = output + block_offset; + +#pragma unRoll + for (int i = 0; i < unRoll; i++) { + T* iteration_buffer = local_buffer + i * T_per_load; + const int iter_idx = i * stride + thread_offset; + const bool do_loads = iter_idx < elems_per_row; + + T gamma_local[T_per_load], beta_local[T_per_load]; + + mem_access::load_global(gamma_local, gamma + iter_idx, do_loads); + mem_access::load_global(beta_local, beta + iter_idx, do_loads); + +#pragma unRoll + for (int j = 0; j < T_per_load; j++) { + float val = conversion::to(iteration_buffer[j]); + val = (val - mean) * denom; + val = + val * conversion::to(gamma_local[j]) + conversion::to(beta_local[j]); + iteration_buffer[j] = conversion::to(val); + } + + if (do_loads) { + mem_access::store_global(block_output + iter_idx, iteration_buffer); + } + } +} + +// TODO(cmikeh2): There's a bunch of redundancy here that needs to be removed/simplified. 
+#define LAUNCH_FUSED_RES_LN(unRollFactor, threadsPerGroup, maxThreads) \ + fused_residual_ln \ + <<>>( \ + output, nullptr, vals, residual, gamma, beta, epsilon, elems_per_row); + +template +void launch_fused_post_ln(T* output, + const T* vals, + const T* residual, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream) +{ + // 8 for __half, 4 for float + constexpr int T_per_load = ln::granularity / sizeof(T); + + constexpr int maxThreads = 256; + + // For Flaoat, unRoll 4, for __half, unRoll 2 + constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2; + + const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false; + const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internal_unRoll; + + // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of + // warp-sized blocks rather than stepping up to 64/96 threads + const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step); + const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads; + + const int groups_per_block_max = + is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1; + const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max; + const int groups_launch = (groups_per_block + rows - 1) / groups_per_block; + + dim3 block(threadsPerGroup, groups_per_block); + dim3 grid(groups_launch); + + const int elems_per_step = threadsPerGroup * h_per_step; + const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step; + + if (is_subblock_schedule) { + // <=128 + if (threadsPerGroup == 1) { + LAUNCH_FUSED_RES_LN(1, 1, maxThreads); + } else if (threadsPerGroup == 2) { + LAUNCH_FUSED_RES_LN(1, 2, maxThreads); + } else if (threadsPerGroup == 4) { + LAUNCH_FUSED_RES_LN(1, 4, maxThreads); + } else if (threadsPerGroup == 8) { + LAUNCH_FUSED_RES_LN(1, 8, maxThreads); + } else if (threadsPerGroup == 16) { + LAUNCH_FUSED_RES_LN(1, 16, maxThreads); + } + } else if (external_unRoll == 1) { + // 129 - 4096 elems + // (this can launch with 1-7 warps as well) + LAUNCH_FUSED_RES_LN(1 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 2) { + // 4097 - 8192 elems + LAUNCH_FUSED_RES_LN(2 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 3) { + // 8193 - 12288 elems + LAUNCH_FUSED_RES_LN(3 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 4) { + // 12289 - 16384 elems + LAUNCH_FUSED_RES_LN(4 * internal_unRoll, maxThreads, maxThreads); + } +} + +#define LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(unRollFactor, threadsPerGroup, maxThreads) \ + fused_residual_ln \ + <<>>( \ + norm_output, res_output, vals, residual, gamma, beta, epsilon, elems_per_row); + +template +void launch_fused_pre_ln(T* norm_output, + T* res_output, + const T* vals, + const T* residual, + const T* gamma, + const T* beta, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream) +{ + // 8 for __half, 4 for float + constexpr int T_per_load = ln::granularity / sizeof(T); + + constexpr int maxThreads = 256; + + // For Flaoat, unRoll 4, for __half, unRoll 2 + constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2; + + const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false; + const int h_per_step = is_subblock_schedule ? 
T_per_load : T_per_load * internal_unRoll; + + // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of + // warp-sized blocks rather than stepping up to 64/96 threads + const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step); + const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads; + + const int groups_per_block_max = + is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1; + const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max; + const int groups_launch = (groups_per_block + rows - 1) / groups_per_block; + + dim3 block(threadsPerGroup, groups_per_block); + dim3 grid(groups_launch); + + const int elems_per_step = threadsPerGroup * h_per_step; + const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step; + + if (is_subblock_schedule) { + // <=128 + if (threadsPerGroup == 1) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 1, maxThreads); + } else if (threadsPerGroup == 2) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 2, maxThreads); + } else if (threadsPerGroup == 4) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 4, maxThreads); + } else if (threadsPerGroup == 8) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 8, maxThreads); + } else if (threadsPerGroup == 16) { + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 16, maxThreads); + } + } else if (external_unRoll == 1) { + // 129 - 4096 elems + // (this can launch with 1-7 warps as well) + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 2) { + // 4097 - 8192 elems + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(2 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 3) { + // 8193 - 12288 elems + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(3 * internal_unRoll, maxThreads, maxThreads); + } else if (external_unRoll == 4) { + // 12289 - 16384 elems + LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(4 * internal_unRoll, maxThreads, maxThreads); + } +} + +#define INSTANTIATE_RES_LN(T) \ + template void launch_fused_post_ln( \ + T*, const T*, const T*, const T*, const T*, float, int, int, cudaStream_t); + +#define INSTANTIATE_PRE_LN_RES(T) \ + template void launch_fused_pre_ln( \ + T*, T*, const T*, const T*, const T*, const T*, float, int, int, cudaStream_t); + +INSTANTIATE_RES_LN(__half); +INSTANTIATE_RES_LN(float); +#ifdef BF16_AVAILABLE +INSTANTIATE_RES_LN(__nv_bfloat16); +#endif + +INSTANTIATE_PRE_LN_RES(__half); +INSTANTIATE_PRE_LN_RES(float); +#ifdef BF16_AVAILABLE +INSTANTIATE_PRE_LN_RES(__nv_bfloat16); +#endif diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cd08409c0a7a51e595c63f419e3e55b1fe787e1b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .cuda_linear import * diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c686ed2fe3eafdd92aa3c5d5059b0bac4763fbf Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/__pycache__/cuda_linear.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/__pycache__/cuda_linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..657edf50dca99e7d62d38441db15f05aa71d35bc Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/__pycache__/cuda_linear.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/cuda_linear.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/cuda_linear.py new file mode 100644 index 0000000000000000000000000000000000000000..69aa9e8920e28cad8236b18ca8153c6608547008 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/cuda_linear.py @@ -0,0 +1,207 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from ....inference_utils import DtypeEnum +from ....logging import inference_logger +from deepspeed.ops.op_builder import InferenceCoreBuilder +from ... import DSKernelBase + + +class CUDAWf6Af16Linear(DSKernelBase): + """ + Wrapper around the CUDA kernel of Wf6Af16 quantized linear. + + Performs z = x @ y + """ + supported_dtypes = [DtypeEnum.fp16] + + def __init__(self): + self.inf_module = InferenceCoreBuilder().load() + self.inf_module.create_handle() + self.kernel = self.inf_module.cuda_wf6af16_linear + # The split_k_map is profiled on A100-80G GPU for some common shapes. + # It is an array of dictionaries, where the array index is the tokens chunk id. + # The dictionary is the mapping from the output channel to the split-K size. 
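+        # Each entry below covers a 64-token chunk (index 0 -> tokens [1, 64], index 1 ->
+        # tokens [65, 128], ..., index 11 -> tokens [705, 768]); keys are output-channel
+        # counts and values are the profiled split-K choices.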
+ self.split_k_map = [ + { # tokens: [1, 64] + 3072: 18, + 4096: 13, + 5120: 10, + 6144: 9, + 8192: 6, + 10240: 5, + 14336: 7, + 28672: 7, + 57344: 7 + }, + { # tokens: [65:128] + 3072: 9, + 4096: 6, + 5120: 5, + 6144: 9, + 8192: 3, + 10240: 5, + 14336: 7, + 28672: 7, + 57344: 6 + }, + { # tokens: [129:192] + 3072: 6, + 4096: 4, + 5120: 7, + 6144: 3, + 8192: 2, + 10240: 5, + 14336: 5, + 28672: 5, + 57344: 4 + }, + { # tokens: [193:256] + 3072: 9, + 4096: 3, + 5120: 5, + 6144: 2, + 8192: 5, + 10240: 4, + 14336: 8, + 28672: 6, + 57344: 4 + }, + { # tokens: [257:320] + 3072: 7, + 4096: 5, + 5120: 2, + 6144: 5, + 8192: 4, + 10240: 1, + 14336: 3, + 28672: 3, + 57344: 4 + }, + { # tokens: [321:384] + 3072: 3, + 4096: 2, + 5120: 5, + 6144: 3, + 8192: 1, + 10240: 8, + 14336: 3, + 28672: 4, + 57344: 3 + }, + { # tokens: [385:448] + 3072: 5, + 4096: 7, + 5120: 3, + 6144: 5, + 8192: 7, + 10240: 3, + 14336: 1, + 28672: 1, + 57344: 3 + }, + { # tokens: [449:512] + 3072: 2, + 4096: 5, + 5120: 4, + 6144: 1, + 8192: 5, + 10240: 2, + 14336: 6, + 28672: 4, + 57344: 1 + }, + { # tokens: [513:576] + 3072: 2, + 4096: 3, + 5120: 1, + 6144: 1, + 8192: 3, + 10240: 3, + 14336: 3, + 28672: 1, + 57344: 1 + }, + { # tokens: [577:640] + 3072: 5, + 4096: 4, + 5120: 1, + 6144: 4, + 8192: 2, + 10240: 1, + 14336: 1, + 28672: 1, + 57344: 1 + }, + { # tokens: [641:704] + 3072: 3, + 4096: 1, + 5120: 2, + 6144: 2, + 8192: 1, + 10240: 2, + 14336: 1, + 28672: 1, + 57344: 1 + }, + { # tokens: [705:768] + 3072: 3, + 4096: 1, + 5120: 3, + 6144: 2, + 8192: 1, + 10240: 1, + 14336: 1, + 28672: 1, + 57344: 1 + } + ] + + def __call__(self, output: torch.Tensor, hidden_states: torch.Tensor, weights_2bit: torch.Tensor, + weights_4bit: torch.Tensor, scale: torch.Tensor, out_channels, tokens, in_channels) -> torch.Tensor: + """ + Matmul kernel of FP6 weight-only quantized linear. All inputs should be contiguous. + It does not support batched-matmul. + + Parameters: + output (torch.Tensor): Output tensor. Shape is of [token_number, out_features] + hidden_states (torch.Tensor): Input tensor. Shape is of [token_number, in_features] + weights_2bit (torch.Tensor): Input tensor of the 2-bit slice. Shape is of [out_features*2/8, in_features] + weights_4bit (torch.Tensor): Input tensor of the 4-bit slice. Shape is of [out_features*4/8, in_features] + scale (torch.Tensor): Input tensor. Shape is of [out_features], since the scale is per output channel + out_channels (int): The number of output channels + tokens (int): The number of tokens + in_channels (int): The number of input channels + """ + + if out_channels % 256 != 0 or in_channels % 64 != 0: + raise ValueError("The out and in channel should be multiple of 256 and 64 respectively.") + + # TODO: add a more general heuristic to determine the split-K. + split_k = -1 # not initialized + if tokens <= 768: + # Try to find the split-K from the pre-profiled map. 
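+            # For example, tokens=512 with out_channels=4096 falls in chunk (512 - 1) // 64 == 7,
+            # and split_k_map[7][4096] == 5; shapes not present in the map fall back to
+            # split_k = 1 below, with a warning.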
+ tokens_chunk_id = (tokens - 1) // 64 + split_k = self.split_k_map[tokens_chunk_id].get(out_channels, -1) + if split_k == -1: + split_k = 1 + inference_logger().warning( + f"The split-K setting may be suboptimal for shape {tokens}x{in_channels}x{out_channels}...") + + workspace = self.get_workspace(out_channels, tokens, in_channels, split_k, torch.float, hidden_states.device) + self.kernel(output, hidden_states, weights_2bit, weights_4bit, scale, workspace, out_channels, tokens, + in_channels, split_k) + + def get_workspace(self, out_channels: int, tokens: int, in_channels: int, split_k: int, dtype, + device) -> torch.Tensor: + """ + Allocate workspace for the kernel. The workspace is used to store the intermediate results of the matmul before + split-K. The split-K size is determined by the size of the matmul. + """ + workspace = torch.empty((split_k, out_channels, tokens), dtype=dtype, device=device) + + return workspace diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/kernel_reduction.cuh b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/kernel_reduction.cuh new file mode 100644 index 0000000000000000000000000000000000000000..c417e6a46a7c2bda487f20ca92dbe5c02c185ec1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/kernel_reduction.cuh @@ -0,0 +1,43 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +// This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112 + +#ifndef DEEPSPEED_CUDA_LINEAR_KERNEL_REDUCTION_CUH +#define DEEPSPEED_CUDA_LINEAR_KERNEL_REDUCTION_CUH + +#include +#include +#include + +#define REDUCTION_ELEMENT_PER_THREADBLOCK 256 +#define HALF_PER_128BIT 8 + +__global__ void SplitK_Reduction(half* C, + float* Reduction_Workspace, + size_t M_Global, + size_t N_Global, + int Split_K) +{ + half* WARP_GPTR_C = C + REDUCTION_ELEMENT_PER_THREADBLOCK * blockIdx.x; + float* WARP_GPTR_R = Reduction_Workspace + REDUCTION_ELEMENT_PER_THREADBLOCK * blockIdx.x; + half* THREAD_GPTR_C = WARP_GPTR_C + threadIdx.x * HALF_PER_128BIT; + float* THREAD_GPTR_R = WARP_GPTR_R + threadIdx.x * HALF_PER_128BIT; + // Initializing Thread-Local Results + float Results[HALF_PER_128BIT]; +#pragma unroll + for (int i = 0; i < HALF_PER_128BIT; i++) Results[i] = 0.0f; + // Reduction + for (int i = 0; i < Split_K; i++) { +#pragma unroll + for (int j = 0; j < HALF_PER_128BIT; j++) Results[j] += THREAD_GPTR_R[j]; + THREAD_GPTR_R += M_Global * N_Global; + } +// Writing to global memory +#pragma unroll + for (int i = 0; i < HALF_PER_128BIT; i++) THREAD_GPTR_C[i] = __float2half_rn(Results[i]); +} + +#endif diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/ptx_cp.async.cuh b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/ptx_cp.async.cuh new file mode 100644 index 0000000000000000000000000000000000000000..39874e023539a6dbe33075165315bd993b1d5432 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/ptx_cp.async.cuh @@ -0,0 +1,71 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +// This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112 + +#ifndef DEEPSPEED_CUDA_LINEAR_PTX_CP_ASYNC_CUH +#define DEEPSPEED_CUDA_LINEAR_PTX_CP_ASYNC_CUH + +#include +#include +#include + +template +__device__ __forceinline__ void cp_async(half* smem_ptr, + const half* global_ptr, + bool pred_guard = true) +{ +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 + static_assert(SizeInBytes == 16, "Size is not supported"); + unsigned smem_int_ptr = __cvta_generic_to_shared(smem_ptr); + asm volatile( + "{ \n" + " .reg .pred p;\n" + " setp.ne.b32 p, %0, 0;\n" + " @p cp.async.cg.shared.global [%1], [%2], %3;\n" + "}\n" ::"r"((int)pred_guard), + "r"(smem_int_ptr), + "l"(global_ptr), + "n"(SizeInBytes)); +#else +#warning "The async copy functions are only supported on Ampere and newer architectures" +#endif +} + +/// Establishes an ordering w.r.t previously issued cp.async instructions. Does not block. +__device__ __forceinline__ void cp_async_group_commit() +{ +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 + asm volatile("cp.async.commit_group;\n" ::); +#else +#warning "The async copy functions are only supported on Ampere and newer architectures" +#endif +} + +/// Blocks until all but previous cp.async.commit_group operations have committed. +template +__device__ __forceinline__ void cp_async_wait_group() +{ +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 + asm volatile("cp.async.wait_group %0;\n" ::"n"(N)); +#else +#warning "The async copy functions are only supported on Ampere and newer architectures" +#endif +} + +/// Blocks until all previous cp.async.commit_group operations have committed. +// cp.async.wait_all is equivalent to : +// cp.async.commit_group; +// cp.async.wait_group 0; +__device__ __forceinline__ void cp_async_wait_all() +{ +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 + asm volatile("cp.async.wait_all;\n" ::); +#else +#warning "The async copy functions are only supported on Ampere and newer architectures" +#endif +} + +#endif diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/linear_kernels.cpp b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/linear_kernels.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3b4966eb822b245990a6523fd93e6727e277730c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/linear_kernels.cpp @@ -0,0 +1,224 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include + +#include "linear_kernels.h" + +namespace { + +// For bit-level debugging. +template +void print_bits(T num) +{ + char bits[sizeof(T) * 8 + 1] = {'\0'}; + for (int bit = 0; bit < (sizeof(T) * 8); bit++) { + bits[sizeof(T) * 8 - 1 - bit] = '0' + (num & 0x01); + num = num >> 1; + } + printf("%s\n", bits); +} + +void print_bits(half num) +{ + char bits[sizeof(half) * 8 + 1] = {'\0'}; + auto int_num = *reinterpret_cast(&num); + for (int bit = 0; bit < (sizeof(half) * 8); bit++) { + bits[sizeof(half) * 8 - 1 - bit] = '0' + (int_num & 0x01); + int_num = int_num >> 1; + } + printf("%s\n", bits); +} + +/* + * Function to pack 4 fake quantized FP16 value into continuously stored 4 FP6 values. 
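+ * Four FP6 values (4 x 6 = 24 bits) occupy three bytes in the packed output; see the
+ * byte-shuffling at the end of the function.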
+ */ +void cast_fp16_fp6(uint16_t* FP16x4, uint8_t* FP6x4) +{ + // Constants for FP6 + constexpr int exponent_nbits_fp6 = 3; + constexpr int mantissa_nbits_fp6 = 2; + constexpr int exp_bias_fp6 = (1 << (exponent_nbits_fp6 - 1)) - 1; + // Constants for FP16 + constexpr int exponent_nbits_fp16 = 5; + constexpr int mantissa_nbits_fp16 = 10; + constexpr int exp_bias_fp16 = (1 << (exponent_nbits_fp16 - 1)) - 1; + + int fp6_temp[4]; + + float absmin_nonzero_fp6 = 0.0625; + // Note that we regard the exponent of '111' as a regular value rather than NaN or inf. This is + // the same with that in qtorch. + float absmax_fp6 = 28; + + for (int i = 0; i < 4; ++i) { + uint16_t source = FP16x4[i]; + float fp6_value_abs = std::abs(__half2float(*((half*)(&source)))); + if ((fp6_value_abs != 0 && fp6_value_abs < absmin_nonzero_fp6) || + fp6_value_abs > absmax_fp6) { + // TODO(zhen): a better way may be rounding it to the nearest FP6 value. + throw std::invalid_argument("Input value out of range for FP6."); + } + + // It is not safe to do shift operation on uint16_t. So we promote it to int. + int source_promote = int(source); + + int sign_bit = (source_promote >> 15); + // Extracting exponent represented in FP16. The sign mask 0x7FFF is '0111 1111 1111 1111' + int exp_bit = (source_promote & 0x7FFF) >> mantissa_nbits_fp16; + // Extracting mantissa represented in FP16 + int mant_bit = source_promote & ((1 << mantissa_nbits_fp16) - 1); + + int new_exp_bit; + int new_mant_bit; + + if (exp_bit == 0) { + // Subnormal FP16 number. Too small for FP6. + new_exp_bit = 0; + new_mant_bit = 0; + } else { + new_mant_bit = mant_bit >> (mantissa_nbits_fp16 - mantissa_nbits_fp6); + new_exp_bit = exp_bit - exp_bias_fp16 + exp_bias_fp6; + + // Deal with subnormal FP6 values. + int target_exp_val = exp_bit - exp_bias_fp16; + int min_fp6_exp_val = -exp_bias_fp6 + 1; + bool subnormal_fp6 = target_exp_val < min_fp6_exp_val; + if (subnormal_fp6) { + // TODO(zhen): add the rounding logic. + new_exp_bit = 0; + // The implicit 1 in the mantissa of FP16 is not present in subnormal FP6. Thus we + // need to add it + new_mant_bit = (new_mant_bit | (1 << mantissa_nbits_fp6)) >> + (min_fp6_exp_val - target_exp_val); + } + } + + fp6_temp[i] = (sign_bit << (exponent_nbits_fp6 + mantissa_nbits_fp6)) | + (new_exp_bit << mantissa_nbits_fp6) | new_mant_bit; + } + // Pack the values + FP6x4[0] = fp6_temp[0] << 2 | (fp6_temp[1] >> 4); + FP6x4[1] = (fp6_temp[1] & 0x0F) << 4 | (fp6_temp[2] >> 2); + FP6x4[2] = (fp6_temp[2] & 0x03) << 6 | fp6_temp[3]; +} + +/* + * Function to prepack FP16 weights into continuous FP6 values. + * + * Parameters: + * weight_16bit: input weight in FP16, size M*K + * weight_6bit: output weight in packed FP6, continuously stored, size M*K*6/8 + * M, K: the shape of the weight + */ +void weight_prepacking_fp16_to_fp6(uint16_t* weight_16bit, + uint8_t* weight_6bit_packed, + size_t M, + size_t K) +{ + // Every four 16-bit elements are packed into three 6-bit values (4*6bit == 3*8bit). + if (K * 6 % 8 != 0) { throw std::invalid_argument("(K * 6 % 8) should be 0"); } + size_t K_fp6_packed = K * 6 / 8; + // #pragma omp parallel for + for (auto m = 0; m < M; m++) { + uint8_t* ptr_6bit = weight_6bit_packed + m * K_fp6_packed; + uint16_t* ptr_16bit = weight_16bit + m * K; + for (auto k = 0; k < K; k += 4) { + cast_fp16_fp6(ptr_16bit, ptr_6bit); + ptr_16bit += 4; + ptr_6bit += 3; + } + } +} + +} // namespace + +/* + * Function to execute the FP6 linear kernel. 
+ * + * Parameters: + * output: output tensor, size M*N + * hidden_states: input activation tensor, size N*K + * weights_2bit: packed 2bit weights, size M*K*2/8 + * weights_4bit: packed 4bit weights, size M*K*4/8 + * scales: scale tensor, size M + * workspace: workspace tensor, size M*N*split_k + * M: the output channel number of the weight + * N: the token number of the activation + * K: the input channel number of the weight + * split_k: the split size of the GEMM calculation + */ +void cuda_wf6af16_linear(torch::Tensor& output, + torch::Tensor& hidden_states, + torch::Tensor& weights_2bit, + torch::Tensor& weights_4bit, + torch::Tensor& scales, + torch::Tensor& workspace, + int M, + int N, + int K, + int split_k) +{ + TORCH_CHECK(weights_2bit.device().type() == torch::kCUDA, "weight_2bit must be on CUDA"); + TORCH_CHECK(weights_4bit.device().type() == torch::kCUDA, "weight_4bit must be on CUDA"); + TORCH_CHECK(hidden_states.device().type() == torch::kCUDA, "X must be on CUDA"); + TORCH_CHECK(scales.device().type() == torch::kCUDA, "scales must be on CUDA"); + + auto status = fp6_linear_kernel(at::cuda::getCurrentCUDAStream(), + (uint4*)(weights_2bit.data_ptr()), + (uint4*)(weights_4bit.data_ptr()), + (half*)(scales.data_ptr()), + (half*)(hidden_states.data_ptr()), + (half*)(output.data_ptr()), + M, + N, + K, + workspace.data_ptr(), + split_k); + if (status != cudaSuccess) { + AT_ERROR("fp6_linear_kernel failed with error: ", cudaGetErrorString(status)); + } +} + +/* + * Function to prepack the fake 6-bit-quantized FP16 weights into 2bit and 4bit. + * + * Parameters: + * weight: input weight in FP16 (containing the quantized FP6-ranged value), size M*K + * Returns: + * weight_2bit: output weight in 2bit, size M*K*2/8 + * weight_4bit: output weight in 4bit, size M*K*4/8 + */ +std::vector preprocess_weight(torch::Tensor& weight) +{ + TORCH_CHECK(weight.dim() == 2, "weight must be 2-dimensional"); + TORCH_CHECK(weight.scalar_type() == torch::kFloat16, "weight must be FP16"); + TORCH_CHECK(weight.is_contiguous(), "weight must be contiguous"); + TORCH_CHECK(weight.device().type() == torch::kCPU, "weight must be on CPU"); + auto M = weight.size(0); + auto K = weight.size(1); + TORCH_CHECK(K % 4 == 0, "K must be multiple of 4"); + + // Pack weight from FP16 to FP6. + uint16_t* weight_16bit_ptr = reinterpret_cast(weight.data_ptr()); + std::vector weight_6bit_packed(M * K * 6 / 8); + uint8_t* weight_6bit_ptr = weight_6bit_packed.data(); + weight_prepacking_fp16_to_fp6(weight_16bit_ptr, weight_6bit_ptr, M, K); + + // Split weight into 2bit and 4bit. + weight_matrix_prepacking(reinterpret_cast(weight_6bit_ptr), M, K); + uint8_t* weight_2bit_ptr = weight_6bit_ptr; + + // Make sure that the new split tensor does not share the underlying memory with the original + // one. Otherwise it will incur some problems when the original tensor is deleted. It also + // makes the memory flattern risky. 
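+    // After weight_matrix_prepacking, the first M*K*2/8 bytes of the packed buffer hold the
+    // 2-bit slices and the following M*K*4/8 bytes hold the 4-bit slices, which is why both
+    // views below can be carved out of the same 6-bit buffer.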
+ auto weight_2bit = + torch::from_blob(weight_2bit_ptr, {M * K * 2 / 8}, torch::kUInt8).clone().detach(); + uint8_t* weight_4bit_ptr = weight_2bit_ptr + M * K * 2 / 8; + auto weight_4bit = + torch::from_blob(weight_4bit_ptr, {M * K * 4 / 8}, torch::kUInt8).clone().detach(); + + return {weight_2bit, weight_4bit}; +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/linear_kernels.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/linear_kernels.h new file mode 100644 index 0000000000000000000000000000000000000000..01a6b7c18af8a4b68865cea54c4cb06b905e5dd7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/linear_kernels.h @@ -0,0 +1,28 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#ifndef DEEPSPEED_CUDA_LINEAR_KERNELS_H +#define DEEPSPEED_CUDA_LINEAR_KERNELS_H + +#include +#include +#include "ds_kernel_utils.h" + +#include "linear_kernels_cuda.h" + +void cuda_wf6af16_linear(torch::Tensor& output, + torch::Tensor& hidden_states, + torch::Tensor& weights_2bit, + torch::Tensor& weights_4bit, + torch::Tensor& scale, + torch::Tensor& workspace, + int M, + int N, + int K, + int split_k); + +std::vector preprocess_weight(torch::Tensor& Weight); + +#endif diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/linear_kernels_cuda.cu b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/linear_kernels_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..ea0203c42f84007b09d77588cb216645a388bcc2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/linear_kernels_cuda.cu @@ -0,0 +1,318 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +// This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112 + +// clang-format off +// Put the torch headers at the front to avoid conflict with other headers on +// `at::nullopt` and `at::optional`. 
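Before the kernel source below, it may help to see the shape-padding rule that fp6_linear_kernel applies to the activation count N when it selects a tiling configuration. A minimal Python sketch under those assumptions (round_up_n is an illustrative name, not part of this file):

def round_up_n(n_global: int) -> int:
    # Mirrors the N_PowerOf2 ladder in fp6_linear_kernel: small N is padded up to the next
    # supported tile width, larger N to the next multiple of 128.
    assert n_global > 0
    for cap in (8, 16, 32, 64, 128):
        if n_global <= cap:
            return cap
    return ((n_global - 1) // 128 + 1) * 128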
+#include +#include +// clang-format on + +#include "include/kernel_matmul.cuh" +#include "include/kernel_reduction.cuh" +#include "include/weight_prepacking.h" + +#include +#include + +#include "linear_kernels_cuda.h" + +template +static void Kernel_Ex(cudaStream_t stream, + const uint4* Weight1, + const uint4* Weight2, + const half* Scales, + const half* B, + OutputDataType* C, + const size_t M_Global, + const size_t N_Global, + const size_t K_Global, + int Split_K) +{ +#ifdef DEBUG_MODE + printf("\n"); + printf("Launcher.cu->Kernel_Ex():\n"); + printf("M: %d, N: %d, K: %d, SplitK: %d\n", M_Global, N_Global, K_Global, Split_K); + printf("TILE_M: %d, TILE_K: %d, TILE_N: %d\n", + TilingConfig::TILE_M, + TilingConfig::TILE_K, + TilingConfig::TILE_N); +#endif + static size_t SHMEM_SZ = + max(TilingConfig::SMEM_SIZE_B_TILE + SMEM_SIZE_A1_TILE + SMEM_SIZE_A2_TILE, + TilingConfig::SMEM_SIZE_C_TILE); + cudaFuncSetAttribute(QUANT_GEMM_Kernel, + cudaFuncAttributeMaxDynamicSharedMemorySize, + SHMEM_SZ); + size_t dimN = (N_Global - 1) / TilingConfig::TILE_N + 1; + size_t dimM = M_Global * Split_K / TilingConfig::TILE_M; + dim3 GridDim(dimN, dimM, 1); + dim3 BlockDim(WARP_SIZE * TilingConfig::BLOCK_WARPS, 1, 1); + +#ifdef DEBUG_MODE + printf( + "GridDim.x: %d, GridDim.y: %d, GridDim.z: %d, BlockDim.x: %d, BlockDim.y: %d, BlockDim.z: " + "%d SHMEM_SZ: %d\n", + GridDim.x, + GridDim.y, + GridDim.z, + BlockDim.x, + BlockDim.y, + BlockDim.z, + SHMEM_SZ); + printf("\n"); +#endif + + QUANT_GEMM_Kernel<<>>( + Weight1, Weight2, Scales, B, C, M_Global, N_Global, K_Global, Split_K); +} + +/* + * + */ +cudaError_t fp6_linear_kernel(cudaStream_t stream, + const uint4* Weight1, + const uint4* Weight2, + const half* Scales, + const half* B, + half* C, + const size_t M_Global, + const size_t N_Global, + const size_t K_Global, + float* Reduction_Workspace, // Reduction_Workspace_Size = Split_K * + // M_Global * N_Global * sizeof(fp32) + int Split_K) +{ + assert(M_Global % 256 == 0); + assert(K_Global % 64 == 0); + assert(N_Global > 0); + + // Work around to support more N shapes: + size_t N_PowerOf2; + if (N_Global > 0 && N_Global <= 8) N_PowerOf2 = 8; + if (N_Global > 8 && N_Global <= 16) N_PowerOf2 = 16; + if (N_Global > 16 && N_Global <= 32) N_PowerOf2 = 32; + if (N_Global > 32 && N_Global <= 64) N_PowerOf2 = 64; + if (N_Global > 64 && N_Global <= 128) N_PowerOf2 = 128; + if (N_Global > 128) N_PowerOf2 = ((N_Global - 1) / 128 + 1) * 128; + + if (Split_K == 1) { + switch (N_PowerOf2) { + case 8: + Kernel_Ex, half>( + stream, Weight1, Weight2, Scales, B, C, M_Global, N_Global, K_Global, Split_K); + break; + case 16: + Kernel_Ex, half>( + stream, Weight1, Weight2, Scales, B, C, M_Global, N_Global, K_Global, Split_K); + break; + case 32: + Kernel_Ex, half>( + stream, Weight1, Weight2, Scales, B, C, M_Global, N_Global, K_Global, Split_K); + break; + case 64: + Kernel_Ex, half>( + stream, Weight1, Weight2, Scales, B, C, M_Global, N_Global, K_Global, Split_K); + break; + case 128: + Kernel_Ex, half>( + stream, Weight1, Weight2, Scales, B, C, M_Global, N_Global, K_Global, Split_K); + break; + default: + if (N_PowerOf2 % 128 != 0) { + printf("QuantLLM_API Error: Unsupported N dimension %lu!\n", N_PowerOf2); + return cudaErrorUnknown; + } + Kernel_Ex, half>( + stream, Weight1, Weight2, Scales, B, C, M_Global, N_Global, K_Global, Split_K); + break; + } + } else { + switch (N_PowerOf2) { + case 8: + Kernel_Ex, float>(stream, + Weight1, + Weight2, + Scales, + B, + Reduction_Workspace, + M_Global, + N_Global, + K_Global, + 
Split_K); + break; + case 16: + Kernel_Ex, float>(stream, + Weight1, + Weight2, + Scales, + B, + Reduction_Workspace, + M_Global, + N_Global, + K_Global, + Split_K); + break; + case 32: + Kernel_Ex, float>(stream, + Weight1, + Weight2, + Scales, + B, + Reduction_Workspace, + M_Global, + N_Global, + K_Global, + Split_K); + break; + case 64: + Kernel_Ex, float>(stream, + Weight1, + Weight2, + Scales, + B, + Reduction_Workspace, + M_Global, + N_Global, + K_Global, + Split_K); + break; + case 128: + Kernel_Ex, float>(stream, + Weight1, + Weight2, + Scales, + B, + Reduction_Workspace, + M_Global, + N_Global, + K_Global, + Split_K); + break; + default: + if (N_PowerOf2 % 128 != 0) { + printf("QuantLLM_API Error: Unsupported N dimension %lu!\n", N_PowerOf2); + return cudaErrorUnknown; + } + Kernel_Ex, float>(stream, + Weight1, + Weight2, + Scales, + B, + Reduction_Workspace, + M_Global, + N_Global, + K_Global, + Split_K); + break; + } + // Reduction for SplitK + dim3 GridDim((M_Global * N_Global) / REDUCTION_ELEMENT_PER_THREADBLOCK, 1, 1); + dim3 BlockDim(WARP_SIZE, 1, 1); + SplitK_Reduction<<>>( + C, Reduction_Workspace, M_Global, N_Global, Split_K); + } + return cudaGetLastError(); +} + +/* +Computes FP6-FP16 GEMM (PyTorch interface). + +[Mathematical Formula] +Standard definition of linear layer: Out = In * trans(W), where In, Out, and W are stored in +row-major. After Equivalent transformation : trans(Out) = W * trans(In). Note that we do not +perform "transpose" during runtime, we instead interpret the In/Out as column-major matrices when +calling our CUDA kernel. + +[Inputs] + _in_feats: tensor of shape [B, IC]; // half + _weights: int tensor of shape [OC, IC // 16 * 3]; // 3 INT32 words contains 16 FP6 weights. + _scales: tensor of shape [OC]; // half + splitK: splitting the MatMul problem along K dimension for higher GPU utilization, default 1. +[Outputs] + _out_feats: tensor of shape [B, OC]; // half +*/ +torch::Tensor fp6_linear_forward_cuda(torch::Tensor _in_feats, + torch::Tensor _weights, + torch::Tensor _scales, + int splitK) +{ + int num_in_feats = _in_feats.size(0); + int num_in_channels = _in_feats.size(1); + int num_out_channels = _weights.size(0); + assert(num_in_channels % 64 == 0); + assert((num_in_channels / 16 * 3) == + _weights.size(1)); // Making sure the K dimension is matched. + // + int M = num_out_channels; + int K = num_in_channels; + int N = num_in_feats; + // Input Tensors + auto weight1 = reinterpret_cast( + _weights.data_ptr()); // weights is [OC, IC] but in FP6. + auto weight2 = weight1 + num_in_channels * num_out_channels * 2 / 128; + auto in_feats = reinterpret_cast(_in_feats.data_ptr()); + auto scales = reinterpret_cast(_scales.data_ptr()); + // Output Tensors + auto options = torch::TensorOptions().dtype(_in_feats.dtype()).device(_in_feats.device()); + at::Tensor _out_feats = torch::empty({num_in_feats, num_out_channels}, options); + auto out_feats = reinterpret_cast(_out_feats.data_ptr()); + + float* Reduction_Workspace = nullptr; + if (splitK != 1) { + auto options = torch::TensorOptions().dtype(torch::kFloat32).device(_in_feats.device()); + at::Tensor _workspace = torch::empty({splitK, num_in_feats, num_out_channels}, options); + auto Reduction_Workspace = reinterpret_cast( + _out_feats.data_ptr()); // Reduction_Workspace_Size = Split_K * M_Global * + // N_Global * sizeof(fp32) + } + + fp6_linear_kernel(0, // Using default stream here. 
+ weight1, + weight2, + scales, + in_feats, + out_feats, + M, + N, + K, + Reduction_Workspace, + splitK); + + return _out_feats; +} + +/* + * Inputs: + * (1) unsigned char Weight_6bit [M*K*6/8] + * Outputs: + * (1) unsigned char Weight_2bit [M*K*2/8] + * (2) unsigned char Weight_4bit [M*K*4/8] + * + * Assumption: Weight_6bit, Weight_2bit, Weight_4bit all stored continuously in row-major. + * 8 FP6 = 6 Bytes + * 8 FP4 = 4 Bytes + * 8 FP2 = 2 Bytes + */ + +/* + * Weight prepacking (Pytorch interface). + * [Input & Output] + * fp6_tensor: int tensor of shape [OC, IC // 16 * 3]; // 3 INT32 words contains 16 FP6 weights. + * [Output] + * packed_tensor: int tensor of shape [OC, IC // 16 * 3]; + */ +torch::Tensor weight_matrix_prepacking_cpu(torch::Tensor fp6_tensor, size_t OC, size_t IC) +{ + assert((OC % 256 == 0) && (IC % 64 == 0)); + assert((fp6_tensor.size(0) == OC) && (fp6_tensor.size(1) == IC / 16 * 3)); + // auto packed_tensor = torch::empty_like(fp6_tensor); + // auto packed_tensor_ptr = reinterpret_cast(packed_tensor.data_ptr()); + auto fp6_tensor_ptr = reinterpret_cast(fp6_tensor.data_ptr()); + weight_matrix_prepacking(fp6_tensor_ptr, OC, IC); + return fp6_tensor; +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/linear_kernels_cuda.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/linear_kernels_cuda.h new file mode 100644 index 0000000000000000000000000000000000000000..6a83290f0cb55f7094af44664693a6b1a01e90c9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/linear_kernels_cuda.h @@ -0,0 +1,51 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +// This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112 + +#ifndef DEEPSPEED_CUDA_LINEAR_FP6_LINEAR_CUH +#define DEEPSPEED_CUDA_LINEAR_FP6_LINEAR_CUH + +#include +#include +#include + +#include + +/* + * Computes FP6-FP16 GEMM (C++ interface). + */ +cudaError_t fp6_linear_kernel(cudaStream_t stream, + const uint4* Weight1, + const uint4* Weight2, + const half* Scales, + const half* B, + half* C, + const size_t M_Global, + const size_t N_Global, + const size_t K_Global, + float* Reduction_Workspace, // Reduction_Workspace_Size = Split_K * + // M_Global * N_Global * sizeof(fp32) + int Split_K); + +/* + * Computes FP6-FP16 GEMM (PyTorch interface). + */ +torch::Tensor fp6_linear_forward_cuda(torch::Tensor _in_feats, + torch::Tensor _weights, + torch::Tensor _scales, + int splitK = 1); + +/* + * In-place weight prepacking (C++ interface). + */ +void weight_matrix_prepacking(int* FP6Weights, size_t M, size_t K); + +/* + * Weight prepacking (Pytorch interface). + */ +torch::Tensor weight_matrix_prepacking_cpu(torch::Tensor fp6_tensor, size_t M, size_t K); + +#endif diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..640a723076503b8ce4add447b6210b27a2e6f52f --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .rms_norm import CUDARMSNorm +from .rms_pre_norm import CUDARMSPreNorm diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f04a3d74d2f12be5ae5955f56f47f7fbbe8738b1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__pycache__/rms_norm.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__pycache__/rms_norm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5934077d477d452fd3fd8bd69cf6a8ab7b92a0f7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__pycache__/rms_norm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__pycache__/rms_norm_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__pycache__/rms_norm_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3c11b5b9f2e94205103bbc43bf44b8486108d8c Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__pycache__/rms_norm_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__pycache__/rms_pre_norm.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__pycache__/rms_pre_norm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff4e05cbca6523a9c2eee5c01319bff24f0722be Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__pycache__/rms_pre_norm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm.cpp b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c67712df438aabf39307451bcef115bb815f47ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm.cpp @@ -0,0 +1,123 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "rms_norm.h" + +#ifdef BF16_AVAILABLE +#define DISPATCH_FOR_FLOAT(DTYPE, ...) \ + [&] { \ + if (DTYPE == torch::kFloat32) { \ + using scalar_t = float; \ + return __VA_ARGS__(); \ + } else if (DTYPE == torch::kFloat16) { \ + using scalar_t = __half; \ + return __VA_ARGS__(); \ + } else if (DTYPE == torch::kBFloat16) { \ + using scalar_t = __nv_bfloat16; \ + return __VA_ARGS__(); \ + } else { \ + TORCH_CHECK(false, "Unsupported dtype for BiasActivation"); \ + } \ + }() +#else +#define DISPATCH_FOR_FLOAT(DTYPE, ...) 
\ + [&] { \ + if (DTYPE == torch::kFloat32) { \ + using scalar_t = float; \ + return __VA_ARGS__(); \ + } else if (DTYPE == torch::kFloat16) { \ + using scalar_t = __half; \ + return __VA_ARGS__(); \ + } else { \ + TORCH_CHECK(false, "Unsupported dtype for BiasActivation"); \ + } \ + }() +#endif + +void rms_norm(torch::Tensor& norm_output, + torch::Tensor& norm_input, + torch::Tensor& gamma, + float epsilon) +{ + TORCH_CHECK(norm_output.scalar_type() == norm_input.scalar_type(), + "norm_output and norm_input should have the same data type"); + TORCH_CHECK(norm_output.scalar_type() == gamma.scalar_type(), + "norm_output and gamma should have the same data type"); + + const int32_t rows = norm_input.size(0); + const int32_t cols = norm_input.size(1); + + TORCH_CHECK(norm_output.size(0) == rows, + "norm_output and norm_input should have the same first dimension"); + TORCH_CHECK(norm_output.size(1) == cols, + "norm_output and norm_input should have the same second dimension"); + + DISPATCH_FOR_FLOAT(norm_output.scalar_type(), [&] { + scalar_t* norm_output_ptr = reinterpret_cast(norm_output.data_ptr()); + scalar_t* norm_input_ptr = reinterpret_cast(norm_input.data_ptr()); + scalar_t* gamma_ptr = reinterpret_cast(gamma.data_ptr()); + scalar_t* null_t = nullptr; + + launch_rms_norm(norm_output_ptr, + null_t, + norm_input_ptr, + null_t, + gamma_ptr, + epsilon, + rows, + cols, + at::cuda::getCurrentCUDAStream()); + }); +} + +void rms_pre_norm(torch::Tensor& norm_output, + torch::Tensor& residual_output, + torch::Tensor& norm_input, + torch::Tensor& residual_input, + torch::Tensor& gamma, + float epsilon) +{ + TORCH_CHECK(norm_output.scalar_type() == norm_input.scalar_type(), + "norm_output and norm_input should have the same data type"); + TORCH_CHECK(norm_output.scalar_type() == gamma.scalar_type(), + "norm_output and gamma should have the same data type"); + + const int32_t rows = norm_input.size(0); + const int32_t cols = norm_input.size(1); + + TORCH_CHECK(norm_output.size(0) == rows, + "norm_output and norm_input should have the same first dimension"); + TORCH_CHECK(norm_output.size(1) == cols, + "norm_output and norm_input should have the same second dimension"); + + TORCH_CHECK(residual_output.size(0) == rows, + "residual_output and norm_input should have the same first dimension"); + TORCH_CHECK(residual_output.size(1) == cols, + "residual_output and norm_input should have the same second dimension"); + + TORCH_CHECK(residual_input.size(0) == rows, + "residual_input and norm_input should have the same first dimension"); + TORCH_CHECK(residual_input.size(1) == cols, + "residual_input and norm_input should have the same second dimension"); + + DISPATCH_FOR_FLOAT(norm_output.scalar_type(), [&] { + scalar_t* norm_output_ptr = reinterpret_cast(norm_output.data_ptr()); + scalar_t* residual_output_ptr = reinterpret_cast(residual_output.data_ptr()); + const scalar_t* norm_input_ptr = reinterpret_cast(norm_input.data_ptr()); + const scalar_t* residual_input_ptr = + reinterpret_cast(residual_input.data_ptr()); + const scalar_t* gamma_ptr = reinterpret_cast(gamma.data_ptr()); + + launch_rms_norm(norm_output_ptr, + residual_output_ptr, + norm_input_ptr, + residual_input_ptr, + gamma_ptr, + epsilon, + rows, + cols, + at::cuda::getCurrentCUDAStream()); + }); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm.h new file mode 100644 index 
0000000000000000000000000000000000000000..7867fb65964f6e92dc9d1d34908afce63baeb66b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm.h @@ -0,0 +1,33 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include +#include +#include "ds_kernel_utils.h" + +template +void launch_rms_norm(T* norm_output, + T* res_output, + const T* vals, + const T* residual, + const T* gamma, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream); + +void rms_norm(torch::Tensor& norm_output, + torch::Tensor& norm_input, + torch::Tensor& gamma, + float epsilon); + +void rms_pre_norm(torch::Tensor& norm_output, + torch::Tensor& residual_output, + torch::Tensor& norm_input, + torch::Tensor& residual_input, + torch::Tensor& gamma, + float epsilon); diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..deb5d33111a9383ac0e7e552ac4737d999e6fc69 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm.py @@ -0,0 +1,28 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from .rms_norm_base import CUDARMSNormBase + + +class CUDARMSNorm(CUDARMSNormBase): + """ + Floating point layer norm kernel for CUDA/RoCM. + + Performs: z = ln(x) + """ + + def __call__(self, output_z: torch.Tensor, input_x: torch.Tensor, gamma: torch.Tensor) -> torch.Tensor: + """ + output_z may alias input_x directly. All Tensors should have the same shape. + + Parameters: + output_z (torch.Tensor): Output tensor. + input_x (torch.Tensor): Input tensor. + gamma (torch.Tensor): Gamma tensor. + """ + self.inf_module.rms_norm(output_z, input_x, gamma, self.epsilon) + return output_z diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm_base.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm_base.py new file mode 100644 index 0000000000000000000000000000000000000000..62bc9d056ade4aaf256a5eec48f42e61edbd80aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm_base.py @@ -0,0 +1,37 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from ... import DSKernelBase +from ....inference_utils import elem_size +from deepspeed.ops.op_builder import InferenceCoreBuilder + + +class CUDARMSNormBase(DSKernelBase): + """ + Base class for CUDA LN kernels. They all same the same validation logic, + so we can share it here. + """ + + supported_dtypes = [torch.float16, torch.bfloat16, torch.float32] + + def __init__(self, channels: int, fp_dtype: torch.dtype, epsilon: float = 1e-5): + """ + Parameters: + channels (int): Number of channels in the input tensor. Must be divisible to align + to 16 bytes. + fp_dtype (torch.dtype): Data type for the input/output/gamma. Supported values + are torch.float16, torch.bfloat16, and torch.float32. 
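+            epsilon (float): Value added to the mean of squares before the reciprocal
+                square root, for numerical stability. Defaults to 1e-5.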
+ """ + if fp_dtype not in CUDARMSNormBase.supported_dtypes: + raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format( + fp_dtype, CUDARMSNormBase.supported_dtypes)) + + if elem_size(fp_dtype) * channels % 16 != 0: + raise ValueError("channels must be divisible by 16 bytes") + + self.inf_module = InferenceCoreBuilder().load() + self.epsilon = epsilon diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm_cuda.cu b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..e69d3c36cc001038c0b66fb107f3cae2075567d6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm_cuda.cu @@ -0,0 +1,262 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "conversion_utils.h" +#include "ds_kernel_utils.h" +#include "memory_access_utils.h" +#include "reduction_utils.h" + +namespace cg = cooperative_groups; +using rop = reduce::ROpType; + +namespace rms { +constexpr int granularity = 16; +} // namespace rms + +template +__global__ void rms_norm(T* output, const T* vals, const T* gamma, float epsilon, int elems_per_row) +{ + constexpr int T_per_load = rms::granularity / sizeof(T); + + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + + // X-dimension of the block + const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) + + (tb.thread_index().y * elems_per_row); + const int thread_offset = tb.thread_index().x * T_per_load; + const int base_offset = block_offset + thread_offset; + const int stride = blockDim.x * T_per_load; + + float var_sum = reduce::init(); + + const T* input_base = vals + base_offset; + + T local_buffer[UNROLL * T_per_load]; + +#pragma unroll + for (int i = 0; i < UNROLL; i++) { + T* iteration_buffer = local_buffer + (i * T_per_load); + + mem_access::load_global(iteration_buffer, + input_base + (i * stride), + thread_offset + (i * stride) < elems_per_row); + +#pragma unroll + for (int j = 0; j < T_per_load; j++) { + float up_cast = conversion::to(iteration_buffer[j]); + float sq_val = up_cast * up_cast; + var_sum = reduce::element(var_sum, sq_val); + } + } + + reduce::partitioned_block(tb, warp, var_sum); + const float var = var_sum / elems_per_row; + const T denom = conversion::to(__frsqrt_rn(var + epsilon)); + + T* block_output = output + block_offset; + +#pragma unroll + for (int i = 0; i < UNROLL; i++) { + T* iteration_buffer = local_buffer + (i * T_per_load); + const int iter_idx = i * stride + thread_offset; + const bool do_loads = (iter_idx < elems_per_row); + + T gamma_local[T_per_load]; + + mem_access::load_global(gamma_local, gamma + iter_idx, do_loads); + +#pragma unroll + for (int j = 0; j < T_per_load; j++) { + iteration_buffer[j] *= denom; + iteration_buffer[j] *= gamma_local[j]; + } + + if (do_loads) { + mem_access::store_global(block_output + iter_idx, iteration_buffer); + } + } +} + +template +__global__ void pre_rms_norm(T* output, + T* res_out, + const T* vals, + const T* residual, + const T* gamma, + float epsilon, + int elems_per_row) +{ + constexpr int T_per_load = rms::granularity / sizeof(T); + + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + + // X-dimension of the block + const int block_offset = 
(tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) + + (tb.thread_index().y * elems_per_row); + const int thread_offset = tb.thread_index().x * T_per_load; + const int base_offset = block_offset + thread_offset; + const int stride = blockDim.x * T_per_load; + + float var_sum = reduce::init(); + + const T* input_base = vals + base_offset; + const T* residual_base = residual + base_offset; + T* res_output = res_out + base_offset; + + T local_buffer[UNROLL * T_per_load]; + +#pragma unroll + for (int i = 0; i < UNROLL; i++) { + T* iteration_buffer = local_buffer + (i * T_per_load); + T residual_buffer[T_per_load]; + + const int iter_offset = i * stride + thread_offset; + const bool do_loads = (iter_offset < elems_per_row); + + mem_access::load_global( + iteration_buffer, input_base + (i * stride), do_loads); + mem_access::load_global( + residual_buffer, residual_base + (i * stride), do_loads); + +#pragma unroll + for (int j = 0; j < T_per_load; j++) { + iteration_buffer[j] += residual_buffer[j]; + float vals_up_cast = conversion::to(iteration_buffer[j]); + + var_sum = reduce::element(var_sum, vals_up_cast * vals_up_cast); + } + + if (do_loads) { + mem_access::store_global(res_output + i * stride, iteration_buffer); + } + } + + reduce::partitioned_block(tb, warp, var_sum); + const float var = var_sum / elems_per_row; + const T denom = conversion::to(__frsqrt_rn(var + epsilon)); + + T* block_output = output + block_offset; + +#pragma unroll + for (int i = 0; i < UNROLL; i++) { + T* iteration_buffer = local_buffer + (i * T_per_load); + const int iter_idx = i * stride + thread_offset; + const bool do_loads = (iter_idx < elems_per_row); + + T gamma_local[T_per_load]; + + mem_access::load_global(gamma_local, gamma + iter_idx, do_loads); + +#pragma unroll + for (int j = 0; j < T_per_load; j++) { + iteration_buffer[j] *= denom; + iteration_buffer[j] *= gamma_local[j]; + } + + if (do_loads) { + mem_access::store_global(block_output + iter_idx, iteration_buffer); + } + } +} + +#define LAUNCH_RMS_NORM(UNROLL, threadsPerGroup, maxThreads) \ + rms_norm \ + <<>>(norm_output, vals, gamma, epsilon, elems_per_row); + +#define LAUNCH_PRE_RMS_NORM(UNROLL, threadsPerGroup, maxThreads) \ + pre_rms_norm<<>>( \ + norm_output, res_output, vals, residual, gamma, epsilon, elems_per_row); + +#define LAUNCH_ALL_RMS_NORM(UNROLL, threadsPerGroup, maxThreads) \ + if (pre_norm) { \ + LAUNCH_PRE_RMS_NORM(UNROLL, threadsPerGroup, maxThreads) \ + } else { \ + LAUNCH_RMS_NORM(UNROLL, threadsPerGroup, maxThreads) \ + } + +template +void launch_rms_norm(T* norm_output, + T* res_output, + const T* vals, + const T* residual, + const T* gamma, + float epsilon, + int rows, + int elems_per_row, + cudaStream_t stream) +{ + // 8 for __half, 4 for float + constexpr int T_per_load = rms::granularity / sizeof(T); + constexpr int maxThreads = 256; + constexpr int internalUnroll = sizeof(T) == 4 ? 4 : 2; + + const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false; + const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internalUnroll; + + // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of + // warp-sized blocks rather than stepping up to 64/96 threads + const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step); + const int threads_per_group = (one_step_threads < maxThreads) ? one_step_threads : maxThreads; + + const int groups_per_block_max = + is_subblock_schedule ? 
(maxThreads + threads_per_group - 1) / threads_per_group : 1; + const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max; + const int groups_launch = (groups_per_block + rows - 1) / groups_per_block; + + dim3 block(threads_per_group, groups_per_block); + dim3 grid(groups_launch); + + const int elems_per_step = threads_per_group * h_per_step; + const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step; + + bool pre_norm = (residual == nullptr) ? false : true; + + if (is_subblock_schedule) { + // <=128 + if (threads_per_group == 1) { + LAUNCH_ALL_RMS_NORM(1, 1, maxThreads); + } else if (threads_per_group == 2) { + LAUNCH_ALL_RMS_NORM(1, 2, maxThreads); + } else if (threads_per_group == 4) { + LAUNCH_ALL_RMS_NORM(1, 4, maxThreads); + } else if (threads_per_group == 8) { + LAUNCH_ALL_RMS_NORM(1, 8, maxThreads); + } else if (threads_per_group == 16) { + LAUNCH_ALL_RMS_NORM(1, 16, maxThreads); + } + } else if (external_unRoll == 1) { + // 129 - 4096 elems + // (this can launch with 1-7 warps as well) + LAUNCH_ALL_RMS_NORM(1 * internalUnroll, maxThreads, maxThreads); + } else if (external_unRoll == 2) { + // 4097 - 8192 elems + LAUNCH_ALL_RMS_NORM(2 * internalUnroll, maxThreads, maxThreads); + } else if (external_unRoll == 3) { + // 8193 - 12288 elems + LAUNCH_ALL_RMS_NORM(3 * internalUnroll, maxThreads, maxThreads); + } else if (external_unRoll == 4) { + // 12289 - 16384 elems + LAUNCH_ALL_RMS_NORM(4 * internalUnroll, maxThreads, maxThreads); + } +} + +#define INSTANTIATE_LAUNCH_RMS_NORM(T) \ + template void launch_rms_norm(T * norm_output, \ + T * res_output, \ + const T* vals, \ + const T* residual, \ + const T* gamma, \ + float epsilon, \ + int rows, \ + int elems_per_row, \ + cudaStream_t stream); + +INSTANTIATE_LAUNCH_RMS_NORM(float) +INSTANTIATE_LAUNCH_RMS_NORM(__half) +#ifdef BF16_AVAILABLE +INSTANTIATE_LAUNCH_RMS_NORM(__nv_bfloat16) +#endif diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_pre_norm.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_pre_norm.py new file mode 100644 index 0000000000000000000000000000000000000000..3b040d88b50f9cea2744cc8b41adb229278263f7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_pre_norm.py @@ -0,0 +1,39 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Tuple + +import torch + +from .rms_norm_base import CUDARMSNormBase + + +class CUDARMSPreNorm(CUDARMSNormBase): + """ + Floating point pre-LayerNorm kernel for CUDA/RoCM. + + Performs: z_res = x_res + y_hid + z_hid = ln(z_hid) + """ + + def __call__(self, z_res: torch.Tensor, z_hid: torch.Tensor, x_res: torch.Tensor, y_hid: torch.Tensor, + gamma: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + z_res can alias x_res. All non-parameter input/output tensors + must have the same shape. z_hid can alias y_hid. + + Parameters: + z_res (torch.Tensor): Output residual. + z_hid (torch.Tensor): Output hidden states. + x_res (torch.Tensor): Input residual. + y_hid (torch.Tensor): Input hidden states. + gamma (torch.Tensor): Gamma tensor. + beta (torch.Tensor): Beta tensor. + + Returns: + output (torch.Tensor): Output tensor. 
+ """ + self.inf_module.rms_pre_norm(z_hid, z_res, y_hid, x_res, gamma, self.epsilon) + return z_res, z_hid diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/gated_activations/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/gated_activations/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..05479d86c90673f7e724024c090bec5d0f8fa7a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/gated_activations/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .gated_activation import * diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/gated_activations/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/gated_activations/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c967e602f61b61a1d7af503134009d5c7dfc3d4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/gated_activations/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/gated_activations/__pycache__/gated_activation.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/gated_activations/__pycache__/gated_activation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4882500967a9ecfbdbe8d83311b770e0928b4092 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/gated_activations/__pycache__/gated_activation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/gated_activations/gated_activation.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/gated_activations/gated_activation.py new file mode 100644 index 0000000000000000000000000000000000000000..ca1b62ba5c364d33582cb6efe0aec34f4f7b469b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/gated_activations/gated_activation.py @@ -0,0 +1,65 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Optional + +import torch + +from ... import DSKernelBase +from ....inference_utils import ActivationType, elem_size +from deepspeed.ops.op_builder import InferenceCoreBuilder + + +class CUDAGatedActivation(DSKernelBase): + """ + CUDA implementation of gated activation kernel. This kernel assumes that the input + tensor has gate and activation values in adjacent channels. The output tensor should + have half the dimensionality of the input tensor. + """ + + supported_dtypes = [torch.float16, torch.bfloat16, torch.float32] + supported_act_fns = [ActivationType.GEGLU, ActivationType.ReGLU, ActivationType.SiGLU] + + def __init__(self, channels: int, fp_dtype: torch.dtype, act_fn: ActivationType) -> None: + """ + Compile and validate for the gated activation function. + + Args: + channels (int): Number of columns in the output tensor. Must be divisible to align + to 8 bytes. + fp_dtype (torch.dtype): Data type for the input/output/gamma. Supported values + are torch.float16, torch.bfloat16, and torch.float32. + act_fn (ActivationType): Activation function to use. Only GEGLU is supported. 
+ """ + if fp_dtype not in CUDAGatedActivation.supported_dtypes: + raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format( + fp_dtype, CUDAGatedActivation.supported_dtypes)) + + act_fn = ActivationType(act_fn) + if act_fn not in CUDAGatedActivation.supported_act_fns: + raise ValueError("Unsupported activation function: {}, supported_act_fns are {}".format( + act_fn, CUDAGatedActivation.supported_act_fns)) + + if elem_size(fp_dtype) * channels % 8 != 0: + raise ValueError("Channels must be divisible by 16 bytes") + + if elem_size(fp_dtype) * channels > 98304: + raise ValueError( + "Kernel only compiled to support 98304 bytes per row, please file an issue if your model requires more." + ) + + self.inf_module = InferenceCoreBuilder().load() + self.act_fn = act_fn + self.kernel = self.inf_module.gated_activation + + def __call__(self, output: torch.Tensor, input: torch.Tensor, bias: Optional[torch.Tensor] = None) -> None: + """ + Performs gated activation on the input tensor, writing the result to the output tensor. + + Args: + output (torch.Tensor): Output tensor. Can be of [T, C // 2] or [B, S, C // 2] + input (torch.Tensor): Input tensor. Can be of [T, C] or [B, S, C] + """ + self.kernel(output, input, bias, self.act_fn.value) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/gated_activations/gated_activation_kernels.cpp b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/gated_activations/gated_activation_kernels.cpp new file mode 100644 index 0000000000000000000000000000000000000000..05463c75138ca5e9517788849b2818b515a4c18f --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/gated_activations/gated_activation_kernels.cpp @@ -0,0 +1,72 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "gated_activation_kernels.h" + +#ifdef BF16_AVAILABLE +#define DISPATCH_FOR_FLOAT(DTYPE, ...) \ + [&] { \ + if (DTYPE == torch::kFloat32) { \ + using scalar_t = float; \ + return __VA_ARGS__(); \ + } else if (DTYPE == torch::kFloat16) { \ + using scalar_t = __half; \ + return __VA_ARGS__(); \ + } else if (DTYPE == torch::kBFloat16) { \ + using scalar_t = __nv_bfloat16; \ + return __VA_ARGS__(); \ + } else { \ + TORCH_CHECK(false, "Unsupported dtype for BiasActivation"); \ + } \ + }() +#else +#define DISPATCH_FOR_FLOAT(DTYPE, ...) \ + [&] { \ + if (DTYPE == torch::kFloat32) { \ + using scalar_t = float; \ + return __VA_ARGS__(); \ + } else if (DTYPE == torch::kFloat16) { \ + using scalar_t = __half; \ + return __VA_ARGS__(); \ + } else { \ + TORCH_CHECK(false, "Unsupported dtype for BiasActivation"); \ + } \ + }() +#endif + +void ds_gated_activation(at::Tensor& output, + at::Tensor& input, + c10::optional& bias, + int activation_type_raw) +{ + bool ragged_input = input.dim() == 2; + + const ActivationType activation_type = static_cast(activation_type_raw); + + const int rows = ragged_input ? input.size(0) : input.size(0) * input.size(1); + const int cols = ragged_input ? 
input.size(1) : input.size(2); + + DISPATCH_FOR_FLOAT(input.scalar_type(), [&] { + scalar_t* bias_ptr = nullptr; + if (bias.has_value()) { + TORCH_CHECK(bias.value().scalar_type() == input.scalar_type(), + "Bias type must match input type"); + TORCH_CHECK(bias.value().numel() == cols, + "Bias must have the same number of elements as the input channels"); + bias_ptr = reinterpret_cast(bias.value().data_ptr()); + } + + scalar_t* output_ptr = reinterpret_cast(output.data_ptr()); + const scalar_t* input_ptr = reinterpret_cast(input.data_ptr()); + + launch_gated_activation(output_ptr, + input_ptr, + bias_ptr, + rows, + cols, + activation_type, + c10::cuda::getCurrentCUDAStream()); + }); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/gated_activations/gated_activation_kernels.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/gated_activations/gated_activation_kernels.h new file mode 100644 index 0000000000000000000000000000000000000000..6ae01e99679ad494546c79dbb9e66a3b7bf49c7d --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/gated_activations/gated_activation_kernels.h @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include +#include +#include "activation_type.h" +#include "ds_kernel_utils.h" + +template +void launch_gated_activation(T* output, + const T* vals, + const T* bias, + int rows, + int cols, + ActivationType activation_type, + cudaStream_t stream); + +void ds_gated_activation(at::Tensor& output, + at::Tensor& input, + c10::optional& bias, + int activation_type_raw); diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/gated_activations/gated_activation_kernels_cuda.cu b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/gated_activations/gated_activation_kernels_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..cfa62f94596ae1ed20bca5b041eea5e8259b48ac --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/gated_activations/gated_activation_kernels_cuda.cu @@ -0,0 +1,172 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include "activation_type.h" +#include "conversion_utils.h" +#include "ds_kernel_utils.h" +#include "memory_access_utils.h" + +namespace cg = cooperative_groups; + +namespace gated_act { + +constexpr int access_size = 16; +constexpr int threads = 1024; + +template +float gated_act_fn(float x, float y); + +template <> +DS_D_INLINE float gated_act_fn(float x, float y) +{ + constexpr float sqrt_param = 0.79788456080286535587989211986876f; + constexpr float mul_param = 0.044715; + return y * x * 0.5f * (1.0f + tanhf(sqrt_param * (x + mul_param * x * x * x))); +} + +template <> +DS_D_INLINE float gated_act_fn(float x, float y) +{ + return y * (x > 0.0f ? 
x : 0.0f); +} + +template <> +DS_D_INLINE float gated_act_fn(float x, float y) +{ + return y * (x / (1.0f + expf(-x))); +} + +} // namespace gated_act + +template +__global__ void gated_activation_kernel(T* output, + const T* input, + const T* bias, + int rows, + int cols) +{ + constexpr int read_vector = gated_act::access_size / sizeof(T); + constexpr int write_vector = read_vector / 2; + + const int row = blockIdx.x; + const int col = threadIdx.x * read_vector; + + const T* input_row = input + row * cols; + T* output_row = output + row * cols / 2; + +#pragma unroll + for (int i = 0; i < loopUnroll; i++) { + T read[read_vector]; + T bias_read[read_vector]; + T store[write_vector]; + + const int read_offset = col + gated_act::threads * read_vector * i; + const int write_offset = col / 2 + gated_act::threads * write_vector * i; + + if (i != loopUnroll - 1 || read_offset < cols) { + mem_access::load_global(read, input_row + read_offset); + mem_access::load_global( + bias_read, bias + read_offset, bias != nullptr); + + for (int j = 0; j < write_vector; j++) { + float g_val = + conversion::to(read[j * 2]) + conversion::to(bias_read[j * 2]); + float a_val = conversion::to(read[j * 2 + 1]) + + conversion::to(bias_read[j * 2 + 1]); + + float act_val = gated_act::gated_act_fn(g_val, a_val); + store[j] = conversion::to(act_val); + } + + mem_access::store_global(output_row + write_offset, store); + } + } +} + +#define DISPATCH_UNROLL(unroll_val) \ + gated_activation_kernel \ + <<>>(output, input, bias, rows, cols); + +template +void launch_gated_activation_impl(T* output, + const T* input, + const T* bias, + int rows, + int cols, + cudaStream_t stream) +{ + constexpr int read_vector = gated_act::access_size / sizeof(T); + constexpr int cols_per_unroll = gated_act::threads * read_vector; + const int req_threads = (cols + read_vector - 1) / read_vector; + const int threads = std::min(req_threads, gated_act::threads); + + const dim3 grid(rows); + const dim3 block(threads); + const int unroll = (cols + cols_per_unroll - 1) / cols_per_unroll; + + if (unroll == 1) { + DISPATCH_UNROLL(1); + } else if (unroll == 2) { + DISPATCH_UNROLL(2); + } else if (unroll == 3) { + DISPATCH_UNROLL(3); + } else if (unroll == 4) { + DISPATCH_UNROLL(4); + } else if (unroll == 5) { + DISPATCH_UNROLL(5); + } else if (unroll == 6) { + DISPATCH_UNROLL(6); + } else if (unroll == 7) { + DISPATCH_UNROLL(7); + } else { + // TODO: provide a kernel with an outer loop to handle larger columns. 
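+        // Illustrative arithmetic for this limit (using the constants above: access_size = 16,
+        // threads = 1024): with __half inputs, read_vector = 16 / 2 = 8, so each unroll step
+        // covers 1024 * 8 = 8192 columns, and the widest unrolled variant (7) handles rows of
+        // up to 57344 columns (~112 KiB); wider rows currently fall through to this error.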
+ throw std::runtime_error( + "Called with more columns than supported, please report this bug and this limit will " + "be increased."); + } +} + +template +void launch_gated_activation(T* output, + const T* input, + const T* bias, + int rows, + int cols, + ActivationType act_type, + cudaStream_t stream) +{ + switch (act_type) { + case ActivationType::GEGLU: + launch_gated_activation_impl( + output, input, bias, rows, cols, stream); + break; + case ActivationType::ReGLU: + launch_gated_activation_impl( + output, input, bias, rows, cols, stream); + break; + case ActivationType::SiGLU: + launch_gated_activation_impl( + output, input, bias, rows, cols, stream); + break; + default: throw std::runtime_error("Unsupported activation type"); + } +} + +#define INSTANTIATE_FOR_TYPE(T) \ + template void launch_gated_activation(T * output, \ + const T* input, \ + const T* bias, \ + int rows, \ + int cols, \ + ActivationType act_type, \ + cudaStream_t stream); + +INSTANTIATE_FOR_TYPE(float) +INSTANTIATE_FOR_TYPE(__half) + +#ifdef BF16_AVAILABLE +INSTANTIATE_FOR_TYPE(__nv_bfloat16) +#endif diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/mixed_gemm.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/mixed_gemm.h new file mode 100644 index 0000000000000000000000000000000000000000..1fc3831e908424fdf07744851b20c27e70d379dd --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/mixed_gemm.h @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include + +void mixed_gemm(at::Tensor& output, + at::Tensor& hidden_states, + at::Tensor& weight, + at::Tensor& scales, + c10::optional& bias, + int num_bits, + int activation_raw); diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..332b67cc5ae5a43815d2bb6d12581054050bced6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c79201cdf165ea93dbbb06e941566113fc367314 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .atom_builder import * diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e8f9ac5378a8d061dc0b2d5584e1f19d3d6d2a6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/__pycache__/atom_builder.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/__pycache__/atom_builder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66fdffab3663681516bcdea7afbdc0cef3faa109 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/__pycache__/atom_builder.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/atom_builder.cpp b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/atom_builder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..7ad4dc5faa20821a1d720b717685a9148f7535c5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/atom_builder.cpp @@ -0,0 +1,53 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "atom_builder.h" +#include "attention_atom.h" +#include "ragged_dtypes.h" + +int32_t build_atoms(torch::Tensor& atoms_ten, + torch::Tensor& batch_metadata, + torch::Tensor& seq_metadata, + torch::Tensor& kv_ptrs, + const int32_t q_block_size, + const int32_t kv_block_size) +{ + const RaggedBatchDescriptor* batch_desc = + reinterpret_cast(batch_metadata.data_ptr()); + + const InflightSeqDescriptor* seq_desc = + reinterpret_cast(seq_metadata.data_ptr()); + + int32_t** kv_ptr_list = reinterpret_cast(kv_ptrs.data_ptr()); + + AttentionAtom* atoms = reinterpret_cast(atoms_ten.data_ptr()); + + int32_t n_atoms = 0; + for (int i = 0; i < batch_desc->n_sequences; i++) { + const int seq_atoms = (seq_desc[i].n_tokens + q_block_size - 1) / q_block_size; + int32_t cur_start_idx = seq_desc[i].start_idx; + int32_t global_start_idx = seq_desc[i].seen_tokens; + int32_t remaining_toks = seq_desc[i].n_tokens; + + for (int j = 0; j < seq_atoms; j++) { + atoms[n_atoms].block_idx_list = kv_ptr_list[i]; + atoms[n_atoms].q_start_idx = cur_start_idx; + atoms[n_atoms].q_len = std::min(remaining_toks, q_block_size); + atoms[n_atoms].global_q_idx = global_start_idx; + + const int32_t end_toks = global_start_idx + atoms[n_atoms].q_len; + // TODO(cmikeh2): This logic needs to be changed for sparse implementations + atoms[n_atoms].kv_blocks = (end_toks + kv_block_size - 1) / kv_block_size; + atoms[n_atoms].total_extent = end_toks; + + cur_start_idx += atoms[n_atoms].q_len; + global_start_idx += atoms[n_atoms].q_len; + remaining_toks -= atoms[n_atoms].q_len; + n_atoms++; + } + } + + return n_atoms; +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/atom_builder.h 
b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/atom_builder.h new file mode 100644 index 0000000000000000000000000000000000000000..a3342d0e6695b7191bbe1ccd223cb4c6c842cbc2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/atom_builder.h @@ -0,0 +1,21 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include + +/* +Construct the attention atoms given the ragged metadata for the current batch. +This could largely be done at the Python level, but since we pack the KV ptr +alongside the int32_t metadata, it gets very ugly to handle the mixed-width +data structures (since we're packing them in a single tensor). +*/ +int32_t build_atoms(torch::Tensor& atoms_ten, + torch::Tensor& batch_metadata, + torch::Tensor& seq_metadata, + torch::Tensor& kv_ptrs, + const int32_t q_block_size, + const int32_t kv_block_size); diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/atom_builder.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/atom_builder.py new file mode 100644 index 0000000000000000000000000000000000000000..3355ca76c6a44533086c08bd5448e37161ecc43e --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/atom_builder/atom_builder.py @@ -0,0 +1,50 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Tuple + +import torch + +from ... import DSKernelBase +from deepspeed.ops.op_builder import RaggedOpsBuilder +from ....ragged import RaggedBatchWrapper + + +class AtomBuilder(DSKernelBase): + """ + C++ implementation to populate the attention atoms for the blocked attention + kernel. + """ + + def __init__(self) -> None: + """ + Triggers compilation of the C++ implementation. + """ + inf_module = RaggedOpsBuilder().load() + self.kernel = inf_module.build_atoms + + def __call__(self, atoms: torch.Tensor, ragged_batch: RaggedBatchWrapper, q_block_size: int, + kv_block_size: int) -> Tuple[torch.Tensor, int]: + """ + Populates the attention atoms for the blocked attention kernel. + + Args: + atoms (torch.Tensor): Pre-allocated int32 tensor of shape [max_atoms, 8] + ragged_batch (torch.Tensor): Wrapper for the ragged batch. 
+ q_block_size (int): The block size for the queries (as determined by the + attention implementation) + kv_block_size (int): The block size for the keys/values (as determined by the + attention implementation) + + Returns: + Tuple[torch.Tensor, int]: The atoms tensor (unmodified) and the number of atoms populated. + """ + if atoms.device != torch.device("cpu"): + raise RuntimeError("AtomBuilder must be called on CPU tensors") + + n_atoms = self.kernel(atoms, ragged_batch.batch_metadata_buffer(on_device=False), + ragged_batch.inflight_seq_descriptors(on_device=False), + ragged_batch.kv_ptrs(on_device=False), q_block_size, kv_block_size) + return atoms, n_atoms diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..abda12f9c7c4dd7785528fac2eca875ccb30ba0b Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/__pycache__/moe_gather.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/__pycache__/moe_gather.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c975afa638d9662a21800ed39c200c64bce85bc Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/__pycache__/moe_gather.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a7ca91fe536344e692fa5538e3ae2c48c6c9b418 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .moe_scatter import * diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..caa70b3fb8c1e1f983ae5cf854c95a192179e617 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/__pycache__/moe_scatter.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/__pycache__/moe_scatter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05fc2b1d55ad5f53c332ff8d7288cf4c7ebf1387 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/__pycache__/moe_scatter.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter.cpp b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8f7ecbd1a287264f8ab4b39cf63647fef53fed5b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter.cpp @@ -0,0 +1,67 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "moe_scatter.h" +#include + +#define DISPATCH_MOE_SCATTER(T_TYPE, C_TYPE) \ + if (activations.options().dtype() == torch::T_TYPE) { \ + launch_moe_scatter((C_TYPE*)moe_input.data_ptr(), \ + (int64_t*)expert_count_cumsums.data_ptr(), \ + (int32_t*)mapped_slots.data_ptr(), \ + (const C_TYPE*)activations.data_ptr(), \ + (const int32_t*)expert_counts.data_ptr(), \ + (const int32_t*)assignments.data_ptr(), \ + (const int32_t*)offsets.data_ptr(), \ + n_channels, \ + n_tokens, \ + n_experts, \ + n_top_k, \ + at::cuda::getCurrentCUDAStream()); \ + return; \ + } + +/* +Performs a cumsum on the expert counts and copies the hidden states to the +appropriate spot to ensure that each experts inputs are contiguous. +*/ +void moe_scatter(torch::Tensor& moe_input, + torch::Tensor& expert_count_cumsums, + torch::Tensor& mapped_slots, + torch::Tensor& activations, + torch::Tensor& expert_counts, + torch::Tensor& assignments, + torch::Tensor& offsets) +{ + const int32_t n_tokens = activations.size(0); + const int32_t n_channels = activations.size(1); + const int32_t n_top_k = assignments.size(1); + + // Should have a lot of matching buffer sizes here. 
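+    // Shapes implied by the checks below (illustrative):
+    //   activations            [n_tokens, n_channels]
+    //   assignments / offsets  [n_tokens, n_top_k]
+    //   moe_input              [n_tokens * n_top_k, n_channels]
+    //   mapped_slots           [n_tokens, n_top_k]
+    //   expert_count_cumsums   [n_experts] (int64)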
+ TORCH_CHECK(n_tokens == assignments.size(0)); + TORCH_CHECK(n_tokens == offsets.size(0)); + TORCH_CHECK(n_channels == moe_input.size(1)); + + TORCH_CHECK(n_top_k == offsets.size(1)); + TORCH_CHECK(n_top_k * n_tokens == moe_input.size(0)); + TORCH_CHECK(n_top_k == mapped_slots.size(1)); + + const int32_t n_experts = expert_count_cumsums.size(0); + + TORCH_CHECK(moe_input.scalar_type() == activations.scalar_type()); + TORCH_CHECK(expert_count_cumsums.scalar_type() == torch::kInt64); + TORCH_CHECK(mapped_slots.scalar_type() == torch::kInt32); + TORCH_CHECK(expert_counts.scalar_type() == torch::kInt32); + TORCH_CHECK(assignments.scalar_type() == torch::kInt32); + TORCH_CHECK(offsets.scalar_type() == torch::kInt32); + + DISPATCH_MOE_SCATTER(kHalf, __half); + +#ifdef BF16_AVAILABLE + DISPATCH_MOE_SCATTER(kBFloat16, __nv_bfloat16); +#endif + + TORCH_CHECK(false, "Unsupported dtype for moe_scatter") +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter.cuh b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter.cuh new file mode 100644 index 0000000000000000000000000000000000000000..d9756c80f05a2254ba7c806b4dd9a51ff06197a3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter.cuh @@ -0,0 +1,23 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include "ds_kernel_utils.h" +#include "ragged_dtypes.h" + +template +void launch_moe_scatter(T* moe_input, + int64_t* expert_count_cumsums, + int32_t* mapped_slots, + const T* activations, + const int32_t* expert_counts, + const int32_t* assignments, + const int32_t* offsets, + const int32_t n_channels, + const int32_t n_tokens, + const int32_t n_experts, + const int32_t n_top_k, + cudaStream_t stream); diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter.h new file mode 100644 index 0000000000000000000000000000000000000000..59597f63d12348b179908bd70d74f1e12696f326 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter.h @@ -0,0 +1,23 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include +#include +#include "moe_scatter.cuh" +#include "ragged_dtypes.h" + +/* +Performs a cumsum on the expert counts and copies the hidden states to the +appropriate spot to ensure that each experts inputs are contiguous. +*/ +void moe_scatter(torch::Tensor& moe_input, + torch::Tensor& expert_count_cumsums, + torch::Tensor& mapped_slots, + torch::Tensor& activations, + torch::Tensor& expert_counts, + torch::Tensor& assignments, + torch::Tensor& offsets); diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter.py new file mode 100644 index 0000000000000000000000000000000000000000..7efcedb4e8809c097bc59ace63be2e0c3cb44aba --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter.py @@ -0,0 +1,55 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from typing import Tuple + +from ... import DSKernelBase +from ....inference_utils import DtypeEnum +from deepspeed.ops.op_builder import RaggedOpsBuilder + + +class MoEScatter(DSKernelBase): + """ + CUDA implementation of the MoE scatter kernel. + """ + + supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16] + + def __init__(self, dtype: DtypeEnum, channels: int) -> None: + + if not isinstance(dtype, DtypeEnum): + dtype = DtypeEnum(dtype) + + if dtype not in MoEScatter.supported_dtypes: + raise RuntimeError(f"Unsupported dtype {dtype}") + + if channels % 8 != 0: + raise RuntimeError(f"Channels {channels} must be divisible by 8") + + inf_module = RaggedOpsBuilder().load() + self.kernel = inf_module.moe_scatter + + def __call__(self, moe_input: torch.Tensor, expert_cumsum: torch.Tensor, mapped_slots: torch.Tensor, + activations: torch.Tensor, expert_counts: torch.Tensor, assignments: torch.Tensor, + offsets: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Scatters the hidden states such that the token stride for each expert's input is contiguous. + + Arguments: + moe_input (torch.Tensor): The direct input for the MoE GEMM of shape [n_tokens * n_top_k, hidden_size]. + expert_cumsum (torch.Tensor): The cumulative sum of the expert counts of shape [n_experts]. + mapped_slots (torch.Tensor): The index of the token in the expert's input of shape [n_tokens, n_top_k]. + activations (torch.Tensor): The hidden states of shape [n_tokens, hidden_size]. + expert_counts (torch.Tensor): The number of tokens assigned to each expert of shape [n_experts]. + assignments (torch.Tensor): The expert assignments of shape [n_tokens, n_top_k]. + offsets (torch.Tensor): The offsets into the expert for a given token of shape [n_tokens, n_top_k]. + + Returns: + Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: The MoE input (with scattered values), the cumulative sum of the expert counts (for the MoE kernels themselves), and the mapped slots tensor, updated in place to show which row each token was mapped to in the MoE input. + """ + self.kernel(moe_input, expert_cumsum, mapped_slots, activations, expert_counts, assignments, offsets) + return moe_input, expert_cumsum, mapped_slots diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter_cuda.cu b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..d3eb4f649e79ee1dc30f26b16c16b0328bf84cb1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_scatter/moe_scatter_cuda.cu @@ -0,0 +1,216 @@ +// Copyright (c) Microsoft Corporation.
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "ds_kernel_utils.h" +#include "reduction_utils.h" +#include "top_k_gating.cuh" +#include "top_k_utils.h" + +using ROp = reduce::ROpType; + +namespace scatter { + +constexpr int access_granularity = 16; +constexpr int threads = 256; +constexpr int warps = threads / hw_warp_size; +constexpr int max_experts = 1024; + +} // namespace scatter + +template +__global__ void moe_scatter_kernel(T* moe_input, + int64_t* expert_count_cumsums, + int32_t* mapped_slots, + const T* activations, + const int32_t* assignments, + const int32_t* expert_counts, + const int32_t* offsets, + const int32_t n_channels, + const int32_t n_experts) +{ + constexpr int32_t vector_size = scatter::access_granularity / sizeof(T); + constexpr int32_t load_stride = vector_size * scatter::threads; + + const int32_t token_idx = blockIdx.x; + const int32_t tidx = threadIdx.x; + const int32_t warp_rank = tidx / hw_warp_size; + + // Bank aligned and sufficient + __shared__ int32_t red_buffer[32]; + __shared__ int32_t expert_offsets[scatter::max_experts]; + + // CG helpers + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + + // Fetch the assigned experts for this token. + int assigned_experts[N_TOP_K]; + for (int i = 0; i < N_TOP_K; i++) { + assigned_experts[i] = assignments[token_idx * N_TOP_K + i]; + } + + bool all_unassigned = true; + for (int i = 0; i < N_TOP_K; i++) { + if (assigned_experts[i] != gating::unassigned) { + all_unassigned = false; + } else { + mapped_slots[token_idx * N_TOP_K + i] = gating::unassigned; + } + } + if (all_unassigned && token_idx != 0) return; + + // Do a prefix scan on the expert counts to get the base offsets. Here we use the + // single up-sweep variant. + int32_t expert_vals; + if (tidx < n_experts) { + expert_vals = expert_counts[tidx]; + } else { + expert_vals = 0; + } + +#pragma unroll + for (int i = 1; i < hw_warp_size; i *= 2) { + int32_t maybe_add = warp.shfl_up(expert_vals, i); + expert_vals = (warp.thread_rank() < i) ? expert_vals : expert_vals + maybe_add; + } + + if (warp.thread_rank() == hw_warp_size - 1) { + mem_access::store_shared<4>(red_buffer + warp_rank, &expert_vals); + } + + tb.sync(); + + int32_t phase_2_val = 0; + if (warp.thread_rank() < scatter::warps) { + mem_access::load_shared<4>(&phase_2_val, red_buffer + warp.thread_rank()); + } + +#pragma unroll + for (int i = 1; i < hw_warp_size; i *= 2) { + int32_t maybe_add = warp.shfl_up(phase_2_val, i); + phase_2_val = (warp.thread_rank() < i) ? phase_2_val : phase_2_val + maybe_add; + } + + int warp_offset = 0; + if (warp_rank > 0) { warp_offset = warp.shfl(phase_2_val, warp_rank - 1); } + const int32_t expert_cumsum = warp_offset + expert_vals; + + // Token 0 will write the + if (token_idx == 0 && tidx < n_experts) { + int64_t expert_cumsum_64 = (int64_t)expert_cumsum; + expert_count_cumsums[tidx] = expert_cumsum_64; + } + + // Since token 0 has now written the expert cumsum to global memory, + // if it has no valid experts, we can early return. + if (token_idx == 0 && all_unassigned) return; + + if (tidx < n_experts) { expert_offsets[tidx] = expert_cumsum; } + + // Ensure all the expert offsets are written in shared memory. 
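+    // At this point (assuming a 32-lane warp and the 256-thread block configured in the
+    // launcher below), expert_offsets[e] holds the inclusive prefix sum of expert_counts[0..e],
+    // built by the two shfl_up scan passes; the barrier below makes it visible to every thread
+    // before the per-token store rows are derived from it.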
+ tb.sync(); + + // Data copy to appropriate location + const int32_t thread_offset = tidx * vector_size; + + const int32_t base_load_offset = token_idx * n_channels + thread_offset; + const T* load_base_ptr = activations + base_load_offset; + + int32_t store_rows[N_TOP_K]; + T* store_base_ptrs[N_TOP_K]; +#pragma unroll + for (int i = 0; i < N_TOP_K; i++) { + const int32_t cur_expert_offset = + (assigned_experts[i] > 0) ? expert_offsets[assigned_experts[i] - 1] : 0; + store_rows[i] = cur_expert_offset + offsets[token_idx * N_TOP_K + i]; + const int32_t base_store_offset = store_rows[i] * n_channels + thread_offset; + store_base_ptrs[i] = moe_input + base_store_offset; + } + +#pragma unroll + for (int i = 0; i < copyUnroll; i++) { + T tmp_buf[vector_size]; + + if (i * load_stride + thread_offset < n_channels) { + mem_access::load_global(tmp_buf, + load_base_ptr + i * load_stride); +#pragma unroll + for (int j = 0; j < N_TOP_K; j++) { + mem_access::store_global( + store_base_ptrs[j] + i * load_stride, tmp_buf); + } + } + } + + if (threadIdx.x == 0) { + for (int i = 0; i < N_TOP_K; i++) { mapped_slots[token_idx * N_TOP_K + i] = store_rows[i]; } + } +} + +#define LAUNCH_FOR_UNROLL(COUNT) \ + case COUNT: \ + moe_scatter_kernel \ + <<>>(moe_input, \ + expert_count_cumsums, \ + mapped_slots, \ + activations, \ + assignments, \ + expert_counts, \ + offsets, \ + n_channels, \ + n_experts); \ + break; + +template +void launch_moe_scatter(T* moe_input, + int64_t* expert_count_cumsums, + int32_t* mapped_slots, + const T* activations, + const int32_t* expert_counts, + const int32_t* assignments, + const int32_t* offsets, + const int32_t n_channels, + const int32_t n_tokens, + const int32_t n_experts, + const int32_t n_top_k, + cudaStream_t stream) +{ + constexpr int vals_per_unroll = scatter::threads * scatter::access_granularity / sizeof(T); + const int copy_unroll = (n_channels + vals_per_unroll - 1) / vals_per_unroll; + + const dim3 block(scatter::threads); + const dim3 grid(n_tokens); + + TOP_K_SWITCH(n_top_k, [&] { + switch (copy_unroll) { + LAUNCH_FOR_UNROLL(1); + LAUNCH_FOR_UNROLL(2); + LAUNCH_FOR_UNROLL(3); + LAUNCH_FOR_UNROLL(4); + LAUNCH_FOR_UNROLL(5); + LAUNCH_FOR_UNROLL(6); + } + }); +} + +#define INSTANTIATE_SCATTER_FOR_TYPE(TYPE) \ + template void launch_moe_scatter(TYPE*, \ + int64_t*, \ + int32_t*, \ + const TYPE*, \ + const int32_t*, \ + const int32_t*, \ + const int32_t*, \ + const int32_t, \ + const int32_t, \ + const int32_t, \ + const int32_t, \ + cudaStream_t); + +INSTANTIATE_SCATTER_FOR_TYPE(__half); + +#ifdef BF16_AVAILABLE +INSTANTIATE_SCATTER_FOR_TYPE(__nv_bfloat16); +#endif diff --git a/venv/lib/python3.10/site-packages/numexpr/interpreter.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/numexpr/interpreter.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..eb598a12e82e671893c024b0248da2093076c3d9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/numexpr/interpreter.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bcb0c6481fb539a0d205a98b8ad9860f3226a3b80ec3dd81200970415199cab5 +size 1150680 diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/_flapack.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/linalg/_flapack.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e64286904927cdd471a30892f0414907df349fa5 --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/scipy/linalg/_flapack.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4565fa0d0dc440d074193a89770c505a2a17463dcfd6346e83da0858f22f1b1e +size 2066281 diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_15_data.npz b/venv/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_15_data.npz new file mode 100644 index 0000000000000000000000000000000000000000..660bbb41b7fad43ed945dc701693451ceb60166c --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_15_data.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:13f3e1491a876bbf59d7ea10ad29c1f9b5996a2ab99216f31d5bfcd659012c1e +size 34462 diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_18_data.npz b/venv/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_18_data.npz new file mode 100644 index 0000000000000000000000000000000000000000..0b3d569a1a65e9b5ff153ae4121a6a5a69409f7c --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_18_data.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59f839467f2752b7df6fb6d4094396edd32a5929b764f7ffa1e6666431e6cac6 +size 161487 diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_20_data.npz b/venv/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_20_data.npz new file mode 100644 index 0000000000000000000000000000000000000000..87266deb46238307347362b63a4878f2565baf56 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_20_data.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:14e222d34a7118c7284a1675c6feceee77b84df951a5c6ba2a5ee9ff3054fa1d +size 31231 diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/data/gendare_20170120_data.npz b/venv/lib/python3.10/site-packages/scipy/linalg/tests/data/gendare_20170120_data.npz new file mode 100644 index 0000000000000000000000000000000000000000..ff967f2ca0d0868aacf7d7e67402599e64bab817 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/data/gendare_20170120_data.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3dfab451d9d5c20243e0ed85cd8b6c9657669fb9a0f83b5be165585783d55b5 +size 2164 diff --git a/venv/lib/python3.10/site-packages/sklearn/_loss/_loss.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/sklearn/_loss/_loss.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..dbf1b5a0574c13c51acfc57c6313fe8b0b85cef0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/_loss/_loss.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7b822564bd165be8bd8e13af19b612395f8cae70df94ade213ae1c63063078e0 +size 3043289