applied-ai-018 committed on
Commit 7c7d1b4 · verified · Parent: 8ba0881

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete set.
Files changed (50)
  1. .gitattributes +3 -0
  2. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/__init__.py +11 -0
  3. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/__pycache__/__init__.cpython-310.pyc +0 -0
  4. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/__init__.py +6 -0
  5. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/__pycache__/__init__.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/__pycache__/bias_activation.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/bias_activation.cpp +68 -0
  8. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/bias_activation.h +22 -0
  9. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/bias_activation.py +62 -0
  10. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/bias_activation_cuda.cu +140 -0
  11. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/__init__.py +6 -0
  12. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/__pycache__/__init__.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/__pycache__/blas_linear.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/blas.h +138 -0
  15. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/blas_linear.py +55 -0
  16. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/blas_utils.h +275 -0
  17. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/core_ops.cpp +42 -0
  18. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__init__.py +8 -0
  19. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/__init__.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_fp_ln_base.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_ln.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_post_ln.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_pre_ln.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_fp_ln_base.py +37 -0
  25. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_ln.py +30 -0
  26. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_post_ln.py +34 -0
  27. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_pre_ln.py +39 -0
  28. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.cpp +102 -0
  29. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.h +67 -0
  30. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm_cuda.cu +490 -0
  31. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/__init__.py +6 -0
  32. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/__pycache__/__init__.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/__pycache__/cuda_linear.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/cuda_linear.py +207 -0
  35. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/kernel_reduction.cuh +43 -0
  36. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/ptx_cp.async.cuh +71 -0
  37. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/linear_kernels.cpp +224 -0
  38. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/linear_kernels.h +28 -0
  39. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/linear_kernels_cuda.cu +318 -0
  40. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/linear_kernels_cuda.h +51 -0
  41. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__init__.py +7 -0
  42. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__pycache__/__init__.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__pycache__/rms_norm.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__pycache__/rms_norm_base.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__pycache__/rms_pre_norm.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm.cpp +123 -0
  47. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm.h +33 -0
  48. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm.py +28 -0
  49. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm_base.py +37 -0
  50. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm_cuda.cu +262 -0
.gitattributes CHANGED
@@ -59,3 +59,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.webm filter=lfs diff=lfs merge=lfs -text
  venv/bin/python3.10 filter=lfs diff=lfs merge=lfs -text
  venv/bin/python3 filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/scipy/linalg/_flapack.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/numexpr/interpreter.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/sklearn/_loss/_loss.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/__init__.py ADDED
@@ -0,0 +1,11 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from .bias_activations import *
+ from .blas_kernels import *
+ from .cuda_layer_norm import *
+ from .cuda_rms_norm import *
+ from .gated_activations import *
+ from .cuda_linear import *
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (368 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/__init__.py ADDED
@@ -0,0 +1,6 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from .bias_activation import *
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (256 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/__pycache__/bias_activation.cpython-310.pyc ADDED
Binary file (2.55 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/bias_activation.cpp ADDED
@@ -0,0 +1,68 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "bias_activation.h"
7
+ #include <c10/cuda/CUDAStream.h>
8
+ #include "ds_kernel_utils.h"
9
+
10
+ #ifdef BF16_AVAILABLE
11
+ #define DTYPE_SWITCH(DTYPE, ...) \
12
+ [&] { \
13
+ if (DTYPE == torch::kFloat16) { \
14
+ using scalar_t = __half; \
15
+ return __VA_ARGS__(); \
16
+ } else if (DTYPE == torch::kBFloat16) { \
17
+ using scalar_t = __nv_bfloat16; \
18
+ return __VA_ARGS__(); \
19
+ } else { \
20
+ TORCH_CHECK(false, "Unsupported dtype for BiasActivation"); \
21
+ } \
22
+ }()
23
+ #else
24
+ #define DTYPE_SWITCH(DTYPE, ...) \
25
+ [&] { \
26
+ if (DTYPE == torch::kFloat16) { \
27
+ using scalar_t = __half; \
28
+ return __VA_ARGS__(); \
29
+ } else { \
30
+ TORCH_CHECK(false, "Unsupported dtype for BiasActivation"); \
31
+ } \
32
+ }()
33
+ #endif
34
+
35
+ /*
36
+ In-place bias and activation fusion kernel.
37
+ */
38
+ void bias_activation(torch::Tensor& activation,
39
+ c10::optional<torch::Tensor>& bias,
40
+ const int32_t act_type)
41
+ {
42
+ const ActivationType atype = static_cast<ActivationType>(act_type);
43
+ const int32_t rows = activation.size(0);
44
+ const int32_t cols = activation.size(1);
45
+
46
+ TORCH_CHECK(atype == ActivationType::GELU || atype == ActivationType::RELU ||
47
+ atype == ActivationType::SILU || atype == ActivationType::IDENTITY,
48
+ "Unsupported activation type for BiasActivation");
49
+ TORCH_CHECK(activation.dim() == 2, "BiasActivation only supports 2D activation tensors");
50
+
51
+ DTYPE_SWITCH(activation.scalar_type(), [&] {
52
+ scalar_t* activation_ptr = reinterpret_cast<scalar_t*>(activation.data_ptr());
53
+
54
+ const scalar_t* bias_ptr;
55
+ if (bias.has_value()) {
56
+ TORCH_CHECK(activation.scalar_type() == bias.value().scalar_type(),
57
+ "BiasActivation activation and bias must have same dtype");
58
+ bias_ptr = reinterpret_cast<const scalar_t*>(bias.value().data_ptr());
59
+ } else {
60
+ bias_ptr = nullptr;
61
+ }
62
+
63
+ if (atype == ActivationType::IDENTITY && bias_ptr == nullptr) { return; }
64
+
65
+ launch_bias_activation<scalar_t>(
66
+ activation_ptr, bias_ptr, rows, cols, atype, c10::cuda::getCurrentCUDAStream());
67
+ });
68
+ }
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/bias_activation.h ADDED
@@ -0,0 +1,22 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ #pragma once
+
+ #include <c10/cuda/CUDAStream.h>
+ #include <torch/extension.h>
+ #include "activation_type.h"
+
+ template <typename T>
+ void launch_bias_activation(T* activation,
+                             const T* bias,
+                             const int32_t n_rows,
+                             const int32_t n_cols,
+                             const ActivationType activation_type,
+                             cudaStream_t stream);
+
+ void bias_activation(torch::Tensor& activation,
+                      c10::optional<torch::Tensor>& bias,
+                      const int32_t activation_type);
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/bias_activation.py ADDED
@@ -0,0 +1,62 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Optional
7
+
8
+ import torch
9
+
10
+ from ....inference_utils import ActivationType, DtypeEnum
11
+ from deepspeed.ops.op_builder import InferenceCoreBuilder
12
+ from ... import DSKernelBase
13
+
14
+
15
+ class CUDABiasActivation(DSKernelBase):
16
+ """
17
+ CUDA implementation of bias activation kernel. This kernel should be deprecated once
18
+ we are fusing the bias activation into the linear kernel in all scenarios.
19
+ """
20
+
21
+ supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16]
22
+ supported_act_fns = [ActivationType.IDENTITY, ActivationType.GELU, ActivationType.RELU, ActivationType.SILU]
23
+
24
+ def __init__(self, channels: int, dtype: DtypeEnum, act_fn: ActivationType) -> None:
25
+ """
26
+ Compile and validate for the fused bias-activation kernel.
27
+
28
+ Parameters:
29
+ channels (int): Number of channels to expect in the activation.
30
+ dtype (torch.dtype): Data type for the input/output. Supported values
31
+ are DtypeEnum.fp16 and DtypeEnum.bf16.
32
+ act_fn (ActivationType): Activation function to use. Only IDENTITY, GELU, RELU, and SILU are supported.
33
+ """
34
+
35
+ if channels % 8 != 0:
36
+ raise ValueError("channels must be divisible by 8")
37
+
38
+ if DtypeEnum(dtype) not in CUDABiasActivation.supported_dtypes:
39
+ raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format(
40
+ dtype, CUDABiasActivation.supported_dtypes))
41
+
42
+ act_fn = ActivationType(act_fn)
43
+ if act_fn not in CUDABiasActivation.supported_act_fns:
44
+ raise ValueError("Unsupported activation function: {}, supported_act_fns are {}".format(
45
+ act_fn, CUDABiasActivation.supported_act_fns))
46
+
47
+ inf_module = InferenceCoreBuilder().load()
48
+ self.kernel = inf_module.bias_activation
49
+ self.act_fn = act_fn
50
+
51
+ def __call__(self, activation: torch.Tensor, bias: Optional[torch.Tensor] = None) -> torch.Tensor:
52
+ """
53
+ Add an optional bias and perform the non-linear activation function.
54
+
55
+ Parameters:
56
+ activation (torch.Tensor): Input tensor of shape [tokens, channels]
57
+ bias (torch.Tensor): Optional bias tensor of shape [channels]
58
+
59
+ Returns:
60
+ activation that has been updated in-place
61
+ """
62
+ self.kernel(activation, bias, self.act_fn.value)
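A hedged usage sketch for the CUDABiasActivation wrapper above (illustrative, not part of this commit): it assumes a CUDA toolchain is available so InferenceCoreBuilder can JIT-compile the extension, and that the class and enums are importable from the package paths used in this diff.

# Hypothetical example: in-place bias + GELU on a [tokens, channels] activation.
# channels must be divisible by 8 and the dtype must be fp16 or bf16 (see __init__ above).
import torch
from deepspeed.inference.v2.inference_utils import ActivationType, DtypeEnum
from deepspeed.inference.v2.kernels.core_ops import CUDABiasActivation

tokens, channels = 128, 4096
act = torch.randn(tokens, channels, dtype=torch.float16, device="cuda")
bias = torch.randn(channels, dtype=torch.float16, device="cuda")

kernel = CUDABiasActivation(channels, DtypeEnum.fp16, ActivationType.GELU)
kernel(act, bias)  # act is updated in place with gelu(act + bias)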
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/bias_activations/bias_activation_cuda.cu ADDED
@@ -0,0 +1,140 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include <cassert>
7
+ #include "activation_type.h"
8
+ #include "conversion_utils.h"
9
+ #include "ds_kernel_utils.h"
10
+ #include "memory_access_utils.h"
11
+
12
+ // Default activation function will error out
13
+ template <ActivationType ActType>
14
+ DS_D_INLINE float act_fn(float val);
15
+
16
+ template <>
17
+ DS_D_INLINE float act_fn<ActivationType::IDENTITY>(float val)
18
+ {
19
+ return val;
20
+ }
21
+
22
+ template <>
23
+ DS_D_INLINE float act_fn<ActivationType::RELU>(float val)
24
+ {
25
+ return val > 0.0f ? val : 0.0f;
26
+ }
27
+
28
+ template <>
29
+ DS_D_INLINE float act_fn<ActivationType::GELU>(float val)
30
+ {
31
+ constexpr float sqrt_param = 0.79788456080286535587989211986876f;
32
+ constexpr float mul_param = 0.044715f;
33
+ return val * 0.5f * (1.0f + tanhf(sqrt_param * (val + mul_param * val * val * val)));
34
+ }
35
+
36
+ template <>
37
+ DS_D_INLINE float act_fn<ActivationType::SILU>(float val)
38
+ {
39
+ return val / (1.0f + expf(-val));
40
+ }
41
+
42
+ namespace bias_act {
43
+
44
+ constexpr int access_size = 16;
45
+ constexpr int threads = 512;
46
+ constexpr int unroll = 4;
47
+
48
+ } // namespace bias_act
49
+
50
+ template <typename T, ActivationType ActType>
51
+ __global__ void bias_activation_kernel(T* activation,
52
+ const T* bias,
53
+ const int32_t rows,
54
+ const int32_t cols)
55
+ {
56
+ constexpr int vector_T = bias_act::access_size / sizeof(T);
57
+
58
+ const int32_t thread_offset = threadIdx.x * vector_T;
59
+ const int32_t block_offset = blockIdx.x * vector_T * bias_act::unroll * bias_act::threads;
60
+ const int32_t base_offset = block_offset + thread_offset;
61
+
62
+ const int32_t thread_stride = bias_act::threads * vector_T;
63
+
64
+ #pragma unroll
65
+ for (int i = 0; i < bias_act::unroll; i++) {
66
+ const int32_t iter_offset = base_offset + i * thread_stride;
67
+
68
+ const int32_t row = iter_offset / cols;
69
+
70
+ T buffer[vector_T];
71
+ T bias_buffer[vector_T];
72
+
73
+ if (row < rows) {
74
+ const int32_t col = iter_offset % cols;
75
+
76
+ mem_access::load_global<bias_act::access_size>(buffer, activation + iter_offset);
77
+ mem_access::load_global<bias_act::access_size>(
78
+ bias_buffer, bias + col, bias != nullptr);
79
+
80
+ #pragma unroll
81
+ for (int j = 0; j < vector_T; j++) {
82
+ float val =
83
+ conversion::to<float>(buffer[j]) + conversion::to<float>(bias_buffer[j]);
84
+ buffer[j] = conversion::to<T>(act_fn<ActType>(val));
85
+ }
86
+
87
+ mem_access::store_global<bias_act::access_size>(activation + iter_offset, buffer);
88
+ }
89
+ }
90
+ }
91
+
92
+ #define ACT_TYPE_SWITCH(ACT_TYPE, ...) \
93
+ if (ACT_TYPE == ActivationType::IDENTITY) { \
94
+ constexpr ActivationType act_fn_t = ActivationType::IDENTITY; \
95
+ return __VA_ARGS__(); \
96
+ } else if (ACT_TYPE == ActivationType::RELU) { \
97
+ constexpr ActivationType act_fn_t = ActivationType::RELU; \
98
+ return __VA_ARGS__(); \
99
+ } else if (ACT_TYPE == ActivationType::GELU) { \
100
+ constexpr ActivationType act_fn_t = ActivationType::GELU; \
101
+ return __VA_ARGS__(); \
102
+ } else if (ACT_TYPE == ActivationType::SILU) { \
103
+ constexpr ActivationType act_fn_t = ActivationType::SILU; \
104
+ return __VA_ARGS__(); \
105
+ } else { \
106
+ assert(false); \
107
+ }
108
+
109
+ template <typename T>
110
+ void launch_bias_activation(T* activation,
111
+ const T* bias,
112
+ const int32_t n_rows,
113
+ const int32_t n_cols,
114
+ const ActivationType activation_type,
115
+ cudaStream_t stream)
116
+ {
117
+ constexpr int32_t elems_per_block =
118
+ bias_act::threads * bias_act::unroll * bias_act::access_size / sizeof(T);
119
+ const int32_t total_elems = n_rows * n_cols;
120
+
121
+ const int32_t blocks = (total_elems + elems_per_block - 1) / elems_per_block;
122
+
123
+ const dim3 grid(blocks);
124
+ const dim3 block(bias_act::threads);
125
+
126
+ ACT_TYPE_SWITCH(activation_type, [&] {
127
+ bias_activation_kernel<T, act_fn_t>
128
+ <<<grid, block, 0, stream>>>(activation, bias, n_rows, n_cols);
129
+ });
130
+ }
131
+
132
+ #define INSTANTIATE_FOR_T(T) \
133
+ template void launch_bias_activation<T>( \
134
+ T*, const T*, const int32_t, const int32_t, const ActivationType, cudaStream_t);
135
+
136
+ INSTANTIATE_FOR_T(__half);
137
+
138
+ #ifdef BF16_AVAILABLE
139
+ INSTANTIATE_FOR_T(__nv_bfloat16);
140
+ #endif
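For reference, the activation formulas specialized in the device code above (tanh-approximate GELU with sqrt(2/pi) ≈ 0.79788456 and 0.044715, SiLU as x·sigmoid(x), and ReLU) can be sanity-checked against a plain float implementation; this is an assumed cross-check sketch, not part of the commit.

# Host-side reference for the act_fn<> specializations in bias_activation_cuda.cu.
import math

def gelu_tanh(x: float) -> float:
    sqrt_param = 0.79788456080286535587989211986876  # sqrt(2 / pi)
    mul_param = 0.044715
    return x * 0.5 * (1.0 + math.tanh(sqrt_param * (x + mul_param * x ** 3)))

def silu(x: float) -> float:
    return x / (1.0 + math.exp(-x))

def relu(x: float) -> float:
    return x if x > 0.0 else 0.0

for v in (-2.0, -0.5, 0.0, 0.5, 2.0):
    print(v, gelu_tanh(v), silu(v), relu(v))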
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/__init__.py ADDED
@@ -0,0 +1,6 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from .blas_linear import *
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (248 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/__pycache__/blas_linear.cpython-310.pyc ADDED
Binary file (2.26 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/blas.h ADDED
@@ -0,0 +1,138 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include <c10/cuda/CUDAStream.h>
9
+ #include <torch/extension.h>
10
+ #include <cstdio>
11
+ #include "blas_utils.h"
12
+
13
+ #define DISPATCH_BLAS_MATMUL(T_TYPE, C_TYPE) \
14
+ if (output.options().dtype() == torch::T_TYPE) { \
15
+ blas_gemm_ex(output.data_ptr(), \
16
+ (const void*)weights.data_ptr(), \
17
+ (const void*)hidden_states.data_ptr(), \
18
+ m, \
19
+ n, \
20
+ k, \
21
+ lda, \
22
+ ldb, \
23
+ ldc, \
24
+ trans_a, \
25
+ trans_b, \
26
+ &alpha, \
27
+ &beta, \
28
+ C_TYPE); \
29
+ }
30
+
31
+ void blas_linear(at::Tensor& output, at::Tensor& hidden_states, at::Tensor& weights)
32
+ {
33
+ /*
34
+ Expected shape: output([total_tokens_across_dims], out_neurons)
35
+ hidden_states([total_tokens_across_dims], in_neurons)
36
+ weights(out_neurons, in_neurons)
37
+
38
+ We are going to assume contiguous for the above shapes.
39
+
40
+ The shapes are going to get messed with a little internally to handle column-major
41
+ GEMMs.
42
+ */
43
+
44
+ // Number of tokens is N (since the GEMM output is column-major but our Tensor
45
+ // is row-major, we need to transpose the shapes)
46
+ const int n = output.numel() / output.size(-1);
47
+ const int k = weights.size(1);
48
+ const int m = weights.size(0);
49
+
50
+ // A strides
51
+ const bool trans_a = weights.stride(1) == 1;
52
+ const int lda = (trans_a) ? weights.stride(0) : weights.stride(1);
53
+
54
+ // B strides
55
+ const bool trans_b = hidden_states.stride(-1) != 1;
56
+ const int ldb = (trans_b) ? hidden_states.stride(-1) : hidden_states.stride(-2);
57
+
58
+ // C strides
59
+ const int ldc = output.stride(-2);
60
+
61
+ const float alpha = 1.0f;
62
+ const float beta = 0.0f;
63
+
64
+ TORCH_CHECK(output.scalar_type() == hidden_states.scalar_type(),
65
+ "Output and hidden states must have the same scalar type");
66
+ TORCH_CHECK(output.scalar_type() == weights.scalar_type(),
67
+ "Output and weights must have the same scalar type");
68
+
69
+ // Dispatch the datatypes
70
+ DISPATCH_BLAS_MATMUL(kFloat, BlasType::FP32);
71
+ DISPATCH_BLAS_MATMUL(kHalf, BlasType::FP16);
72
+ #ifdef BF16_AVAILABLE
73
+ DISPATCH_BLAS_MATMUL(kBFloat16, BlasType::BF16);
74
+ #endif
75
+ }
76
+
77
+ #define DISPATCH_4D_BLAS(T_TYPE, C_TYPE) \
78
+ if (C.options().dtype() == torch::T_TYPE) { \
79
+ blas_strided_batched_gemm(C.data_ptr(), \
80
+ (const void*)A.data_ptr(), \
81
+ (const void*)B.data_ptr(), \
82
+ m, \
83
+ n, \
84
+ k, \
85
+ lda, \
86
+ ldb, \
87
+ ldc, \
88
+ trans_a, \
89
+ trans_b, \
90
+ &alpha, \
91
+ &beta, \
92
+ stride_a, \
93
+ stride_b, \
94
+ stride_c, \
95
+ batch, \
96
+ C_TYPE); \
97
+ }
98
+
99
+ void blas_4d_matmul(at::Tensor& C, at::Tensor& B, at::Tensor& A)
100
+ {
101
+ /*
102
+ C shape: (batch_size, N, M)
103
+ A shape: (batch_size, N, K)
104
+ B shape: (batch_size, K, M)
105
+ */
106
+
107
+ const int n = C.size(-2);
108
+ const int k = C.size(-1);
109
+ const int m = B.size(-1);
110
+
111
+ // A strides
112
+ const bool trans_a = A.stride(-1) == 1;
113
+ const int lda = (trans_a) ? A.stride(-2) : A.stride(-1);
114
+ const int stride_a = A.stride(-3);
115
+
116
+ // B strides
117
+ const bool trans_b = B.stride(-1) != 1;
118
+ const int ldb = (trans_b) ? B.stride(-1) : B.stride(-2);
119
+ const int stride_b = B.stride(-3);
120
+
121
+ // C strides
122
+ const int ldc = C.stride(-2);
123
+ const int stride_c = C.stride(-3);
124
+
125
+ const float alpha = 1.0f;
126
+ const float beta = 0.0f;
127
+
128
+ const int batch = C.numel() / (n * m);
129
+
130
+ // Dispatch the datatypes
131
+ DISPATCH_4D_BLAS(kFloat, BlasType::FP32);
132
+ DISPATCH_4D_BLAS(kHalf, BlasType::FP16);
133
+ #ifdef BF16_AVAILABLE
134
+ DISPATCH_4D_BLAS(kBFloat16, BlasType::BF16);
135
+ #endif
136
+ }
137
+
138
+ void create_handle() { BlasContext::getInstance().get_handle(); }
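The shape juggling in blas_linear above (m = out_neurons, n = tokens) follows the standard trick for driving a column-major GEMM from row-major tensors: a row-major buffer reinterpreted as column-major holds the transpose, so computing Z^T = W · X^T in column-major order leaves row-major Z = X · W^T in the output buffer. A small NumPy sketch (illustrative only, not part of the commit) of that identity:

# Why blas_linear swaps m and n: the column-major view of the row-major output is Z^T.
import numpy as np

tokens, in_neurons, out_neurons = 4, 8, 3
X = np.random.randn(tokens, in_neurons).astype(np.float32)       # hidden_states
W = np.random.randn(out_neurons, in_neurons).astype(np.float32)  # weights

Z = X @ W.T  # row-major [tokens, out_neurons], what blas_linear writes into `output`

# Reinterpret Z's buffer as a column-major [out_neurons, tokens] matrix: that view is Z^T,
# and Z^T equals W @ X^T, which is the GEMM the wrapper actually hands to the BLAS library.
Z_colmajor_view = Z.reshape(-1, order="C").reshape(out_neurons, tokens, order="F")
assert np.allclose(Z_colmajor_view, W @ X.T)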
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/blas_linear.py ADDED
@@ -0,0 +1,55 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+
8
+ from ....inference_utils import DtypeEnum
9
+ from deepspeed.ops.op_builder import InferenceCoreBuilder
10
+ from ... import DSKernelBase
11
+
12
+
13
+ class BlasLibLinear(DSKernelBase):
14
+ """
15
+ Wrapper around the BLAS matmul kernel for FP16/BF16/FP32 for CUDA/RoCM.
16
+
17
+ Performs z = x @ y
18
+ """
19
+
20
+ supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16, DtypeEnum.fp32]
21
+
22
+ def __init__(self, fp_dtype: DtypeEnum):
23
+ """
24
+ Parameters:
25
+ fp_dtype (torch.dtype): Data type for the input/output. Supported values
26
+ are torch.float16, torch.bfloat16, and torch.float32.
27
+ """
28
+ fp_dtype = DtypeEnum(fp_dtype)
29
+ if fp_dtype not in BlasLibLinear.supported_dtypes:
30
+ raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format(
31
+ fp_dtype, BlasLibLinear.supported_dtypes))
32
+
33
+ self.inf_module = InferenceCoreBuilder().load()
34
+ self.inf_module.create_handle()
35
+ self.kernel = self.inf_module.blas_linear
36
+
37
+ def __call__(self, output: torch.Tensor, hidden_states: torch.Tensor, weights: torch.Tensor) -> torch.Tensor:
38
+ """
39
+ Matmul kernel as implemented by platform BLAS library. The input must be 2D or larger. If
40
+ n-dimensional, the leading dimensions are folded into each other:
41
+ 2D: m = x.size(0)
42
+ 3D: m = x.size(0) * x.size(1)
43
+ 4D: m = x.size(0) * x.size(1) * x.size(2) (etc...)
44
+ All inputs should be contiguous.
45
+
46
+ Parameters:
47
+ output (torch.Tensor): Output tensor. Shape is of [*, out_features]
48
+ hidden_states (torch.Tensor): Input tensor. Shape is of [*, in_features]
49
+ weights (torch.Tensor): Input tensor. Shape is of [out_features, in_features]
50
+
51
+ Returns:
52
+ z (torch.Tensor): Output tensor. Shape is of [m, n]
53
+ """
54
+ self.kernel(output, hidden_states, weights)
55
+ return output
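A hedged usage sketch for BlasLibLinear (illustrative, not part of this commit; it assumes the InferenceCoreBuilder JIT build succeeds and that the class is re-exported from deepspeed.inference.v2.kernels.core_ops):

# Hypothetical example: z = x @ w^T with a caller-allocated, contiguous output buffer.
import torch
from deepspeed.inference.v2.inference_utils import DtypeEnum
from deepspeed.inference.v2.kernels.core_ops import BlasLibLinear

tokens, in_features, out_features = 64, 1024, 4096
x = torch.randn(tokens, in_features, dtype=torch.float16, device="cuda")
w = torch.randn(out_features, in_features, dtype=torch.float16, device="cuda")
z = torch.empty(tokens, out_features, dtype=torch.float16, device="cuda")

linear = BlasLibLinear(DtypeEnum.fp16)
linear(z, x, w)
torch.testing.assert_close(z, x @ w.T, rtol=1e-2, atol=1e-2)  # loose fp16 tolerance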
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/blas_kernels/blas_utils.h ADDED
@@ -0,0 +1,275 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include <assert.h>
9
+ #include <cublas_v2.h>
10
+ #include <cuda.h>
11
+ #ifdef BF16_AVAILABLE
12
+ #include <cuda_bf16.h>
13
+ #endif
14
+ #include <cuda_fp16.h>
15
+ #include <cuda_runtime.h>
16
+ #ifndef __HIP_PLATFORM_AMD__
17
+ #include <mma.h>
18
+ #endif
19
+ #include <stdio.h>
20
+ #include <iostream>
21
+ #include <stdexcept>
22
+
23
+ class BlasContext {
24
+ /*
25
+ Slim wrapper for managing the lifetime of the platform's BLAS handle. This should
26
+ be hipified for ROCm.
27
+ */
28
+ public:
29
+ BlasContext()
30
+ {
31
+ if (cublasCreate(&_handle) != CUBLAS_STATUS_SUCCESS) {
32
+ auto message = std::string("Fail to create cublas handle.");
33
+ std::cerr << message << std::endl;
34
+ throw std::runtime_error(message);
35
+ }
36
+ #ifndef __HIP_PLATFORM_AMD__
37
+ cublasSetMathMode(_handle, CUBLAS_TENSOR_OP_MATH);
38
+ #endif
39
+ }
40
+
41
+ virtual ~BlasContext() { cublasDestroy(_handle); }
42
+
43
+ static BlasContext& getInstance()
44
+ {
45
+ // Should always access the singleton through this function.
46
+ static BlasContext _instance;
47
+ return _instance;
48
+ }
49
+
50
+ cublasHandle_t get_handle() const { return _handle; }
51
+
52
+ private:
53
+ cublasHandle_t _handle;
54
+ };
55
+
56
+ enum class BlasType { FP32, FP16, BF16 };
57
+
58
+ #ifdef __HIP_PLATFORM_AMD__
59
+ rocblas_operation get_trans_op(bool do_trans)
60
+ {
61
+ return (do_trans) ? rocblas_operation_transpose : rocblas_operation_none;
62
+ }
63
+
64
+ rocblas_datatype get_datatype(BlasType type)
65
+ {
66
+ switch (type) {
67
+ case BlasType::FP32: return rocblas_datatype_f32_r;
68
+ case BlasType::FP16: return rocblas_datatype_f16_r;
69
+ case BlasType::BF16: return rocblas_datatype_bf16_r;
70
+ default: throw std::runtime_error("Unsupported BlasType");
71
+ }
72
+ }
73
+ #else
74
+ cublasOperation_t get_trans_op(bool do_trans) { return (do_trans) ? CUBLAS_OP_T : CUBLAS_OP_N; }
75
+
76
+ cublasDataType_t get_datatype(BlasType type)
77
+ {
78
+ switch (type) {
79
+ case BlasType::FP32: return CUDA_R_32F;
80
+ case BlasType::FP16: return CUDA_R_16F;
81
+ case BlasType::BF16: return CUDA_R_16BF;
82
+ default: throw std::runtime_error("Unsupported BlasType");
83
+ }
84
+ }
85
+ #endif
86
+
87
+ int blas_gemm_ex(void* C,
88
+ const void* A,
89
+ const void* B,
90
+ int m,
91
+ int n,
92
+ int k,
93
+ int lda,
94
+ int ldb,
95
+ int ldc,
96
+ bool transa,
97
+ bool transb,
98
+ const float* alpha,
99
+ const float* beta,
100
+ BlasType type)
101
+ {
102
+ #ifdef __HIP_PLATFORM_AMD__
103
+ rocblas_operation_t transa_op = get_trans_op(transa);
104
+ rocblas_operation_t transb_op = get_trans_op(transb);
105
+
106
+ rocblas_datatype_t abc_type = get_datatype(type);
107
+
108
+ rocblas_status status = rocblas_gemm_ex(BlasContext::getInstance().get_handle(),
109
+ transa_op,
110
+ transb_op,
111
+ m,
112
+ n,
113
+ k,
114
+ (const void*)alpha,
115
+ A,
116
+ abc_type,
117
+ lda,
118
+ B,
119
+ abc_type,
120
+ ldb,
121
+ (const void*)beta,
122
+ C,
123
+ abc_type,
124
+ ldc,
125
+ C,
126
+ abc_type,
127
+ ldc,
128
+ rocblas_datatype_f32_r,
129
+ rocblas_gemm_algo_standard,
130
+ 0,
131
+ 0);
132
+ #else
133
+ cublasOperation_t transa_op = get_trans_op(transa);
134
+ cublasOperation_t transb_op = get_trans_op(transb);
135
+
136
+ cublasDataType_t abc_type = get_datatype(type);
137
+ cublasStatus_t status = cublasGemmEx(BlasContext::getInstance().get_handle(),
138
+ transa_op,
139
+ transb_op,
140
+ m,
141
+ n,
142
+ k,
143
+ (const void*)alpha,
144
+ A,
145
+ abc_type,
146
+ lda,
147
+ B,
148
+ abc_type,
149
+ ldb,
150
+ (const void*)beta,
151
+ C,
152
+ abc_type,
153
+ ldc,
154
+ CUDA_R_32F,
155
+ CUBLAS_GEMM_DEFAULT_TENSOR_OP);
156
+ #endif
157
+
158
+ #ifdef __HIP_PLATFORM_AMD__
159
+ if (status != rocblas_status_success) {
160
+ #else
161
+ if (status != CUBLAS_STATUS_SUCCESS) {
162
+ #endif
163
+ fprintf(stderr,
164
+ "!!!! kernel execution error. (m: %d, n: %d, k: %d, error: %d) \n",
165
+ m,
166
+ n,
167
+ k,
168
+ (int)status);
169
+ return EXIT_FAILURE;
170
+ }
171
+ return 0;
172
+ }
173
+
174
+ int blas_strided_batched_gemm(void* C,
175
+ const void* A,
176
+ const void* B,
177
+ int m,
178
+ int n,
179
+ int k,
180
+ int lda,
181
+ int ldb,
182
+ int ldc,
183
+ bool transa,
184
+ bool transb,
185
+ const float* alpha,
186
+ const float* beta,
187
+ int stride_A,
188
+ int stride_B,
189
+ int stride_C,
190
+ int batch,
191
+ BlasType type)
192
+ {
193
+ #ifdef __HIP_PLATFORM_AMD__
194
+ rocblas_operation_t transa_op = get_trans_op(transa);
195
+ rocblas_operation_t transb_op = get_trans_op(transb);
196
+
197
+ rocblas_datatype_t abc_type = get_datatype(type);
198
+
199
+ rocblas_status status =
200
+ rocblas_gemm_strided_batched_ex(BlasContext::getInstance().get_handle(),
201
+ transa_op,
202
+ transb_op,
203
+ m,
204
+ n,
205
+ k,
206
+ (const void*)alpha,
207
+ A,
208
+ abc_type,
209
+ lda,
210
+ stride_A,
211
+ B,
212
+ abc_type,
213
+ ldb,
214
+ stride_B,
215
+ (const void*)beta,
216
+ C,
217
+ abc_type,
218
+ ldc,
219
+ stride_C,
220
+ C,
221
+ abc_type,
222
+ ldc,
223
+ stride_C,
224
+ batch,
225
+ rocblas_datatype_f32_r,
226
+ rocblas_gemm_algo_standard,
227
+ 0,
228
+ 0);
229
+ #else
230
+ cublasOperation_t transa_op = get_trans_op(transa);
231
+ cublasOperation_t transb_op = get_trans_op(transb);
232
+
233
+ cublasDataType_t abc_type = get_datatype(type);
234
+
235
+ cublasStatus_t status = cublasGemmStridedBatchedEx(BlasContext::getInstance().get_handle(),
236
+ transa_op,
237
+ transb_op,
238
+ m,
239
+ n,
240
+ k,
241
+ (const void*)alpha,
242
+ A,
243
+ abc_type,
244
+ lda,
245
+ stride_A,
246
+ B,
247
+ abc_type,
248
+ ldb,
249
+ stride_B,
250
+ (const void*)beta,
251
+ C,
252
+ abc_type,
253
+ ldc,
254
+ stride_C,
255
+ batch,
256
+ CUDA_R_32F,
257
+ CUBLAS_GEMM_DEFAULT_TENSOR_OP);
258
+ #endif
259
+
260
+ #ifdef __HIP_PLATFORM_AMD__
261
+ if (status != rocblas_status_success) {
262
+ #else
263
+ if (status != CUBLAS_STATUS_SUCCESS) {
264
+ #endif
265
+ fprintf(stderr,
266
+ "!!!! kernel execution error. (batch: %d, m: %d, n: %d, k: %d, error: %d) \n",
267
+ batch,
268
+ m,
269
+ n,
270
+ k,
271
+ (int)status);
272
+ return EXIT_FAILURE;
273
+ }
274
+ return 0;
275
+ }
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/core_ops.cpp ADDED
@@ -0,0 +1,42 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ #include <c10/cuda/CUDAStream.h>
+ #include <torch/extension.h>
+
+ #include "bias_activation.h"
+ #include "blas.h"
+ #include "gated_activation_kernels.h"
+ #include "layer_norm.h"
+ #include "linear_kernels.h"
+ #include "rms_norm.h"
+
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
+ {
+     // bias_activation.h
+     m.def("bias_activation", &bias_activation, "DeepSpeed bias activation in CUDA");
+
+     // layer_norm.h
+     m.def("layer_norm", &ds_layer_norm, "DeepSpeed layer norm in CUDA");
+     m.def("pre_layer_norm", &ds_pre_layer_norm, "DeepSpeed pre layer norm in CUDA");
+     m.def("post_layer_norm", &ds_post_layer_norm, "DeepSpeed post layer norm in CUDA");
+
+     // blas.h
+     m.def("blas_linear", &blas_linear, "Linear implemented by vendor BLAS");
+     m.def("blas_4d_matmul", &blas_4d_matmul, "4D matmul implemented by vendor BLAS");
+     m.def("create_handle", &create_handle, "Create a handle for vendor BLAS");
+
+     // gated_activation_kernels.h
+     m.def("gated_activation", &ds_gated_activation, "DeepSpeed gated activation in CUDA");
+
+     // rms_norm.h
+     m.def("rms_norm", &rms_norm, "DeepSpeed rms norm in CUDA");
+     m.def("rms_pre_norm", &rms_pre_norm, "DeepSpeed rms pre norm in CUDA");
+
+     // linear_kernels.h
+     m.def("cuda_wf6af16_linear", &cuda_wf6af16_linear, "DeepSpeed Wf6Af16 linear in CUDA");
+     m.def(
+         "preprocess_weight", &preprocess_weight, "preprocess the FP16 weight to be 2bit and 4 bit");
+ }
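The pybind11 module above is what InferenceCoreBuilder compiles and exposes to the Python wrappers in this commit. A hedged sketch of inspecting the resulting module (the attribute names mirror the m.def bindings registered above; the build itself is assumed to succeed):

# Assumed environment: CUDA toolchain present so the JIT build of the core-ops extension succeeds.
from deepspeed.ops.op_builder import InferenceCoreBuilder

inf_module = InferenceCoreBuilder().load()
for op_name in ("bias_activation", "layer_norm", "pre_layer_norm", "post_layer_norm",
                "blas_linear", "blas_4d_matmul", "create_handle", "gated_activation",
                "rms_norm", "rms_pre_norm", "cuda_wf6af16_linear", "preprocess_weight"):
    print(op_name, "->", getattr(inf_module, op_name))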
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__init__.py ADDED
@@ -0,0 +1,8 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from .cuda_ln import *
+ from .cuda_post_ln import *
+ from .cuda_pre_ln import *
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (294 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_fp_ln_base.cpython-310.pyc ADDED
Binary file (1.56 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_ln.cpython-310.pyc ADDED
Binary file (1.15 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_post_ln.cpython-310.pyc ADDED
Binary file (1.29 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/__pycache__/cuda_pre_ln.cpython-310.pyc ADDED
Binary file (1.53 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_fp_ln_base.py ADDED
@@ -0,0 +1,37 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import torch
+
+ from ... import DSKernelBase
+ from ....inference_utils import elem_size
+ from deepspeed.ops.op_builder import InferenceCoreBuilder
+
+
+ class CUDAFPLNBase(DSKernelBase):
+     """
+     Base class for CUDA LN kernels. They all share the same validation logic,
+     so we can share it here.
+     """
+
+     supported_dtypes = [torch.float16, torch.bfloat16, torch.float32]
+
+     def __init__(self, channels: int, fp_dtype: torch.dtype, epsilon: float = 1e-5):
+         """
+         Parameters:
+             channels (int): Number of channels in the input tensor. Must be divisible such that
+                 the channel row aligns to 16 bytes.
+             fp_dtype (torch.dtype): Data type for the input/output/gamma. Supported values
+                 are torch.float16, torch.bfloat16, and torch.float32.
+         """
+         if fp_dtype not in CUDAFPLNBase.supported_dtypes:
+             raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format(
+                 fp_dtype, CUDAFPLNBase.supported_dtypes))
+
+         if elem_size(fp_dtype) * channels % 16 != 0:
+             raise ValueError("channels must be divisible by 16 bytes")
+
+         self.inf_module = InferenceCoreBuilder().load()
+         self.epsilon = epsilon
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_ln.py ADDED
@@ -0,0 +1,30 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import torch
+
+ from .cuda_fp_ln_base import CUDAFPLNBase
+
+
+ class CUDAFPLN(CUDAFPLNBase):
+     """
+     Floating point layer norm kernel for CUDA/RoCM.
+
+     Performs: z = ln(x)
+     """
+
+     def __call__(self, output_z: torch.Tensor, input_x: torch.Tensor, gamma: torch.Tensor,
+                  beta: torch.Tensor) -> torch.Tensor:
+         """
+         output_z may alias input_x directly. All Tensors should have the same shape.
+
+         Parameters:
+             output_z (torch.Tensor): Output tensor.
+             input_x (torch.Tensor): Input tensor.
+             gamma (torch.Tensor): Gamma tensor.
+             beta (torch.Tensor): Beta tensor.
+         """
+         self.inf_module.layer_norm(output_z, input_x, gamma, beta, self.epsilon)
+         return output_z
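A hedged usage sketch for CUDAFPLN (illustrative, not part of this commit): it assumes the extension builds, and it compares against torch.nn.functional.layer_norm with a loose fp16 tolerance.

# Hypothetical example: out-of-place layer norm over the channel dimension.
import torch
from deepspeed.inference.v2.kernels.core_ops import CUDAFPLN

tokens, channels = 32, 2048
x = torch.randn(tokens, channels, dtype=torch.float16, device="cuda")
gamma = torch.randn(channels, dtype=torch.float16, device="cuda")
beta = torch.randn(channels, dtype=torch.float16, device="cuda")
out = torch.empty_like(x)

ln = CUDAFPLN(channels, torch.float16, epsilon=1e-5)
ln(out, x, gamma, beta)

ref = torch.nn.functional.layer_norm(x.float(), (channels,), gamma.float(), beta.float(), 1e-5)
torch.testing.assert_close(out.float(), ref, rtol=1e-2, atol=1e-2)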
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_post_ln.py ADDED
@@ -0,0 +1,34 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import torch
+
+ from .cuda_fp_ln_base import CUDAFPLNBase
+
+
+ class CUDAFPPostLN(CUDAFPLNBase):
+     """
+     Floating point post-LayerNorm kernel for CUDA/RoCM.
+
+     Performs: z = ln(x + y)
+     """
+
+     def __call__(self, output_z: torch.Tensor, input_x: torch.Tensor, input_y: torch.Tensor, gamma: torch.Tensor,
+                  beta: torch.Tensor) -> torch.Tensor:
+         """
+         Either input_x or input_y can alias output_z.
+
+         Parameters:
+             output_z (torch.Tensor): Output tensor.
+             input_x (torch.Tensor): Input tensor.
+             input_y (torch.Tensor): Input tensor.
+             gamma (torch.Tensor): Gamma tensor.
+             beta (torch.Tensor): Beta tensor.
+
+         Returns:
+             output (torch.Tensor): Output tensor.
+         """
+         self.inf_module.post_layer_norm(output_z, input_x, input_y, gamma, beta, self.epsilon)
+         return output_z
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/cuda_pre_ln.py ADDED
@@ -0,0 +1,39 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from typing import Tuple
+
+ import torch
+
+ from .cuda_fp_ln_base import CUDAFPLNBase
+
+
+ class CUDAFPPreLN(CUDAFPLNBase):
+     """
+     Floating point pre-LayerNorm kernel for CUDA/RoCM.
+
+     Performs: z_res = x_res + y_hid
+               z_hid = ln(z_res)
+     """
+
+     def __call__(self, z_res: torch.Tensor, z_hid: torch.Tensor, x_res: torch.Tensor, y_hid: torch.Tensor,
+                  gamma: torch.Tensor, beta: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+         """
+         z_res can alias x_res. All non-parameter input/output tensors
+         must have the same shape. z_hid can alias y_hid.
+
+         Parameters:
+             z_res (torch.Tensor): Output residual.
+             z_hid (torch.Tensor): Output hidden states.
+             x_res (torch.Tensor): Input residual.
+             y_hid (torch.Tensor): Input hidden states.
+             gamma (torch.Tensor): Gamma tensor.
+             beta (torch.Tensor): Beta tensor.
+
+         Returns:
+             output (torch.Tensor): Output tensor.
+         """
+         self.inf_module.pre_layer_norm(z_res, z_hid, x_res, y_hid, gamma, beta, self.epsilon)
+         return z_res, z_hid
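A hedged usage sketch for CUDAFPPreLN (illustrative, not part of this commit; same build assumptions as the earlier sketches). The call fills both outputs: the summed residual and its normalized counterpart.

# Hypothetical example: pre-LN producing (z_res, z_hid) from a residual and hidden states.
import torch
from deepspeed.inference.v2.kernels.core_ops import CUDAFPPreLN

tokens, channels = 32, 2048
x_res = torch.randn(tokens, channels, dtype=torch.float16, device="cuda")
y_hid = torch.randn(tokens, channels, dtype=torch.float16, device="cuda")
gamma = torch.ones(channels, dtype=torch.float16, device="cuda")
beta = torch.zeros(channels, dtype=torch.float16, device="cuda")
z_res = torch.empty_like(x_res)
z_hid = torch.empty_like(y_hid)

pre_ln = CUDAFPPreLN(channels, torch.float16)
z_res, z_hid = pre_ln(z_res, z_hid, x_res, y_hid, gamma, beta)
# z_res should hold x_res + y_hid; z_hid should hold its layer norm.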
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.cpp ADDED
@@ -0,0 +1,102 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "layer_norm.h"
7
+
8
+ #define DISPATCH_LAYER_NORM(T_TYPE, C_TYPE) \
9
+ if (input.options().dtype() == torch::T_TYPE) { \
10
+ launch_fused_ln((C_TYPE*)output.data_ptr(), \
11
+ (const C_TYPE*)input.data_ptr(), \
12
+ (const C_TYPE*)gamma.data_ptr(), \
13
+ (const C_TYPE*)beta.data_ptr(), \
14
+ epsilon, \
15
+ rows, \
16
+ elems_per_row, \
17
+ at::cuda::getCurrentCUDAStream()); \
18
+ }
19
+
20
+ void ds_layer_norm(at::Tensor& output,
21
+ at::Tensor& input,
22
+ at::Tensor& gamma,
23
+ at::Tensor& beta,
24
+ float epsilon)
25
+ {
26
+ bool ragged_input = input.dim() == 2;
27
+
28
+ const int rows = ragged_input ? input.size(0) : input.size(0) * input.size(1);
29
+ const int elems_per_row = ragged_input ? input.size(1) : input.size(2);
30
+
31
+ DISPATCH_LAYER_NORM(kFloat, float);
32
+ DISPATCH_LAYER_NORM(kHalf, __half);
33
+ #ifdef BF16_AVAILABLE
34
+ DISPATCH_LAYER_NORM(kBFloat16, __nv_bfloat16);
35
+ #endif
36
+ }
37
+
38
+ #define DISPATCH_LAYER_NORM_RESIDUAL(T_TYPE, C_TYPE) \
39
+ if (input.options().dtype() == torch::T_TYPE) { \
40
+ launch_fused_post_ln((C_TYPE*)output.data_ptr(), \
41
+ (const C_TYPE*)input.data_ptr(), \
42
+ (const C_TYPE*)residual.data_ptr(), \
43
+ (const C_TYPE*)gamma.data_ptr(), \
44
+ (const C_TYPE*)beta.data_ptr(), \
45
+ epsilon, \
46
+ rows, \
47
+ elems_per_row, \
48
+ at::cuda::getCurrentCUDAStream()); \
49
+ }
50
+
51
+ void ds_post_layer_norm(at::Tensor& output,
52
+ at::Tensor& input,
53
+ at::Tensor& residual,
54
+ at::Tensor& gamma,
55
+ at::Tensor& beta,
56
+ float epsilon)
57
+ {
58
+ bool ragged_input = input.dim() == 2;
59
+
60
+ const int rows = ragged_input ? input.size(0) : input.size(0) * input.size(1);
61
+ const int elems_per_row = ragged_input ? input.size(1) : input.size(2);
62
+
63
+ DISPATCH_LAYER_NORM_RESIDUAL(kFloat, float);
64
+ DISPATCH_LAYER_NORM_RESIDUAL(kHalf, __half);
65
+ #ifdef BF16_AVAILABLE
66
+ DISPATCH_LAYER_NORM_RESIDUAL(kBFloat16, __nv_bfloat16);
67
+ #endif
68
+ }
69
+
70
+ #define DISPATCH_PRE_LAYER_NORM_RESIDUAL(T_TYPE, C_TYPE) \
71
+ if (input.options().dtype() == torch::T_TYPE) { \
72
+ launch_fused_pre_ln((C_TYPE*)norm_output.data_ptr(), \
73
+ (C_TYPE*)res_output.data_ptr(), \
74
+ (const C_TYPE*)input.data_ptr(), \
75
+ (const C_TYPE*)residual.data_ptr(), \
76
+ (const C_TYPE*)gamma.data_ptr(), \
77
+ (const C_TYPE*)beta.data_ptr(), \
78
+ epsilon, \
79
+ rows, \
80
+ elems_per_row, \
81
+ at::cuda::getCurrentCUDAStream()); \
82
+ }
83
+
84
+ void ds_pre_layer_norm(at::Tensor& res_output,
85
+ at::Tensor& norm_output,
86
+ at::Tensor& input,
87
+ at::Tensor& residual,
88
+ at::Tensor& gamma,
89
+ at::Tensor& beta,
90
+ float epsilon)
91
+ {
92
+ bool ragged_input = input.dim() == 2;
93
+
94
+ const int rows = ragged_input ? input.size(0) : input.size(0) * input.size(1);
95
+ const int elems_per_row = ragged_input ? input.size(1) : input.size(2);
96
+
97
+ DISPATCH_PRE_LAYER_NORM_RESIDUAL(kFloat, float);
98
+ DISPATCH_PRE_LAYER_NORM_RESIDUAL(kHalf, __half);
99
+ #ifdef BF16_AVAILABLE
100
+ DISPATCH_PRE_LAYER_NORM_RESIDUAL(kBFloat16, __nv_bfloat16);
101
+ #endif
102
+ }
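For cross-checking the dispatch above, a reference formulation (assumed, not part of the commit) of what ds_post_layer_norm computes: the leading dimensions are flattened into rows and the normalization runs over elems_per_row, after the residual add.

# Reference sketch for launch_fused_post_ln semantics: ln(x + residual) over the last dim.
import torch

def post_layer_norm_ref(x: torch.Tensor, residual: torch.Tensor,
                        gamma: torch.Tensor, beta: torch.Tensor,
                        epsilon: float = 1e-5) -> torch.Tensor:
    rows = x.numel() // x.size(-1)          # same flattening as ds_post_layer_norm
    elems_per_row = x.size(-1)
    summed = (x + residual).reshape(rows, elems_per_row).float()
    mean = summed.mean(dim=-1, keepdim=True)
    var = summed.var(dim=-1, unbiased=False, keepdim=True)
    normed = (summed - mean) * torch.rsqrt(var + epsilon)
    return (normed * gamma.float() + beta.float()).reshape(x.shape).to(x.dtype)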
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm.h ADDED
@@ -0,0 +1,67 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include <c10/cuda/CUDAStream.h>
9
+ #include <torch/extension.h>
10
+ #include "ds_kernel_utils.h"
11
+
12
+ /*
13
+ Kernel launch methods for layer norm variants.
14
+ */
15
+
16
+ template <typename T>
17
+ void launch_fused_ln(T* output,
18
+ const T* vals,
19
+ const T* gamma,
20
+ const T* beta,
21
+ float epsilon,
22
+ int rows,
23
+ int elems_per_row,
24
+ cudaStream_t stream);
25
+
26
+ template <typename T>
27
+ void launch_fused_post_ln(T* output,
28
+ const T* vals,
29
+ const T* residual,
30
+ const T* gamma,
31
+ const T* beta,
32
+ float epsilon,
33
+ int rows,
34
+ int elems_per_row,
35
+ cudaStream_t stream);
36
+ template <typename T>
37
+ void launch_fused_pre_ln(T* norm_output,
38
+ T* res_output,
39
+ const T* vals,
40
+ const T* residual,
41
+ const T* gamma,
42
+ const T* beta,
43
+ float epsilon,
44
+ int rows,
45
+ int elems_per_row,
46
+ cudaStream_t stream);
47
+
48
+ void ds_layer_norm(at::Tensor& output,
49
+ at::Tensor& input,
50
+ at::Tensor& gamma,
51
+ at::Tensor& beta,
52
+ float epsilon);
53
+
54
+ void ds_post_layer_norm(at::Tensor& output,
55
+ at::Tensor& input,
56
+ at::Tensor& residual,
57
+ at::Tensor& gamma,
58
+ at::Tensor& beta,
59
+ float epsilon);
60
+
61
+ void ds_pre_layer_norm(at::Tensor& res_output,
62
+ at::Tensor& norm_output,
63
+ at::Tensor& input,
64
+ at::Tensor& residual,
65
+ at::Tensor& gamma,
66
+ at::Tensor& beta,
67
+ float epsilon);
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_layer_norm/layer_norm_cuda.cu ADDED
@@ -0,0 +1,490 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "conversion_utils.h"
7
+ #include "ds_kernel_utils.h"
8
+ #include "memory_access_utils.h"
9
+ #include "reduction_utils.h"
10
+
11
+ namespace cg = cooperative_groups;
12
+ using rop = reduce::ROpType;
13
+
14
+ namespace ln {
15
+ constexpr int granularity = 16;
16
+ } // namespace ln
17
+
18
+ /*
19
+ Regular layer norm implementation. Assumes elems_per_row % 8
20
+ is equal to 0.
21
+
22
+ Args:
23
+ output: buffer for output data
24
+ vals: buffer for input data
25
+ gamma: gain for normalization
26
+ beta: bias for normalization
27
+ epsilon: numeric stability
28
+ elems_per_row: number of elements each block will normalize
29
+ */
30
+ template <typename T, int unRoll, int threadsPerGroup, int maxThreads>
31
+ __global__ void fused_ln(T* output,
32
+ const T* vals,
33
+ const T* gamma,
34
+ const T* beta,
35
+ float epsilon,
36
+ int elems_per_row)
37
+ {
38
+ constexpr int T_per_load = ln::granularity / sizeof(T);
39
+
40
+ cg::thread_block tb = cg::this_thread_block();
41
+ cg::thread_block_tile<hw_warp_size> warp = cg::tiled_partition<hw_warp_size>(tb);
42
+
43
+ // X-dimension of the block
44
+ const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) +
45
+ (tb.thread_index().y * elems_per_row);
46
+ const int thread_offset = tb.thread_index().x * T_per_load;
47
+ const int base_offset = block_offset + thread_offset;
48
+ const int stride = blockDim.x * T_per_load;
49
+
50
+ float sum = reduce::init<rop::Add, float>();
51
+
52
+ const T* input_base = vals + base_offset;
53
+
54
+ T local_buffer[unRoll * T_per_load];
55
+
56
+ #pragma unRoll
57
+ for (int i = 0; i < unRoll; i++) {
58
+ T* iteration_buffer = local_buffer + i * T_per_load;
59
+
60
+ mem_access::load_global<ln::granularity>(
61
+ iteration_buffer, input_base + i * stride, thread_offset + i * stride < elems_per_row);
62
+
63
+ #pragma unRoll
64
+ for (int j = 0; j < T_per_load; j++) {
65
+ float vals_up_cast = conversion::to<float>(iteration_buffer[j]);
66
+ sum = reduce::element<rop::Add>(sum, vals_up_cast);
67
+ }
68
+ }
69
+
70
+ reduce::partitioned_block<rop::Add, threadsPerGroup>(tb, warp, sum);
71
+ const float mean = sum / elems_per_row;
72
+
73
+ float mean_diff = reduce::init<rop::Add, float>();
74
+
75
+ #pragma unRoll
76
+ for (int i = 0; i < unRoll; i++) {
77
+ #pragma unRoll
78
+ for (int j = 0; j < T_per_load; j++) {
79
+ // Using a 0 value here skews the variance, have to if-guard
80
+ if (thread_offset + i * stride < elems_per_row) {
81
+ float diff = (conversion::to<float>(local_buffer[i * T_per_load + j]) - mean);
82
+ mean_diff = reduce::element<rop::Add>(mean_diff, diff * diff);
83
+ }
84
+ }
85
+ }
86
+
87
+ reduce::partitioned_block<rop::Add, threadsPerGroup>(tb, warp, mean_diff);
88
+ const float variance = mean_diff / elems_per_row;
89
+ const float denom = __frsqrt_rn(variance + epsilon);
90
+
91
+ T* block_output = output + block_offset;
92
+
93
+ #pragma unRoll
94
+ for (int i = 0; i < unRoll; i++) {
95
+ T* iteration_buffer = local_buffer + i * T_per_load;
96
+ const int iter_idx = i * stride + thread_offset;
97
+ const bool do_loads = iter_idx < elems_per_row;
98
+
99
+ T gamma_local[T_per_load], beta_local[T_per_load];
100
+
101
+ mem_access::load_global<ln::granularity>(gamma_local, gamma + iter_idx, do_loads);
102
+ mem_access::load_global<ln::granularity>(beta_local, beta + iter_idx, do_loads);
103
+
104
+ #pragma unRoll
105
+ for (int j = 0; j < T_per_load; j++) {
106
+ float val = conversion::to<float>(iteration_buffer[j]);
107
+ val = (val - mean) * denom;
108
+ val =
109
+ val * conversion::to<float>(gamma_local[j]) + conversion::to<float>(beta_local[j]);
110
+ iteration_buffer[j] = conversion::to<T>(val);
111
+ }
112
+
113
+ if (do_loads) {
114
+ mem_access::store_global<ln::granularity>(block_output + iter_idx, iteration_buffer);
115
+ }
116
+ }
117
+ }
118
+
119
+ #define LAUNCH_FUSED_LN(unRollFactor, threadsPerGroup, maxThreads) \
120
+ fused_ln<T, unRollFactor, threadsPerGroup, maxThreads> \
121
+ <<<grid, block, 0, stream>>>(output, vals, gamma, beta, epsilon, elems_per_row);
122
+
123
+ template <typename T>
124
+ void launch_fused_ln(T* output,
125
+ const T* vals,
126
+ const T* gamma,
127
+ const T* beta,
128
+ float epsilon,
129
+ int rows,
130
+ int elems_per_row,
131
+ cudaStream_t stream)
132
+ {
133
+ // 8 for __half, 4 for float
134
+ constexpr int T_per_load = ln::granularity / sizeof(T);
135
+
136
+ constexpr int maxThreads = 256;
137
+
138
+ // For float, unRoll 4; for __half, unRoll 2
139
+ constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2;
140
+
141
+ const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false;
142
+ const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internal_unRoll;
143
+
144
+ // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of
145
+ // warp-sized blocks rather than stepping up to 64/96 threads
146
+ const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step);
147
+ const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads;
148
+
149
+ const int groups_per_block_max =
150
+ is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1;
151
+ const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max;
152
+ const int groups_launch = (groups_per_block + rows - 1) / groups_per_block;
153
+
154
+ dim3 block(threadsPerGroup, groups_per_block);
155
+ dim3 grid(groups_launch);
156
+
157
+ const int elems_per_step = threadsPerGroup * h_per_step;
158
+ const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step;
159
+
160
+ if (is_subblock_schedule) {
161
+ // <=128
162
+ if (threadsPerGroup == 1) {
163
+ LAUNCH_FUSED_LN(1, 1, maxThreads);
164
+ } else if (threadsPerGroup == 2) {
165
+ LAUNCH_FUSED_LN(1, 2, maxThreads);
166
+ } else if (threadsPerGroup == 4) {
167
+ LAUNCH_FUSED_LN(1, 4, maxThreads);
168
+ } else if (threadsPerGroup == 8) {
169
+ LAUNCH_FUSED_LN(1, 8, maxThreads);
170
+ } else if (threadsPerGroup == 16) {
171
+ LAUNCH_FUSED_LN(1, 16, maxThreads);
172
+ }
173
+ } else if (external_unRoll == 1) {
174
+ // 129 - 4096 elems
175
+ // (this can launch with 1-7 warps as well)
176
+ LAUNCH_FUSED_LN(1 * internal_unRoll, maxThreads, maxThreads);
177
+ } else if (external_unRoll == 2) {
178
+ // 4097 - 8192 elems
179
+ LAUNCH_FUSED_LN(2 * internal_unRoll, maxThreads, maxThreads);
180
+ } else if (external_unRoll == 3) {
181
+ // 8193 - 12288 elems
182
+ LAUNCH_FUSED_LN(3 * internal_unRoll, maxThreads, maxThreads);
183
+ } else if (external_unRoll == 4) {
184
+ // 12289 - 16384 elems
185
+ LAUNCH_FUSED_LN(4 * internal_unRoll, maxThreads, maxThreads);
186
+ }
187
+ }
188
+
189
+ #define INSTANTIATE_FUSED_LN(T) \
190
+ template void launch_fused_ln(T*, const T*, const T*, const T*, float, int, int, cudaStream_t);
191
+
192
+ INSTANTIATE_FUSED_LN(__half);
193
+ #ifdef BF16_AVAILABLE
194
+ INSTANTIATE_FUSED_LN(__nv_bfloat16);
195
+ #endif
196
+ INSTANTIATE_FUSED_LN(float);
197
+
198
+ /*
199
+ Fused residual + bias + layer norm implementation. Assumes elems_per_row % 8
200
+ is equal to 0.
201
+
202
+ TODO(cmikeh2): Goal is to deprecate this implementation. The bias + residual
203
+ need to be fused into compute-bound producer operations.
204
+
205
+ Args:
206
+ output: buffer for output data
207
+ res_output: output of residual addition
208
+ vals: buffer for input data
209
+ residual: residual data
210
+ bias: bias of input data
211
+ gamma: gain for normalization
212
+ beta: bias for normalization
213
+ epsilon: numeric stability
214
+ elems_per_row: number of elements each block will normalize
215
+ Template arg:
216
+ StoreResidual: controls whether the residual calculation is stored
217
+ or not. When set to false, the input `res_output` is unused.
218
+ */
219
+ template <typename T, int unRoll, int threadsPerGroup, int maxThreads, bool preLnResidual>
220
+ __global__ void fused_residual_ln(T* output,
221
+ T* res_output,
222
+ const T* vals,
223
+ const T* residual,
224
+ const T* gamma,
225
+ const T* beta,
226
+ float epsilon,
227
+ int elems_per_row)
228
+ {
229
+ constexpr int T_per_load = ln::granularity / sizeof(T);
230
+
231
+ cg::thread_block tb = cg::this_thread_block();
232
+ cg::thread_block_tile<hw_warp_size> warp = cg::tiled_partition<hw_warp_size>(tb);
233
+
234
+ // X-dimension of the block
235
+ const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) +
236
+ (tb.thread_index().y * elems_per_row);
237
+ const int thread_offset = tb.thread_index().x * T_per_load;
238
+ const int base_offset = block_offset + thread_offset;
239
+ const int stride = tb.size() * T_per_load;
240
+
241
+ float sum = reduce::init<rop::Add, float>();
242
+
243
+ const T* input_base = vals + base_offset;
244
+ const T* residual_base = residual + base_offset;
245
+
246
+ T local_buffer[unRoll * T_per_load];
247
+
248
+ // Unlike a vanilla layernorm, since we're fusing the two adds as well,
249
+ // an inner unRoll seems to be less valuable. If anything, a double unRoll
250
+ // makes the most sense if we find we are having performance issues.
251
+ #pragma unroll
252
+ for (int i = 0; i < unRoll; i++) {
253
+ T* iteration_buffer = local_buffer + i * T_per_load;
254
+ T residual_buffer[T_per_load];
255
+ T bias_buffer[T_per_load];
256
+
257
+ mem_access::load_global<ln::granularity>(
258
+ iteration_buffer, input_base + i * stride, thread_offset + i * stride < elems_per_row);
259
+ mem_access::load_global<ln::granularity>(residual_buffer,
260
+ residual_base + i * stride,
261
+ thread_offset + i * stride < elems_per_row);
262
+
263
+ #pragma unroll
264
+ for (int j = 0; j < T_per_load; j++) {
265
+ float vals_up_cast = conversion::to<float>(iteration_buffer[j]);
266
+ float res_up_cast = conversion::to<float>(residual_buffer[j]);
267
+ vals_up_cast += res_up_cast;
268
+ sum = reduce::element<rop::Add>(sum, vals_up_cast);
269
+ iteration_buffer[j] = conversion::to<T>(vals_up_cast);
270
+ }
271
+
272
+ if (preLnResidual && (thread_offset + i * stride < elems_per_row)) {
273
+ mem_access::store_global<ln::granularity>(res_output + base_offset + i * stride,
274
+ iteration_buffer);
275
+ }
276
+ }
277
+
278
+ reduce::partitioned_block<rop::Add, threadsPerGroup>(tb, warp, sum);
279
+ const float mean = sum / elems_per_row;
280
+
281
+ float mean_diff = reduce::init<rop::Add, float>();
282
+ #pragma unroll
283
+ for (int i = 0; i < unRoll; i++) {
284
+ #pragma unroll
285
+ for (int j = 0; j < T_per_load; j++) {
286
+ // Using a 0 value here skews the variance, have to if-guard
287
+ if (thread_offset + i * stride < elems_per_row) {
288
+ float diff = (conversion::to<float>(local_buffer[i * T_per_load + j]) - mean);
289
+ mean_diff = reduce::element<rop::Add>(mean_diff, diff * diff);
290
+ }
291
+ }
292
+ }
293
+
294
+ reduce::partitioned_block<rop::Add, threadsPerGroup>(tb, warp, mean_diff);
295
+ const float variance = mean_diff / elems_per_row;
296
+ const float denom = __frsqrt_rn(variance + epsilon);
297
+
298
+ T* block_output = output + block_offset;
299
+
300
+ #pragma unroll
301
+ for (int i = 0; i < unRoll; i++) {
302
+ T* iteration_buffer = local_buffer + i * T_per_load;
303
+ const int iter_idx = i * stride + thread_offset;
304
+ const bool do_loads = iter_idx < elems_per_row;
305
+
306
+ T gamma_local[T_per_load], beta_local[T_per_load];
307
+
308
+ mem_access::load_global<ln::granularity>(gamma_local, gamma + iter_idx, do_loads);
309
+ mem_access::load_global<ln::granularity>(beta_local, beta + iter_idx, do_loads);
310
+
311
+ #pragma unroll
312
+ for (int j = 0; j < T_per_load; j++) {
313
+ float val = conversion::to<float>(iteration_buffer[j]);
314
+ val = (val - mean) * denom;
315
+ val =
316
+ val * conversion::to<float>(gamma_local[j]) + conversion::to<float>(beta_local[j]);
317
+ iteration_buffer[j] = conversion::to<T>(val);
318
+ }
319
+
320
+ if (do_loads) {
321
+ mem_access::store_global<ln::granularity>(block_output + iter_idx, iteration_buffer);
322
+ }
323
+ }
324
+ }
325
+
326
+ // TODO(cmikeh2): There's a bunch of redundancy here that needs to be removed/simplified.
327
+ #define LAUNCH_FUSED_RES_LN(unRollFactor, threadsPerGroup, maxThreads) \
328
+ fused_residual_ln<T, unRollFactor, threadsPerGroup, maxThreads, false> \
329
+ <<<grid, block, 0, stream>>>( \
330
+ output, nullptr, vals, residual, gamma, beta, epsilon, elems_per_row);
331
+
332
+ template <typename T>
333
+ void launch_fused_post_ln(T* output,
334
+ const T* vals,
335
+ const T* residual,
336
+ const T* gamma,
337
+ const T* beta,
338
+ float epsilon,
339
+ int rows,
340
+ int elems_per_row,
341
+ cudaStream_t stream)
342
+ {
343
+ // 8 for __half, 4 for float
344
+ constexpr int T_per_load = ln::granularity / sizeof(T);
345
+
346
+ constexpr int maxThreads = 256;
347
+
348
+ // For float, unRoll 4; for __half, unRoll 2
349
+ constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2;
350
+
351
+ const bool is_subblock_schedule = (elems_per_row <= 128);
352
+ const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internal_unRoll;
353
+
354
+ // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of
355
+ // warp-sized blocks rather than stepping up to 64/96 threads
356
+ const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step);
357
+ const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads;
358
+
359
+ const int groups_per_block_max =
360
+ is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1;
361
+ const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max;
362
+ const int groups_launch = (groups_per_block + rows - 1) / groups_per_block;
363
+
364
+ dim3 block(threadsPerGroup, groups_per_block);
365
+ dim3 grid(groups_launch);
366
+
367
+ const int elems_per_step = threadsPerGroup * h_per_step;
368
+ const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step;
369
+
370
+ if (is_subblock_schedule) {
371
+ // <=128
372
+ if (threadsPerGroup == 1) {
373
+ LAUNCH_FUSED_RES_LN(1, 1, maxThreads);
374
+ } else if (threadsPerGroup == 2) {
375
+ LAUNCH_FUSED_RES_LN(1, 2, maxThreads);
376
+ } else if (threadsPerGroup == 4) {
377
+ LAUNCH_FUSED_RES_LN(1, 4, maxThreads);
378
+ } else if (threadsPerGroup == 8) {
379
+ LAUNCH_FUSED_RES_LN(1, 8, maxThreads);
380
+ } else if (threadsPerGroup == 16) {
381
+ LAUNCH_FUSED_RES_LN(1, 16, maxThreads);
382
+ }
383
+ } else if (external_unRoll == 1) {
384
+ // 129 - 4096 elems
385
+ // (this can launch with 1-7 warps as well)
386
+ LAUNCH_FUSED_RES_LN(1 * internal_unRoll, maxThreads, maxThreads);
387
+ } else if (external_unRoll == 2) {
388
+ // 4097 - 8192 elems
389
+ LAUNCH_FUSED_RES_LN(2 * internal_unRoll, maxThreads, maxThreads);
390
+ } else if (external_unRoll == 3) {
391
+ // 8193 - 12288 elems
392
+ LAUNCH_FUSED_RES_LN(3 * internal_unRoll, maxThreads, maxThreads);
393
+ } else if (external_unRoll == 4) {
394
+ // 12289 - 16384 elems
395
+ LAUNCH_FUSED_RES_LN(4 * internal_unRoll, maxThreads, maxThreads);
396
+ }
397
+ }
398
+
399
+ #define LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(unRollFactor, threadsPerGroup, maxThreads) \
400
+ fused_residual_ln<T, unRollFactor, threadsPerGroup, maxThreads, true> \
401
+ <<<grid, block, 0, stream>>>( \
402
+ norm_output, res_output, vals, residual, gamma, beta, epsilon, elems_per_row);
403
+
404
+ template <typename T>
405
+ void launch_fused_pre_ln(T* norm_output,
406
+ T* res_output,
407
+ const T* vals,
408
+ const T* residual,
409
+ const T* gamma,
410
+ const T* beta,
411
+ float epsilon,
412
+ int rows,
413
+ int elems_per_row,
414
+ cudaStream_t stream)
415
+ {
416
+ // 8 for __half, 4 for float
417
+ constexpr int T_per_load = ln::granularity / sizeof(T);
418
+
419
+ constexpr int maxThreads = 256;
420
+
421
+ // For float, unRoll 4; for __half, unRoll 2
422
+ constexpr int internal_unRoll = sizeof(T) == 4 ? 4 : 2;
423
+
424
+ const bool is_subblock_schedule = (elems_per_row <= 128);
425
+ const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internal_unRoll;
426
+
427
+ // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of
428
+ // warp-sized blocks rather than stepping up to 64/96 threads
429
+ const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step);
430
+ const int threadsPerGroup = (one_step_threads < maxThreads) ? one_step_threads : maxThreads;
431
+
432
+ const int groups_per_block_max =
433
+ is_subblock_schedule ? (maxThreads + threadsPerGroup - 1) / threadsPerGroup : 1;
434
+ const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max;
435
+ const int groups_launch = (groups_per_block + rows - 1) / groups_per_block;
436
+
437
+ dim3 block(threadsPerGroup, groups_per_block);
438
+ dim3 grid(groups_launch);
439
+
440
+ const int elems_per_step = threadsPerGroup * h_per_step;
441
+ const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step;
442
+
443
+ if (is_subblock_schedule) {
444
+ // <=128
445
+ if (threadsPerGroup == 1) {
446
+ LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 1, maxThreads);
447
+ } else if (threadsPerGroup == 2) {
448
+ LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 2, maxThreads);
449
+ } else if (threadsPerGroup == 4) {
450
+ LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 4, maxThreads);
451
+ } else if (threadsPerGroup == 8) {
452
+ LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 8, maxThreads);
453
+ } else if (threadsPerGroup == 16) {
454
+ LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1, 16, maxThreads);
455
+ }
456
+ } else if (external_unRoll == 1) {
457
+ // 129 - 4096 elems
458
+ // (this can launch with 1-7 warps as well)
459
+ LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(1 * internal_unRoll, maxThreads, maxThreads);
460
+ } else if (external_unRoll == 2) {
461
+ // 4097 - 8192 elems
462
+ LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(2 * internal_unRoll, maxThreads, maxThreads);
463
+ } else if (external_unRoll == 3) {
464
+ // 8193 - 12288 elems
465
+ LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(3 * internal_unRoll, maxThreads, maxThreads);
466
+ } else if (external_unRoll == 4) {
467
+ // 12289 - 16384 elems
468
+ LAUNCH_FUSED_RES_LN_STORE_PRE_LN_RES(4 * internal_unRoll, maxThreads, maxThreads);
469
+ }
470
+ }
471
+
472
+ #define INSTANTIATE_RES_LN(T) \
473
+ template void launch_fused_post_ln<T>( \
474
+ T*, const T*, const T*, const T*, const T*, float, int, int, cudaStream_t);
475
+
476
+ #define INSTANTIATE_PRE_LN_RES(T) \
477
+ template void launch_fused_pre_ln<T>( \
478
+ T*, T*, const T*, const T*, const T*, const T*, float, int, int, cudaStream_t);
479
+
480
+ INSTANTIATE_RES_LN(__half);
481
+ INSTANTIATE_RES_LN(float);
482
+ #ifdef BF16_AVAILABLE
483
+ INSTANTIATE_RES_LN(__nv_bfloat16);
484
+ #endif
485
+
486
+ INSTANTIATE_PRE_LN_RES(__half);
487
+ INSTANTIATE_PRE_LN_RES(float);
488
+ #ifdef BF16_AVAILABLE
489
+ INSTANTIATE_PRE_LN_RES(__nv_bfloat16);
490
+ #endif
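Note on the launch heuristics above: rows of at most 128 elements get a sub-block schedule (several small thread groups packed into one 256-thread block), while wider rows use a full block whose unroll factor grows with the row width. A minimal Python sketch of that selection logic, assuming the 16-byte load granularity and maxThreads = 256 used in launch_fused_post_ln/launch_fused_pre_ln (helper names here are illustrative only):

def next_pow2(x: int) -> int:
    return 1 if x <= 1 else 1 << (x - 1).bit_length()

def ln_launch_schedule(rows: int, elems_per_row: int, bytes_per_elem: int = 2):
    t_per_load = 16 // bytes_per_elem                 # elements per 16-byte load
    max_threads = 256
    internal_unroll = 4 if bytes_per_elem == 4 else 2
    is_subblock = elems_per_row <= 128
    h_per_step = t_per_load if is_subblock else t_per_load * internal_unroll
    threads_per_group = min(next_pow2((elems_per_row + h_per_step - 1) // h_per_step), max_threads)
    groups_per_block = min(rows, (max_threads // threads_per_group) if is_subblock else 1)
    blocks = (rows + groups_per_block - 1) // groups_per_block
    elems_per_step = threads_per_group * h_per_step
    external_unroll = (elems_per_row + elems_per_step - 1) // elems_per_step
    return threads_per_group, groups_per_block, blocks, external_unroll

# e.g. fp16 rows of 4096 elements -> (256, 1, rows, 1): one full 256-thread group per row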
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .cuda_linear import *
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (247 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/__pycache__/cuda_linear.cpython-310.pyc ADDED
Binary file (3.76 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/cuda_linear.py ADDED
@@ -0,0 +1,207 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+
8
+ from ....inference_utils import DtypeEnum
9
+ from ....logging import inference_logger
10
+ from deepspeed.ops.op_builder import InferenceCoreBuilder
11
+ from ... import DSKernelBase
12
+
13
+
14
+ class CUDAWf6Af16Linear(DSKernelBase):
15
+ """
16
+ Wrapper around the CUDA kernel of Wf6Af16 quantized linear.
17
+
18
+ Performs z = x @ y
19
+ """
20
+ supported_dtypes = [DtypeEnum.fp16]
21
+
22
+ def __init__(self):
23
+ self.inf_module = InferenceCoreBuilder().load()
24
+ self.inf_module.create_handle()
25
+ self.kernel = self.inf_module.cuda_wf6af16_linear
26
+ # The split_k_map is profiled on A100-80G GPU for some common shapes.
27
+ # It is an array of dictionaries, where the array index is the tokens chunk id.
28
+ # The dictionary is the mapping from the output channel to the split-K size.
29
+ self.split_k_map = [
30
+ { # tokens: [1, 64]
31
+ 3072: 18,
32
+ 4096: 13,
33
+ 5120: 10,
34
+ 6144: 9,
35
+ 8192: 6,
36
+ 10240: 5,
37
+ 14336: 7,
38
+ 28672: 7,
39
+ 57344: 7
40
+ },
41
+ { # tokens: [65:128]
42
+ 3072: 9,
43
+ 4096: 6,
44
+ 5120: 5,
45
+ 6144: 9,
46
+ 8192: 3,
47
+ 10240: 5,
48
+ 14336: 7,
49
+ 28672: 7,
50
+ 57344: 6
51
+ },
52
+ { # tokens: [129:192]
53
+ 3072: 6,
54
+ 4096: 4,
55
+ 5120: 7,
56
+ 6144: 3,
57
+ 8192: 2,
58
+ 10240: 5,
59
+ 14336: 5,
60
+ 28672: 5,
61
+ 57344: 4
62
+ },
63
+ { # tokens: [193:256]
64
+ 3072: 9,
65
+ 4096: 3,
66
+ 5120: 5,
67
+ 6144: 2,
68
+ 8192: 5,
69
+ 10240: 4,
70
+ 14336: 8,
71
+ 28672: 6,
72
+ 57344: 4
73
+ },
74
+ { # tokens: [257:320]
75
+ 3072: 7,
76
+ 4096: 5,
77
+ 5120: 2,
78
+ 6144: 5,
79
+ 8192: 4,
80
+ 10240: 1,
81
+ 14336: 3,
82
+ 28672: 3,
83
+ 57344: 4
84
+ },
85
+ { # tokens: [321:384]
86
+ 3072: 3,
87
+ 4096: 2,
88
+ 5120: 5,
89
+ 6144: 3,
90
+ 8192: 1,
91
+ 10240: 8,
92
+ 14336: 3,
93
+ 28672: 4,
94
+ 57344: 3
95
+ },
96
+ { # tokens: [385:448]
97
+ 3072: 5,
98
+ 4096: 7,
99
+ 5120: 3,
100
+ 6144: 5,
101
+ 8192: 7,
102
+ 10240: 3,
103
+ 14336: 1,
104
+ 28672: 1,
105
+ 57344: 3
106
+ },
107
+ { # tokens: [449:512]
108
+ 3072: 2,
109
+ 4096: 5,
110
+ 5120: 4,
111
+ 6144: 1,
112
+ 8192: 5,
113
+ 10240: 2,
114
+ 14336: 6,
115
+ 28672: 4,
116
+ 57344: 1
117
+ },
118
+ { # tokens: [513:576]
119
+ 3072: 2,
120
+ 4096: 3,
121
+ 5120: 1,
122
+ 6144: 1,
123
+ 8192: 3,
124
+ 10240: 3,
125
+ 14336: 3,
126
+ 28672: 1,
127
+ 57344: 1
128
+ },
129
+ { # tokens: [577:640]
130
+ 3072: 5,
131
+ 4096: 4,
132
+ 5120: 1,
133
+ 6144: 4,
134
+ 8192: 2,
135
+ 10240: 1,
136
+ 14336: 1,
137
+ 28672: 1,
138
+ 57344: 1
139
+ },
140
+ { # tokens: [641:704]
141
+ 3072: 3,
142
+ 4096: 1,
143
+ 5120: 2,
144
+ 6144: 2,
145
+ 8192: 1,
146
+ 10240: 2,
147
+ 14336: 1,
148
+ 28672: 1,
149
+ 57344: 1
150
+ },
151
+ { # tokens: [705:768]
152
+ 3072: 3,
153
+ 4096: 1,
154
+ 5120: 3,
155
+ 6144: 2,
156
+ 8192: 1,
157
+ 10240: 1,
158
+ 14336: 1,
159
+ 28672: 1,
160
+ 57344: 1
161
+ }
162
+ ]
163
+
164
+ def __call__(self, output: torch.Tensor, hidden_states: torch.Tensor, weights_2bit: torch.Tensor,
165
+ weights_4bit: torch.Tensor, scale: torch.Tensor, out_channels, tokens, in_channels) -> torch.Tensor:
166
+ """
167
+ Matmul kernel of FP6 weight-only quantized linear. All inputs should be contiguous.
168
+ It does not support batched-matmul.
169
+
170
+ Parameters:
171
+ output (torch.Tensor): Output tensor. Shape is of [token_number, out_features]
172
+ hidden_states (torch.Tensor): Input tensor. Shape is of [token_number, in_features]
173
+ weights_2bit (torch.Tensor): Input tensor of the 2-bit slice. Shape is of [out_features*2/8, in_features]
174
+ weights_4bit (torch.Tensor): Input tensor of the 4-bit slice. Shape is of [out_features*4/8, in_features]
175
+ scale (torch.Tensor): Input tensor. Shape is of [out_features], since the scale is per output channel
176
+ out_channels (int): The number of output channels
177
+ tokens (int): The number of tokens
178
+ in_channels (int): The number of input channels
179
+ """
180
+
181
+ if out_channels % 256 != 0 or in_channels % 64 != 0:
182
+ raise ValueError("out_channels and in_channels must be multiples of 256 and 64, respectively.")
183
+
184
+ # TODO: add a more general heuristic to determine the split-K.
185
+ split_k = -1 # not initialized
186
+ if tokens <= 768:
187
+ # Try to find the split-K from the pre-profiled map.
188
+ tokens_chunk_id = (tokens - 1) // 64
189
+ split_k = self.split_k_map[tokens_chunk_id].get(out_channels, -1)
190
+ if split_k == -1:
191
+ split_k = 1
192
+ inference_logger().warning(
193
+ f"The split-K setting may be suboptimal for shape {tokens}x{in_channels}x{out_channels}...")
194
+
195
+ workspace = self.get_workspace(out_channels, tokens, in_channels, split_k, torch.float, hidden_states.device)
196
+ self.kernel(output, hidden_states, weights_2bit, weights_4bit, scale, workspace, out_channels, tokens,
197
+ in_channels, split_k)
198
+
199
+ def get_workspace(self, out_channels: int, tokens: int, in_channels: int, split_k: int, dtype,
200
+ device) -> torch.Tensor:
201
+ """
202
+ Allocate workspace for the kernel. The workspace is used to store the intermediate results of the matmul before
203
+ split-K. The split-K size is determined by the size of the matmul.
204
+ """
205
+ workspace = torch.empty((split_k, out_channels, tokens), dtype=dtype, device=device)
206
+
207
+ return workspace
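The split-K table above only covers up to 768 tokens and a handful of profiled output-channel sizes; everything else falls back to split_k = 1 with a warning. A small sketch of the lookup performed in __call__ (the helper name is hypothetical):

def lookup_split_k(split_k_map, tokens: int, out_channels: int) -> int:
    # Tokens are bucketed into 64-token chunks; unknown shapes fall back to 1.
    if tokens <= 768:
        chunk_id = (tokens - 1) // 64
        return split_k_map[chunk_id].get(out_channels, 1)
    return 1

# e.g. tokens=100, out_channels=4096 lands in the [65:128] chunk -> split_k = 6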
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/kernel_reduction.cuh ADDED
@@ -0,0 +1,43 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ // This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112
7
+
8
+ #ifndef DEEPSPEED_CUDA_LINEAR_KERNEL_REDUCTION_CUH
9
+ #define DEEPSPEED_CUDA_LINEAR_KERNEL_REDUCTION_CUH
10
+
11
+ #include <cuda.h>
12
+ #include <cuda_fp16.h>
13
+ #include <cuda_runtime.h>
14
+
15
+ #define REDUCTION_ELEMENT_PER_THREADBLOCK 256
16
+ #define HALF_PER_128BIT 8
17
+
18
+ __global__ void SplitK_Reduction(half* C,
19
+ float* Reduction_Workspace,
20
+ size_t M_Global,
21
+ size_t N_Global,
22
+ int Split_K)
23
+ {
24
+ half* WARP_GPTR_C = C + REDUCTION_ELEMENT_PER_THREADBLOCK * blockIdx.x;
25
+ float* WARP_GPTR_R = Reduction_Workspace + REDUCTION_ELEMENT_PER_THREADBLOCK * blockIdx.x;
26
+ half* THREAD_GPTR_C = WARP_GPTR_C + threadIdx.x * HALF_PER_128BIT;
27
+ float* THREAD_GPTR_R = WARP_GPTR_R + threadIdx.x * HALF_PER_128BIT;
28
+ // Initializing Thread-Local Results
29
+ float Results[HALF_PER_128BIT];
30
+ #pragma unroll
31
+ for (int i = 0; i < HALF_PER_128BIT; i++) Results[i] = 0.0f;
32
+ // Reduction
33
+ for (int i = 0; i < Split_K; i++) {
34
+ #pragma unroll
35
+ for (int j = 0; j < HALF_PER_128BIT; j++) Results[j] += THREAD_GPTR_R[j];
36
+ THREAD_GPTR_R += M_Global * N_Global;
37
+ }
38
+ // Writing to global memory
39
+ #pragma unroll
40
+ for (int i = 0; i < HALF_PER_128BIT; i++) THREAD_GPTR_C[i] = __float2half_rn(Results[i]);
41
+ }
42
+
43
+ #endif
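Functionally, SplitK_Reduction sums Split_K partial fp32 results that are laid out back to back in the workspace and writes the fp16 total into C. A NumPy sketch of the same reduction, useful as a reference when testing (not a drop-in for the kernel):

import numpy as np

def splitk_reduce(workspace: np.ndarray, split_k: int, m: int, n: int) -> np.ndarray:
    # workspace: split_k partial GEMM results of shape (m, n), stored contiguously in fp32.
    partials = workspace.reshape(split_k, m, n)
    return partials.sum(axis=0).astype(np.float16)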
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/ptx_cp.async.cuh ADDED
@@ -0,0 +1,71 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ // This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112
7
+
8
+ #ifndef DEEPSPEED_CUDA_LINEAR_PTX_CP_ASYNC_CUH
9
+ #define DEEPSPEED_CUDA_LINEAR_PTX_CP_ASYNC_CUH
10
+
11
+ #include <cuda.h>
12
+ #include <cuda_fp16.h>
13
+ #include <cuda_runtime.h>
14
+
15
+ template <int SizeInBytes>
16
+ __device__ __forceinline__ void cp_async(half* smem_ptr,
17
+ const half* global_ptr,
18
+ bool pred_guard = true)
19
+ {
20
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
21
+ static_assert(SizeInBytes == 16, "Size is not supported");
22
+ unsigned smem_int_ptr = __cvta_generic_to_shared(smem_ptr);
23
+ asm volatile(
24
+ "{ \n"
25
+ " .reg .pred p;\n"
26
+ " setp.ne.b32 p, %0, 0;\n"
27
+ " @p cp.async.cg.shared.global [%1], [%2], %3;\n"
28
+ "}\n" ::"r"((int)pred_guard),
29
+ "r"(smem_int_ptr),
30
+ "l"(global_ptr),
31
+ "n"(SizeInBytes));
32
+ #else
33
+ #warning "The async copy functions are only supported on Ampere and newer architectures"
34
+ #endif
35
+ }
36
+
37
+ /// Establishes an ordering w.r.t previously issued cp.async instructions. Does not block.
38
+ __device__ __forceinline__ void cp_async_group_commit()
39
+ {
40
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
41
+ asm volatile("cp.async.commit_group;\n" ::);
42
+ #else
43
+ #warning "The async copy functions are only supported on Ampere and newer architectures"
44
+ #endif
45
+ }
46
+
47
+ /// Blocks until all but <N> previous cp.async.commit_group operations have committed.
48
+ template <int N>
49
+ __device__ __forceinline__ void cp_async_wait_group()
50
+ {
51
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
52
+ asm volatile("cp.async.wait_group %0;\n" ::"n"(N));
53
+ #else
54
+ #warning "The async copy functions are only supported on Ampere and newer architectures"
55
+ #endif
56
+ }
57
+
58
+ /// Blocks until all previous cp.async.commit_group operations have committed.
59
+ // cp.async.wait_all is equivalent to :
60
+ // cp.async.commit_group;
61
+ // cp.async.wait_group 0;
62
+ __device__ __forceinline__ void cp_async_wait_all()
63
+ {
64
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
65
+ asm volatile("cp.async.wait_all;\n" ::);
66
+ #else
67
+ #warning "The async copy functions are only supported on Ampere and newer architectures"
68
+ #endif
69
+ }
70
+
71
+ #endif
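For readers unfamiliar with these PTX primitives: cp_async_group_commit closes a group of previously issued copies, and cp_async_wait_group<N> blocks until at most N committed groups are still in flight (wait_all is the N = 0 case). A purely illustrative Python model of that bookkeeping, with no relation to the real hardware queue:

from collections import deque

class CpAsyncModel:
    def __init__(self):
        self.pending = []         # copies issued since the last commit_group
        self.in_flight = deque()  # committed groups that have not completed yet

    def cp_async(self, copy_fn):
        self.pending.append(copy_fn)

    def commit_group(self):
        self.in_flight.append(self.pending)
        self.pending = []

    def wait_group(self, n: int):
        # Retire the oldest groups until at most n remain outstanding.
        while len(self.in_flight) > n:
            for copy_fn in self.in_flight.popleft():
                copy_fn()  # data becomes visible in shared memory here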
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/linear_kernels.cpp ADDED
@@ -0,0 +1,224 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include <ATen/cuda/CUDAContext.h>
7
+
8
+ #include "linear_kernels.h"
9
+
10
+ namespace {
11
+
12
+ // For bit-level debugging.
13
+ template <typename T>
14
+ void print_bits(T num)
15
+ {
16
+ char bits[sizeof(T) * 8 + 1] = {'\0'};
17
+ for (int bit = 0; bit < (sizeof(T) * 8); bit++) {
18
+ bits[sizeof(T) * 8 - 1 - bit] = '0' + (num & 0x01);
19
+ num = num >> 1;
20
+ }
21
+ printf("%s\n", bits);
22
+ }
23
+
24
+ void print_bits(half num)
25
+ {
26
+ char bits[sizeof(half) * 8 + 1] = {'\0'};
27
+ auto int_num = *reinterpret_cast<uint16_t*>(&num);
28
+ for (int bit = 0; bit < (sizeof(half) * 8); bit++) {
29
+ bits[sizeof(half) * 8 - 1 - bit] = '0' + (int_num & 0x01);
30
+ int_num = int_num >> 1;
31
+ }
32
+ printf("%s\n", bits);
33
+ }
34
+
35
+ /*
36
+ * Function to pack 4 fake quantized FP16 value into continuously stored 4 FP6 values.
37
+ */
38
+ void cast_fp16_fp6(uint16_t* FP16x4, uint8_t* FP6x4)
39
+ {
40
+ // Constants for FP6
41
+ constexpr int exponent_nbits_fp6 = 3;
42
+ constexpr int mantissa_nbits_fp6 = 2;
43
+ constexpr int exp_bias_fp6 = (1 << (exponent_nbits_fp6 - 1)) - 1;
44
+ // Constants for FP16
45
+ constexpr int exponent_nbits_fp16 = 5;
46
+ constexpr int mantissa_nbits_fp16 = 10;
47
+ constexpr int exp_bias_fp16 = (1 << (exponent_nbits_fp16 - 1)) - 1;
48
+
49
+ int fp6_temp[4];
50
+
51
+ float absmin_nonzero_fp6 = 0.0625;
52
+ // Note that we regard the exponent of '111' as a regular value rather than NaN or inf. This is
53
+ // the same with that in qtorch.
54
+ float absmax_fp6 = 28;
55
+
56
+ for (int i = 0; i < 4; ++i) {
57
+ uint16_t source = FP16x4[i];
58
+ float fp6_value_abs = std::abs(__half2float(*((half*)(&source))));
59
+ if ((fp6_value_abs != 0 && fp6_value_abs < absmin_nonzero_fp6) ||
60
+ fp6_value_abs > absmax_fp6) {
61
+ // TODO(zhen): a better way may be rounding it to the nearest FP6 value.
62
+ throw std::invalid_argument("Input value out of range for FP6.");
63
+ }
64
+
65
+ // It is not safe to do shift operation on uint16_t. So we promote it to int.
66
+ int source_promote = int(source);
67
+
68
+ int sign_bit = (source_promote >> 15);
69
+ // Extracting exponent represented in FP16. The sign mask 0x7FFF is '0111 1111 1111 1111'
70
+ int exp_bit = (source_promote & 0x7FFF) >> mantissa_nbits_fp16;
71
+ // Extracting mantissa represented in FP16
72
+ int mant_bit = source_promote & ((1 << mantissa_nbits_fp16) - 1);
73
+
74
+ int new_exp_bit;
75
+ int new_mant_bit;
76
+
77
+ if (exp_bit == 0) {
78
+ // Subnormal FP16 number. Too small for FP6.
79
+ new_exp_bit = 0;
80
+ new_mant_bit = 0;
81
+ } else {
82
+ new_mant_bit = mant_bit >> (mantissa_nbits_fp16 - mantissa_nbits_fp6);
83
+ new_exp_bit = exp_bit - exp_bias_fp16 + exp_bias_fp6;
84
+
85
+ // Deal with subnormal FP6 values.
86
+ int target_exp_val = exp_bit - exp_bias_fp16;
87
+ int min_fp6_exp_val = -exp_bias_fp6 + 1;
88
+ bool subnormal_fp6 = target_exp_val < min_fp6_exp_val;
89
+ if (subnormal_fp6) {
90
+ // TODO(zhen): add the rounding logic.
91
+ new_exp_bit = 0;
92
+ // The implicit 1 in the mantissa of FP16 is not present in subnormal FP6. Thus we
93
+ // need to add it
94
+ new_mant_bit = (new_mant_bit | (1 << mantissa_nbits_fp6)) >>
95
+ (min_fp6_exp_val - target_exp_val);
96
+ }
97
+ }
98
+
99
+ fp6_temp[i] = (sign_bit << (exponent_nbits_fp6 + mantissa_nbits_fp6)) |
100
+ (new_exp_bit << mantissa_nbits_fp6) | new_mant_bit;
101
+ }
102
+ // Pack the values
103
+ FP6x4[0] = fp6_temp[0] << 2 | (fp6_temp[1] >> 4);
104
+ FP6x4[1] = (fp6_temp[1] & 0x0F) << 4 | (fp6_temp[2] >> 2);
105
+ FP6x4[2] = (fp6_temp[2] & 0x03) << 6 | fp6_temp[3];
106
+ }
107
+
108
+ /*
109
+ * Function to prepack FP16 weights into continuous FP6 values.
110
+ *
111
+ * Parameters:
112
+ * weight_16bit: input weight in FP16, size M*K
113
+ * weight_6bit: output weight in packed FP6, continuously stored, size M*K*6/8
114
+ * M, K: the shape of the weight
115
+ */
116
+ void weight_prepacking_fp16_to_fp6(uint16_t* weight_16bit,
117
+ uint8_t* weight_6bit_packed,
118
+ size_t M,
119
+ size_t K)
120
+ {
121
+ // Every four 16-bit elements are packed into three 6-bit values (4*6bit == 3*8bit).
122
+ if (K * 6 % 8 != 0) { throw std::invalid_argument("(K * 6 % 8) should be 0"); }
123
+ size_t K_fp6_packed = K * 6 / 8;
124
+ // #pragma omp parallel for
125
+ for (auto m = 0; m < M; m++) {
126
+ uint8_t* ptr_6bit = weight_6bit_packed + m * K_fp6_packed;
127
+ uint16_t* ptr_16bit = weight_16bit + m * K;
128
+ for (auto k = 0; k < K; k += 4) {
129
+ cast_fp16_fp6(ptr_16bit, ptr_6bit);
130
+ ptr_16bit += 4;
131
+ ptr_6bit += 3;
132
+ }
133
+ }
134
+ }
135
+
136
+ } // namespace
137
+
138
+ /*
139
+ * Function to execute the FP6 linear kernel.
140
+ *
141
+ * Parameters:
142
+ * output: output tensor, size M*N
143
+ * hidden_states: input activation tensor, size N*K
144
+ * weights_2bit: packed 2bit weights, size M*K*2/8
145
+ * weights_4bit: packed 4bit weights, size M*K*4/8
146
+ * scales: scale tensor, size M
147
+ * workspace: workspace tensor, size M*N*split_k
148
+ * M: the output channel number of the weight
149
+ * N: the token number of the activation
150
+ * K: the input channel number of the weight
151
+ * split_k: the split size of the GEMM calculation
152
+ */
153
+ void cuda_wf6af16_linear(torch::Tensor& output,
154
+ torch::Tensor& hidden_states,
155
+ torch::Tensor& weights_2bit,
156
+ torch::Tensor& weights_4bit,
157
+ torch::Tensor& scales,
158
+ torch::Tensor& workspace,
159
+ int M,
160
+ int N,
161
+ int K,
162
+ int split_k)
163
+ {
164
+ TORCH_CHECK(weights_2bit.device().type() == torch::kCUDA, "weight_2bit must be on CUDA");
165
+ TORCH_CHECK(weights_4bit.device().type() == torch::kCUDA, "weight_4bit must be on CUDA");
166
+ TORCH_CHECK(hidden_states.device().type() == torch::kCUDA, "X must be on CUDA");
167
+ TORCH_CHECK(scales.device().type() == torch::kCUDA, "scales must be on CUDA");
168
+
169
+ auto status = fp6_linear_kernel(at::cuda::getCurrentCUDAStream(),
170
+ (uint4*)(weights_2bit.data_ptr<uint8_t>()),
171
+ (uint4*)(weights_4bit.data_ptr<uint8_t>()),
172
+ (half*)(scales.data_ptr<at::Half>()),
173
+ (half*)(hidden_states.data_ptr<at::Half>()),
174
+ (half*)(output.data_ptr<at::Half>()),
175
+ M,
176
+ N,
177
+ K,
178
+ workspace.data_ptr<float>(),
179
+ split_k);
180
+ if (status != cudaSuccess) {
181
+ AT_ERROR("fp6_linear_kernel failed with error: ", cudaGetErrorString(status));
182
+ }
183
+ }
184
+
185
+ /*
186
+ * Function to prepack the fake 6-bit-quantized FP16 weights into 2bit and 4bit.
187
+ *
188
+ * Parameters:
189
+ * weight: input weight in FP16 (containing the quantized FP6-ranged value), size M*K
190
+ * Returns:
191
+ * weight_2bit: output weight in 2bit, size M*K*2/8
192
+ * weight_4bit: output weight in 4bit, size M*K*4/8
193
+ */
194
+ std::vector<torch::Tensor> preprocess_weight(torch::Tensor& weight)
195
+ {
196
+ TORCH_CHECK(weight.dim() == 2, "weight must be 2-dimensional");
197
+ TORCH_CHECK(weight.scalar_type() == torch::kFloat16, "weight must be FP16");
198
+ TORCH_CHECK(weight.is_contiguous(), "weight must be contiguous");
199
+ TORCH_CHECK(weight.device().type() == torch::kCPU, "weight must be on CPU");
200
+ auto M = weight.size(0);
201
+ auto K = weight.size(1);
202
+ TORCH_CHECK(K % 4 == 0, "K must be multiple of 4");
203
+
204
+ // Pack weight from FP16 to FP6.
205
+ uint16_t* weight_16bit_ptr = reinterpret_cast<uint16_t*>(weight.data_ptr<at::Half>());
206
+ std::vector<uint8_t> weight_6bit_packed(M * K * 6 / 8);
207
+ uint8_t* weight_6bit_ptr = weight_6bit_packed.data();
208
+ weight_prepacking_fp16_to_fp6(weight_16bit_ptr, weight_6bit_ptr, M, K);
209
+
210
+ // Split weight into 2bit and 4bit.
211
+ weight_matrix_prepacking(reinterpret_cast<int*>(weight_6bit_ptr), M, K);
212
+ uint8_t* weight_2bit_ptr = weight_6bit_ptr;
213
+
214
+ // Make sure that the new split tensor does not share the underlying memory with the original
215
+ // one. Otherwise it will incur some problems when the original tensor is deleted. It also
216
+ // makes flattening the memory risky.
217
+ auto weight_2bit =
218
+ torch::from_blob(weight_2bit_ptr, {M * K * 2 / 8}, torch::kUInt8).clone().detach();
219
+ uint8_t* weight_4bit_ptr = weight_2bit_ptr + M * K * 2 / 8;
220
+ auto weight_4bit =
221
+ torch::from_blob(weight_4bit_ptr, {M * K * 4 / 8}, torch::kUInt8).clone().detach();
222
+
223
+ return {weight_2bit, weight_4bit};
224
+ }
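The tail of cast_fp16_fp6 is just a fixed bit interleave: four 6-bit codes become three bytes. A Python sketch of that packing step alone (taking the 6-bit codes as already computed) makes the layout explicit:

def pack_fp6x4(codes):
    # codes: four integers in [0, 63], i.e. sign(1) | exponent(3) | mantissa(2) bit patterns.
    a, b, c, d = (code & 0x3F for code in codes)
    byte0 = (a << 2) | (b >> 4)
    byte1 = ((b & 0x0F) << 4) | (c >> 2)
    byte2 = ((c & 0x03) << 6) | d
    return bytes([byte0, byte1, byte2])

# 4 * 6 bits == 3 * 8 bits, which is why (K * 6) % 8 == 0 is checked above
assert pack_fp6x4([0b101010, 0b010101, 0b111000, 0b000111]) == bytes([0b10101001, 0b01011110, 0b00000111])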
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/linear_kernels.h ADDED
@@ -0,0 +1,28 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #ifndef DEEPSPEED_CUDA_LINEAR_KERNELS_H
7
+ #define DEEPSPEED_CUDA_LINEAR_KERNELS_H
8
+
9
+ #include <c10/cuda/CUDAStream.h>
10
+ #include <torch/extension.h>
11
+ #include "ds_kernel_utils.h"
12
+
13
+ #include "linear_kernels_cuda.h"
14
+
15
+ void cuda_wf6af16_linear(torch::Tensor& output,
16
+ torch::Tensor& hidden_states,
17
+ torch::Tensor& weights_2bit,
18
+ torch::Tensor& weights_4bit,
19
+ torch::Tensor& scale,
20
+ torch::Tensor& workspace,
21
+ int M,
22
+ int N,
23
+ int K,
24
+ int split_k);
25
+
26
+ std::vector<torch::Tensor> preprocess_weight(torch::Tensor& Weight);
27
+
28
+ #endif
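The buffer shapes implied by this interface follow directly from the bit widths: for an M x K weight, the 2-bit slice occupies M*K*2/8 bytes and the 4-bit slice M*K*4/8 bytes. A trivial sketch of that arithmetic (hypothetical helper, only restating the size comments above):

def fp6_packed_sizes(out_channels: int, in_channels: int):
    n = out_channels * in_channels
    return n * 2 // 8, n * 4 // 8  # (2-bit slice bytes, 4-bit slice bytes)

# e.g. a 4096 x 4096 weight -> 4 MiB of 2-bit data and 8 MiB of 4-bit data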
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/linear_kernels_cuda.cu ADDED
@@ -0,0 +1,318 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ // This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112
7
+
8
+ // clang-format off
9
+ // Put the torch headers at the front to avoid conflict with other headers on
10
+ // `at::nullopt` and `at::optional`.
11
+ #include <torch/extension.h>
12
+ #include <ATen/ATen.h>
13
+ // clang-format on
14
+
15
+ #include "include/kernel_matmul.cuh"
16
+ #include "include/kernel_reduction.cuh"
17
+ #include "include/weight_prepacking.h"
18
+
19
+ #include <assert.h>
20
+ #include <stdio.h>
21
+
22
+ #include "linear_kernels_cuda.h"
23
+
24
+ template <typename TilingConfig, typename OutputDataType>
25
+ static void Kernel_Ex(cudaStream_t stream,
26
+ const uint4* Weight1,
27
+ const uint4* Weight2,
28
+ const half* Scales,
29
+ const half* B,
30
+ OutputDataType* C,
31
+ const size_t M_Global,
32
+ const size_t N_Global,
33
+ const size_t K_Global,
34
+ int Split_K)
35
+ {
36
+ #ifdef DEBUG_MODE
37
+ printf("\n");
38
+ printf("Launcher.cu->Kernel_Ex():\n");
39
+ printf("M: %d, N: %d, K: %d, SplitK: %d\n", M_Global, N_Global, K_Global, Split_K);
40
+ printf("TILE_M: %d, TILE_K: %d, TILE_N: %d\n",
41
+ TilingConfig::TILE_M,
42
+ TilingConfig::TILE_K,
43
+ TilingConfig::TILE_N);
44
+ #endif
45
+ static size_t SHMEM_SZ =
46
+ max(TilingConfig::SMEM_SIZE_B_TILE + SMEM_SIZE_A1_TILE + SMEM_SIZE_A2_TILE,
47
+ TilingConfig::SMEM_SIZE_C_TILE);
48
+ cudaFuncSetAttribute(QUANT_GEMM_Kernel<TilingConfig, OutputDataType>,
49
+ cudaFuncAttributeMaxDynamicSharedMemorySize,
50
+ SHMEM_SZ);
51
+ size_t dimN = (N_Global - 1) / TilingConfig::TILE_N + 1;
52
+ size_t dimM = M_Global * Split_K / TilingConfig::TILE_M;
53
+ dim3 GridDim(dimN, dimM, 1);
54
+ dim3 BlockDim(WARP_SIZE * TilingConfig::BLOCK_WARPS, 1, 1);
55
+
56
+ #ifdef DEBUG_MODE
57
+ printf(
58
+ "GridDim.x: %d, GridDim.y: %d, GridDim.z: %d, BlockDim.x: %d, BlockDim.y: %d, BlockDim.z: "
59
+ "%d SHMEM_SZ: %d\n",
60
+ GridDim.x,
61
+ GridDim.y,
62
+ GridDim.z,
63
+ BlockDim.x,
64
+ BlockDim.y,
65
+ BlockDim.z,
66
+ SHMEM_SZ);
67
+ printf("\n");
68
+ #endif
69
+
70
+ QUANT_GEMM_Kernel<TilingConfig, OutputDataType><<<GridDim, BlockDim, SHMEM_SZ, stream>>>(
71
+ Weight1, Weight2, Scales, B, C, M_Global, N_Global, K_Global, Split_K);
72
+ }
73
+
74
+ /*
75
+ *
76
+ */
77
+ cudaError_t fp6_linear_kernel(cudaStream_t stream,
78
+ const uint4* Weight1,
79
+ const uint4* Weight2,
80
+ const half* Scales,
81
+ const half* B,
82
+ half* C,
83
+ const size_t M_Global,
84
+ const size_t N_Global,
85
+ const size_t K_Global,
86
+ float* Reduction_Workspace, // Reduction_Workspace_Size = Split_K *
87
+ // M_Global * N_Global * sizeof(fp32)
88
+ int Split_K)
89
+ {
90
+ assert(M_Global % 256 == 0);
91
+ assert(K_Global % 64 == 0);
92
+ assert(N_Global > 0);
93
+
94
+ // Workaround to support more N shapes:
95
+ size_t N_PowerOf2;
96
+ if (N_Global > 0 && N_Global <= 8) N_PowerOf2 = 8;
97
+ if (N_Global > 8 && N_Global <= 16) N_PowerOf2 = 16;
98
+ if (N_Global > 16 && N_Global <= 32) N_PowerOf2 = 32;
99
+ if (N_Global > 32 && N_Global <= 64) N_PowerOf2 = 64;
100
+ if (N_Global > 64 && N_Global <= 128) N_PowerOf2 = 128;
101
+ if (N_Global > 128) N_PowerOf2 = ((N_Global - 1) / 128 + 1) * 128;
102
+
103
+ if (Split_K == 1) {
104
+ switch (N_PowerOf2) {
105
+ case 8:
106
+ Kernel_Ex<TilingConfig<4, 1, 1>, half>(
107
+ stream, Weight1, Weight2, Scales, B, C, M_Global, N_Global, K_Global, Split_K);
108
+ break;
109
+ case 16:
110
+ Kernel_Ex<TilingConfig<4, 1, 2>, half>(
111
+ stream, Weight1, Weight2, Scales, B, C, M_Global, N_Global, K_Global, Split_K);
112
+ break;
113
+ case 32:
114
+ Kernel_Ex<TilingConfig<4, 1, 4>, half>(
115
+ stream, Weight1, Weight2, Scales, B, C, M_Global, N_Global, K_Global, Split_K);
116
+ break;
117
+ case 64:
118
+ Kernel_Ex<TilingConfig<4, 1, 8>, half>(
119
+ stream, Weight1, Weight2, Scales, B, C, M_Global, N_Global, K_Global, Split_K);
120
+ break;
121
+ case 128:
122
+ Kernel_Ex<TilingConfig<4, 1, 8>, half>(
123
+ stream, Weight1, Weight2, Scales, B, C, M_Global, N_Global, K_Global, Split_K);
124
+ break;
125
+ default:
126
+ if (N_PowerOf2 % 128 != 0) {
127
+ printf("QuantLLM_API Error: Unsupported N dimension %lu!\n", N_PowerOf2);
128
+ return cudaErrorUnknown;
129
+ }
130
+ Kernel_Ex<TilingConfig<4, 1, 8>, half>(
131
+ stream, Weight1, Weight2, Scales, B, C, M_Global, N_Global, K_Global, Split_K);
132
+ break;
133
+ }
134
+ } else {
135
+ switch (N_PowerOf2) {
136
+ case 8:
137
+ Kernel_Ex<TilingConfig<4, 1, 1>, float>(stream,
138
+ Weight1,
139
+ Weight2,
140
+ Scales,
141
+ B,
142
+ Reduction_Workspace,
143
+ M_Global,
144
+ N_Global,
145
+ K_Global,
146
+ Split_K);
147
+ break;
148
+ case 16:
149
+ Kernel_Ex<TilingConfig<4, 1, 2>, float>(stream,
150
+ Weight1,
151
+ Weight2,
152
+ Scales,
153
+ B,
154
+ Reduction_Workspace,
155
+ M_Global,
156
+ N_Global,
157
+ K_Global,
158
+ Split_K);
159
+ break;
160
+ case 32:
161
+ Kernel_Ex<TilingConfig<4, 1, 4>, float>(stream,
162
+ Weight1,
163
+ Weight2,
164
+ Scales,
165
+ B,
166
+ Reduction_Workspace,
167
+ M_Global,
168
+ N_Global,
169
+ K_Global,
170
+ Split_K);
171
+ break;
172
+ case 64:
173
+ Kernel_Ex<TilingConfig<4, 1, 8>, float>(stream,
174
+ Weight1,
175
+ Weight2,
176
+ Scales,
177
+ B,
178
+ Reduction_Workspace,
179
+ M_Global,
180
+ N_Global,
181
+ K_Global,
182
+ Split_K);
183
+ break;
184
+ case 128:
185
+ Kernel_Ex<TilingConfig<4, 1, 8>, float>(stream,
186
+ Weight1,
187
+ Weight2,
188
+ Scales,
189
+ B,
190
+ Reduction_Workspace,
191
+ M_Global,
192
+ N_Global,
193
+ K_Global,
194
+ Split_K);
195
+ break;
196
+ default:
197
+ if (N_PowerOf2 % 128 != 0) {
198
+ printf("QuantLLM_API Error: Unsupported N dimension %lu!\n", N_PowerOf2);
199
+ return cudaErrorUnknown;
200
+ }
201
+ Kernel_Ex<TilingConfig<4, 1, 8>, float>(stream,
202
+ Weight1,
203
+ Weight2,
204
+ Scales,
205
+ B,
206
+ Reduction_Workspace,
207
+ M_Global,
208
+ N_Global,
209
+ K_Global,
210
+ Split_K);
211
+ break;
212
+ }
213
+ // Reduction for SplitK
214
+ dim3 GridDim((M_Global * N_Global) / REDUCTION_ELEMENT_PER_THREADBLOCK, 1, 1);
215
+ dim3 BlockDim(WARP_SIZE, 1, 1);
216
+ SplitK_Reduction<<<GridDim, BlockDim, 0, stream>>>(
217
+ C, Reduction_Workspace, M_Global, N_Global, Split_K);
218
+ }
219
+ return cudaGetLastError();
220
+ }
221
+
222
+ /*
223
+ Computes FP6-FP16 GEMM (PyTorch interface).
224
+
225
+ [Mathematical Formula]
226
+ Standard definition of linear layer: Out = In * trans(W), where In, Out, and W are stored in
227
+ row-major. After Equivalent transformation : trans(Out) = W * trans(In). Note that we do not
228
+ perform "transpose" during runtime, we instead interpret the In/Out as column-major matrices when
229
+ calling our CUDA kernel.
230
+
231
+ [Inputs]
232
+ _in_feats: tensor of shape [B, IC]; // half
233
+ _weights: int tensor of shape [OC, IC // 16 * 3]; // 3 INT32 words contain 16 FP6 weights.
234
+ _scales: tensor of shape [OC]; // half
235
+ splitK: splitting the MatMul problem along K dimension for higher GPU utilization, default 1.
236
+ [Outputs]
237
+ _out_feats: tensor of shape [B, OC]; // half
238
+ */
239
+ torch::Tensor fp6_linear_forward_cuda(torch::Tensor _in_feats,
240
+ torch::Tensor _weights,
241
+ torch::Tensor _scales,
242
+ int splitK)
243
+ {
244
+ int num_in_feats = _in_feats.size(0);
245
+ int num_in_channels = _in_feats.size(1);
246
+ int num_out_channels = _weights.size(0);
247
+ assert(num_in_channels % 64 == 0);
248
+ assert((num_in_channels / 16 * 3) ==
249
+ _weights.size(1)); // Making sure the K dimension is matched.
250
+ //
251
+ int M = num_out_channels;
252
+ int K = num_in_channels;
253
+ int N = num_in_feats;
254
+ // Input Tensors
255
+ auto weight1 = reinterpret_cast<const uint4*>(
256
+ _weights.data_ptr<int>()); // weights is [OC, IC] but in FP6.
257
+ auto weight2 = weight1 + num_in_channels * num_out_channels * 2 / 128;
258
+ auto in_feats = reinterpret_cast<const half*>(_in_feats.data_ptr<at::Half>());
259
+ auto scales = reinterpret_cast<const half*>(_scales.data_ptr<at::Half>());
260
+ // Output Tensors
261
+ auto options = torch::TensorOptions().dtype(_in_feats.dtype()).device(_in_feats.device());
262
+ at::Tensor _out_feats = torch::empty({num_in_feats, num_out_channels}, options);
263
+ auto out_feats = reinterpret_cast<half*>(_out_feats.data_ptr<at::Half>());
264
+
265
+ at::Tensor _workspace;  // keep the split-K workspace tensor alive across the kernel call
266
+ float* Reduction_Workspace = nullptr;
267
+ if (splitK != 1) {
268
+ auto options = torch::TensorOptions().dtype(torch::kFloat32).device(_in_feats.device());
269
+ _workspace = torch::empty({splitK, num_in_feats, num_out_channels}, options);
270
+ // Reduction_Workspace_Size = Split_K * M_Global * N_Global * sizeof(fp32)
271
+ Reduction_Workspace = _workspace.data_ptr<float>();
272
+ }
273
+
274
+ fp6_linear_kernel(0, // Using default stream here.
275
+ weight1,
276
+ weight2,
277
+ scales,
278
+ in_feats,
279
+ out_feats,
280
+ M,
281
+ N,
282
+ K,
283
+ Reduction_Workspace,
284
+ splitK);
285
+
286
+ return _out_feats;
287
+ }
288
+
289
+ /*
290
+ * Inputs:
291
+ * (1) unsigned char Weight_6bit [M*K*6/8]
292
+ * Outputs:
293
+ * (1) unsigned char Weight_2bit [M*K*2/8]
294
+ * (2) unsigned char Weight_4bit [M*K*4/8]
295
+ *
296
+ * Assumption: Weight_6bit, Weight_2bit, Weight_4bit all stored continuously in row-major.
297
+ * 8 FP6 = 6 Bytes
298
+ * 8 FP4 = 4 Bytes
299
+ * 8 FP2 = 2 Bytes
300
+ */
301
+
302
+ /*
303
+ * Weight prepacking (Pytorch interface).
304
+ * [Input & Output]
305
+ * fp6_tensor: int tensor of shape [OC, IC // 16 * 3]; // 3 INT32 words contain 16 FP6 weights.
306
+ * [Output]
307
+ * packed_tensor: int tensor of shape [OC, IC // 16 * 3];
308
+ */
309
+ torch::Tensor weight_matrix_prepacking_cpu(torch::Tensor fp6_tensor, size_t OC, size_t IC)
310
+ {
311
+ assert((OC % 256 == 0) && (IC % 64 == 0));
312
+ assert((fp6_tensor.size(0) == OC) && (fp6_tensor.size(1) == IC / 16 * 3));
313
+ // auto packed_tensor = torch::empty_like(fp6_tensor);
314
+ // auto packed_tensor_ptr = reinterpret_cast<int*>(packed_tensor.data_ptr<int>());
315
+ auto fp6_tensor_ptr = reinterpret_cast<int*>(fp6_tensor.data_ptr<int>());
316
+ weight_matrix_prepacking(fp6_tensor_ptr, OC, IC);
317
+ return fp6_tensor;
318
+ }
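Two launch-time details worth restating: the token dimension is rounded up to one of a small set of supported tile widths, and split-K runs need an fp32 workspace of Split_K * M_Global * N_Global elements. A Python sketch of that bookkeeping (mirroring fp6_linear_kernel; function names are illustrative):

def n_power_of_2(n_global: int) -> int:
    for bound in (8, 16, 32, 64, 128):
        if n_global <= bound:
            return bound
    return ((n_global - 1) // 128 + 1) * 128   # multiples of 128 above 128

def splitk_workspace_elems(m_global: int, n_global: int, split_k: int) -> int:
    # fp32 elements needed for the reduction workspace; none when split_k == 1.
    return split_k * m_global * n_global if split_k > 1 else 0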
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/linear_kernels_cuda.h ADDED
@@ -0,0 +1,51 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ // This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112
7
+
8
+ #ifndef DEEPSPEED_CUDA_LINEAR_FP6_LINEAR_CUH
9
+ #define DEEPSPEED_CUDA_LINEAR_FP6_LINEAR_CUH
10
+
11
+ #include <cuda.h>
12
+ #include <cuda_fp16.h>
13
+ #include <cuda_runtime.h>
14
+
15
+ #include <torch/extension.h>
16
+
17
+ /*
18
+ * Computes FP6-FP16 GEMM (C++ interface).
19
+ */
20
+ cudaError_t fp6_linear_kernel(cudaStream_t stream,
21
+ const uint4* Weight1,
22
+ const uint4* Weight2,
23
+ const half* Scales,
24
+ const half* B,
25
+ half* C,
26
+ const size_t M_Global,
27
+ const size_t N_Global,
28
+ const size_t K_Global,
29
+ float* Reduction_Workspace, // Reduction_Workspace_Size = Split_K *
30
+ // M_Global * N_Global * sizeof(fp32)
31
+ int Split_K);
32
+
33
+ /*
34
+ * Computes FP6-FP16 GEMM (PyTorch interface).
35
+ */
36
+ torch::Tensor fp6_linear_forward_cuda(torch::Tensor _in_feats,
37
+ torch::Tensor _weights,
38
+ torch::Tensor _scales,
39
+ int splitK = 1);
40
+
41
+ /*
42
+ * In-place weight prepacking (C++ interface).
43
+ */
44
+ void weight_matrix_prepacking(int* FP6Weights, size_t M, size_t K);
45
+
46
+ /*
47
+ * Weight prepacking (Pytorch interface).
48
+ */
49
+ torch::Tensor weight_matrix_prepacking_cpu(torch::Tensor fp6_tensor, size_t M, size_t K);
50
+
51
+ #endif
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__init__.py ADDED
@@ -0,0 +1,7 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .rms_norm import CUDARMSNorm
7
+ from .rms_pre_norm import CUDARMSPreNorm
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (316 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__pycache__/rms_norm.cpython-310.pyc ADDED
Binary file (1.09 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__pycache__/rms_norm_base.cpython-310.pyc ADDED
Binary file (1.56 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/__pycache__/rms_pre_norm.cpython-310.pyc ADDED
Binary file (1.51 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm.cpp ADDED
@@ -0,0 +1,123 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "rms_norm.h"
7
+
8
+ #ifdef BF16_AVAILABLE
9
+ #define DISPATCH_FOR_FLOAT(DTYPE, ...) \
10
+ [&] { \
11
+ if (DTYPE == torch::kFloat32) { \
12
+ using scalar_t = float; \
13
+ return __VA_ARGS__(); \
14
+ } else if (DTYPE == torch::kFloat16) { \
15
+ using scalar_t = __half; \
16
+ return __VA_ARGS__(); \
17
+ } else if (DTYPE == torch::kBFloat16) { \
18
+ using scalar_t = __nv_bfloat16; \
19
+ return __VA_ARGS__(); \
20
+ } else { \
21
+ TORCH_CHECK(false, "Unsupported dtype for BiasActivation"); \
22
+ } \
23
+ }()
24
+ #else
25
+ #define DISPATCH_FOR_FLOAT(DTYPE, ...) \
26
+ [&] { \
27
+ if (DTYPE == torch::kFloat32) { \
28
+ using scalar_t = float; \
29
+ return __VA_ARGS__(); \
30
+ } else if (DTYPE == torch::kFloat16) { \
31
+ using scalar_t = __half; \
32
+ return __VA_ARGS__(); \
33
+ } else { \
34
+ TORCH_CHECK(false, "Unsupported dtype for BiasActivation"); \
35
+ } \
36
+ }()
37
+ #endif
38
+
39
+ void rms_norm(torch::Tensor& norm_output,
40
+ torch::Tensor& norm_input,
41
+ torch::Tensor& gamma,
42
+ float epsilon)
43
+ {
44
+ TORCH_CHECK(norm_output.scalar_type() == norm_input.scalar_type(),
45
+ "norm_output and norm_input should have the same data type");
46
+ TORCH_CHECK(norm_output.scalar_type() == gamma.scalar_type(),
47
+ "norm_output and gamma should have the same data type");
48
+
49
+ const int32_t rows = norm_input.size(0);
50
+ const int32_t cols = norm_input.size(1);
51
+
52
+ TORCH_CHECK(norm_output.size(0) == rows,
53
+ "norm_output and norm_input should have the same first dimension");
54
+ TORCH_CHECK(norm_output.size(1) == cols,
55
+ "norm_output and norm_input should have the same second dimension");
56
+
57
+ DISPATCH_FOR_FLOAT(norm_output.scalar_type(), [&] {
58
+ scalar_t* norm_output_ptr = reinterpret_cast<scalar_t*>(norm_output.data_ptr());
59
+ scalar_t* norm_input_ptr = reinterpret_cast<scalar_t*>(norm_input.data_ptr());
60
+ scalar_t* gamma_ptr = reinterpret_cast<scalar_t*>(gamma.data_ptr());
61
+ scalar_t* null_t = nullptr;
62
+
63
+ launch_rms_norm(norm_output_ptr,
64
+ null_t,
65
+ norm_input_ptr,
66
+ null_t,
67
+ gamma_ptr,
68
+ epsilon,
69
+ rows,
70
+ cols,
71
+ at::cuda::getCurrentCUDAStream());
72
+ });
73
+ }
74
+
75
+ void rms_pre_norm(torch::Tensor& norm_output,
76
+ torch::Tensor& residual_output,
77
+ torch::Tensor& norm_input,
78
+ torch::Tensor& residual_input,
79
+ torch::Tensor& gamma,
80
+ float epsilon)
81
+ {
82
+ TORCH_CHECK(norm_output.scalar_type() == norm_input.scalar_type(),
83
+ "norm_output and norm_input should have the same data type");
84
+ TORCH_CHECK(norm_output.scalar_type() == gamma.scalar_type(),
85
+ "norm_output and gamma should have the same data type");
86
+
87
+ const int32_t rows = norm_input.size(0);
88
+ const int32_t cols = norm_input.size(1);
89
+
90
+ TORCH_CHECK(norm_output.size(0) == rows,
91
+ "norm_output and norm_input should have the same first dimension");
92
+ TORCH_CHECK(norm_output.size(1) == cols,
93
+ "norm_output and norm_input should have the same second dimension");
94
+
95
+ TORCH_CHECK(residual_output.size(0) == rows,
96
+ "residual_output and norm_input should have the same first dimension");
97
+ TORCH_CHECK(residual_output.size(1) == cols,
98
+ "residual_output and norm_input should have the same second dimension");
99
+
100
+ TORCH_CHECK(residual_input.size(0) == rows,
101
+ "residual_input and norm_input should have the same first dimension");
102
+ TORCH_CHECK(residual_input.size(1) == cols,
103
+ "residual_input and norm_input should have the same second dimension");
104
+
105
+ DISPATCH_FOR_FLOAT(norm_output.scalar_type(), [&] {
106
+ scalar_t* norm_output_ptr = reinterpret_cast<scalar_t*>(norm_output.data_ptr());
107
+ scalar_t* residual_output_ptr = reinterpret_cast<scalar_t*>(residual_output.data_ptr());
108
+ const scalar_t* norm_input_ptr = reinterpret_cast<const scalar_t*>(norm_input.data_ptr());
109
+ const scalar_t* residual_input_ptr =
110
+ reinterpret_cast<const scalar_t*>(residual_input.data_ptr());
111
+ const scalar_t* gamma_ptr = reinterpret_cast<const scalar_t*>(gamma.data_ptr());
112
+
113
+ launch_rms_norm(norm_output_ptr,
114
+ residual_output_ptr,
115
+ norm_input_ptr,
116
+ residual_input_ptr,
117
+ gamma_ptr,
118
+ epsilon,
119
+ rows,
120
+ cols,
121
+ at::cuda::getCurrentCUDAStream());
122
+ });
123
+ }
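In eager PyTorch terms, rms_pre_norm first adds the residual, stores that sum in residual_output, and then RMS-normalizes it with gamma. A reference sketch of that contract for checking the binding (not a replacement for the kernel):

import torch

def rms_pre_norm_ref(norm_input, residual_input, gamma, epsilon=1e-5):
    # Residual add happens first; both the sum and its normalized form are returned.
    residual_output = norm_input + residual_input
    variance = residual_output.float().pow(2).mean(dim=-1, keepdim=True)
    norm_output = residual_output.float() * torch.rsqrt(variance + epsilon) * gamma.float()
    return norm_output.to(norm_input.dtype), residual_output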
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm.h ADDED
@@ -0,0 +1,33 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include <c10/cuda/CUDAStream.h>
9
+ #include <torch/extension.h>
10
+ #include "ds_kernel_utils.h"
11
+
12
+ template <typename T>
13
+ void launch_rms_norm(T* norm_output,
14
+ T* res_output,
15
+ const T* vals,
16
+ const T* residual,
17
+ const T* gamma,
18
+ float epsilon,
19
+ int rows,
20
+ int elems_per_row,
21
+ cudaStream_t stream);
22
+
23
+ void rms_norm(torch::Tensor& norm_output,
24
+ torch::Tensor& norm_input,
25
+ torch::Tensor& gamma,
26
+ float epsilon);
27
+
28
+ void rms_pre_norm(torch::Tensor& norm_output,
29
+ torch::Tensor& residual_output,
30
+ torch::Tensor& norm_input,
31
+ torch::Tensor& residual_input,
32
+ torch::Tensor& gamma,
33
+ float epsilon);
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm.py ADDED
@@ -0,0 +1,28 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+
8
+ from .rms_norm_base import CUDARMSNormBase
9
+
10
+
11
+ class CUDARMSNorm(CUDARMSNormBase):
12
+ """
13
+ Floating point RMS norm kernel for CUDA/ROCm.
14
+
15
+ Performs: z = rms_norm(x)
16
+ """
17
+
18
+ def __call__(self, output_z: torch.Tensor, input_x: torch.Tensor, gamma: torch.Tensor) -> torch.Tensor:
19
+ """
20
+ output_z may alias input_x directly. All Tensors should have the same shape.
21
+
22
+ Parameters:
23
+ output_z (torch.Tensor): Output tensor.
24
+ input_x (torch.Tensor): Input tensor.
25
+ gamma (torch.Tensor): Gamma tensor.
26
+ """
27
+ self.inf_module.rms_norm(output_z, input_x, gamma, self.epsilon)
28
+ return output_z
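The kernel computes z = x * rsqrt(mean(x^2) + eps) * gamma row by row. A plain PyTorch reference, handy for unit-testing CUDARMSNorm (not part of the module itself):

import torch

def rms_norm_ref(x: torch.Tensor, gamma: torch.Tensor, epsilon: float = 1e-5) -> torch.Tensor:
    # Row-wise RMS normalization followed by the learned gain, accumulated in fp32.
    variance = x.float().pow(2).mean(dim=-1, keepdim=True)
    return (x.float() * torch.rsqrt(variance + epsilon)).to(x.dtype) * gamma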
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm_base.py ADDED
@@ -0,0 +1,37 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+
8
+ from ... import DSKernelBase
9
+ from ....inference_utils import elem_size
10
+ from deepspeed.ops.op_builder import InferenceCoreBuilder
11
+
12
+
13
+ class CUDARMSNormBase(DSKernelBase):
14
+ """
15
+ Base class for CUDA RMS norm kernels. They all share the same validation logic,
16
+ so we can share it here.
17
+ """
18
+
19
+ supported_dtypes = [torch.float16, torch.bfloat16, torch.float32]
20
+
21
+ def __init__(self, channels: int, fp_dtype: torch.dtype, epsilon: float = 1e-5):
22
+ """
23
+ Parameters:
24
+ channels (int): Number of channels in the input tensor. channels * element size
25
+ must be a multiple of 16 bytes.
26
+ fp_dtype (torch.dtype): Data type for the input/output/gamma. Supported values
27
+ are torch.float16, torch.bfloat16, and torch.float32.
28
+ """
29
+ if fp_dtype not in CUDARMSNormBase.supported_dtypes:
30
+ raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format(
31
+ fp_dtype, CUDARMSNormBase.supported_dtypes))
32
+
33
+ if elem_size(fp_dtype) * channels % 16 != 0:
34
+ raise ValueError("channels * element size must be a multiple of 16 bytes")
35
+
36
+ self.inf_module = InferenceCoreBuilder().load()
37
+ self.epsilon = epsilon
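The alignment check in __init__ boils down to a per-dtype divisibility rule on channels. A quick sketch of that condition (illustrative helper, using torch element sizes):

import torch

def channels_ok(channels: int, fp_dtype: torch.dtype) -> bool:
    # Rows must be loadable in 16-byte vectors: channels * element size % 16 == 0.
    return (torch.tensor([], dtype=fp_dtype).element_size() * channels) % 16 == 0

# fp16/bf16 need channels % 8 == 0, fp32 needs channels % 4 == 0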
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_rms_norm/rms_norm_cuda.cu ADDED
@@ -0,0 +1,262 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ #include "conversion_utils.h"
+ #include "ds_kernel_utils.h"
+ #include "memory_access_utils.h"
+ #include "reduction_utils.h"
+
+ namespace cg = cooperative_groups;
+ using rop = reduce::ROpType;
+
+ namespace rms {
+ constexpr int granularity = 16;
+ } // namespace rms
+
+ template <typename T, int UNROLL, int threadsPerGroup, int maxThreads>
+ __global__ void rms_norm(T* output, const T* vals, const T* gamma, float epsilon, int elems_per_row)
+ {
+     constexpr int T_per_load = rms::granularity / sizeof(T);
+
+     cg::thread_block tb = cg::this_thread_block();
+     cg::thread_block_tile<hw_warp_size> warp = cg::tiled_partition<hw_warp_size>(tb);
+
+     // X-dimension of the block
+     const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) +
+                              (tb.thread_index().y * elems_per_row);
+     const int thread_offset = tb.thread_index().x * T_per_load;
+     const int base_offset = block_offset + thread_offset;
+     const int stride = blockDim.x * T_per_load;
+
+     float var_sum = reduce::init<rop::Add, float>();
+
+     const T* input_base = vals + base_offset;
+
+     T local_buffer[UNROLL * T_per_load];
+
+ #pragma unroll
+     for (int i = 0; i < UNROLL; i++) {
+         T* iteration_buffer = local_buffer + (i * T_per_load);
+
+         mem_access::load_global<rms::granularity>(iteration_buffer,
+                                                   input_base + (i * stride),
+                                                   thread_offset + (i * stride) < elems_per_row);
+
+ #pragma unroll
+         for (int j = 0; j < T_per_load; j++) {
+             float up_cast = conversion::to<float>(iteration_buffer[j]);
+             float sq_val = up_cast * up_cast;
+             var_sum = reduce::element<rop::Add, float>(var_sum, sq_val);
+         }
+     }
+
+     reduce::partitioned_block<rop::Add, threadsPerGroup>(tb, warp, var_sum);
+     const float var = var_sum / elems_per_row;
+     const T denom = conversion::to<T>(__frsqrt_rn(var + epsilon));
+
+     T* block_output = output + block_offset;
+
+ #pragma unroll
+     for (int i = 0; i < UNROLL; i++) {
+         T* iteration_buffer = local_buffer + (i * T_per_load);
+         const int iter_idx = i * stride + thread_offset;
+         const bool do_loads = (iter_idx < elems_per_row);
+
+         T gamma_local[T_per_load];
+
+         mem_access::load_global<rms::granularity>(gamma_local, gamma + iter_idx, do_loads);
+
+ #pragma unroll
+         for (int j = 0; j < T_per_load; j++) {
+             iteration_buffer[j] *= denom;
+             iteration_buffer[j] *= gamma_local[j];
+         }
+
+         if (do_loads) {
+             mem_access::store_global<rms::granularity>(block_output + iter_idx, iteration_buffer);
+         }
+     }
+ }
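Numerically, each row produced by the rms_norm kernel above corresponds to the following PyTorch sketch (an illustration only; like the kernel it accumulates the mean of squares in fp32, but it does not model the half-precision rounding of the reciprocal RMS):

import torch

def rms_norm_reference(vals: torch.Tensor, gamma: torch.Tensor, epsilon: float) -> torch.Tensor:
    # vals: [rows, elems_per_row], gamma: [elems_per_row]
    var = vals.float().pow(2).mean(dim=-1, keepdim=True)  # mean of squares per row
    return (vals.float() * torch.rsqrt(var + epsilon)).to(vals.dtype) * gamma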
+
+ template <typename T, int UNROLL, int threadsPerGroup, int maxThreads>
+ __global__ void pre_rms_norm(T* output,
+                              T* res_out,
+                              const T* vals,
+                              const T* residual,
+                              const T* gamma,
+                              float epsilon,
+                              int elems_per_row)
+ {
+     constexpr int T_per_load = rms::granularity / sizeof(T);
+
+     cg::thread_block tb = cg::this_thread_block();
+     cg::thread_block_tile<hw_warp_size> warp = cg::tiled_partition<hw_warp_size>(tb);
+
+     // X-dimension of the block
+     const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) +
+                              (tb.thread_index().y * elems_per_row);
+     const int thread_offset = tb.thread_index().x * T_per_load;
+     const int base_offset = block_offset + thread_offset;
+     const int stride = blockDim.x * T_per_load;
+
+     float var_sum = reduce::init<rop::Add, float>();
+
+     const T* input_base = vals + base_offset;
+     const T* residual_base = residual + base_offset;
+     T* res_output = res_out + base_offset;
+
+     T local_buffer[UNROLL * T_per_load];
+
+ #pragma unroll
+     for (int i = 0; i < UNROLL; i++) {
+         T* iteration_buffer = local_buffer + (i * T_per_load);
+         T residual_buffer[T_per_load];
+
+         const int iter_offset = i * stride + thread_offset;
+         const bool do_loads = (iter_offset < elems_per_row);
+
+         mem_access::load_global<rms::granularity>(
+             iteration_buffer, input_base + (i * stride), do_loads);
+         mem_access::load_global<rms::granularity>(
+             residual_buffer, residual_base + (i * stride), do_loads);
+
+ #pragma unroll
+         for (int j = 0; j < T_per_load; j++) {
+             iteration_buffer[j] += residual_buffer[j];
+             float vals_up_cast = conversion::to<float>(iteration_buffer[j]);
+
+             var_sum = reduce::element<rop::Add, float>(var_sum, vals_up_cast * vals_up_cast);
+         }
+
+         if (do_loads) {
+             mem_access::store_global<rms::granularity>(res_output + i * stride, iteration_buffer);
+         }
+     }
+
+     reduce::partitioned_block<rop::Add, threadsPerGroup>(tb, warp, var_sum);
+     const float var = var_sum / elems_per_row;
+     const T denom = conversion::to<T>(__frsqrt_rn(var + epsilon));
+
+     T* block_output = output + block_offset;
+
+ #pragma unroll
+     for (int i = 0; i < UNROLL; i++) {
+         T* iteration_buffer = local_buffer + (i * T_per_load);
+         const int iter_idx = i * stride + thread_offset;
+         const bool do_loads = (iter_idx < elems_per_row);
+
+         T gamma_local[T_per_load];
+
+         mem_access::load_global<rms::granularity>(gamma_local, gamma + iter_idx, do_loads);
+
+ #pragma unroll
+         for (int j = 0; j < T_per_load; j++) {
+             iteration_buffer[j] *= denom;
+             iteration_buffer[j] *= gamma_local[j];
+         }
+
+         if (do_loads) {
+             mem_access::store_global<rms::granularity>(block_output + iter_idx, iteration_buffer);
+         }
+     }
+ }
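pre_rms_norm fuses the residual add with the normalization: it writes the updated residual stream (res_out) and normalizes the summed values. A hedged PyTorch equivalent, mirroring the kernel's two outputs (the residual add is performed in the activation dtype, as in the kernel):

def pre_rms_norm_reference(vals: torch.Tensor,
                           residual: torch.Tensor,
                           gamma: torch.Tensor,
                           epsilon: float):
    # Residual add first; the RMS statistics are taken over the summed values.
    res_out = vals + residual
    var = res_out.float().pow(2).mean(dim=-1, keepdim=True)
    norm = (res_out.float() * torch.rsqrt(var + epsilon)).to(vals.dtype) * gamma
    return norm, res_out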
+
+ #define LAUNCH_RMS_NORM(UNROLL, threadsPerGroup, maxThreads) \
+     rms_norm<T, UNROLL, threadsPerGroup, maxThreads>         \
+         <<<grid, block, 0, stream>>>(norm_output, vals, gamma, epsilon, elems_per_row);
+
+ #define LAUNCH_PRE_RMS_NORM(UNROLL, threadsPerGroup, maxThreads)                      \
+     pre_rms_norm<T, UNROLL, threadsPerGroup, maxThreads><<<grid, block, 0, stream>>>( \
+         norm_output, res_output, vals, residual, gamma, epsilon, elems_per_row);
+
+ #define LAUNCH_ALL_RMS_NORM(UNROLL, threadsPerGroup, maxThreads) \
+     if (pre_norm) {                                              \
+         LAUNCH_PRE_RMS_NORM(UNROLL, threadsPerGroup, maxThreads) \
+     } else {                                                     \
+         LAUNCH_RMS_NORM(UNROLL, threadsPerGroup, maxThreads)     \
+     }
+
+ template <typename T>
+ void launch_rms_norm(T* norm_output,
+                      T* res_output,
+                      const T* vals,
+                      const T* residual,
+                      const T* gamma,
+                      float epsilon,
+                      int rows,
+                      int elems_per_row,
+                      cudaStream_t stream)
+ {
+     // 8 for __half, 4 for float
+     constexpr int T_per_load = rms::granularity / sizeof(T);
+     constexpr int maxThreads = 256;
+     constexpr int internalUnroll = sizeof(T) == 4 ? 4 : 2;
+
+     const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false;
+     const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internalUnroll;
+
+     // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of
+     // warp-sized blocks rather than stepping up to 64/96 threads
+     const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step);
+     const int threads_per_group = (one_step_threads < maxThreads) ? one_step_threads : maxThreads;
+
+     const int groups_per_block_max =
+         is_subblock_schedule ? (maxThreads + threads_per_group - 1) / threads_per_group : 1;
+     const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max;
+     const int groups_launch = (groups_per_block + rows - 1) / groups_per_block;
+
+     dim3 block(threads_per_group, groups_per_block);
+     dim3 grid(groups_launch);
+
+     const int elems_per_step = threads_per_group * h_per_step;
+     const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step;
+
+     bool pre_norm = (residual == nullptr) ? false : true;
+
+     if (is_subblock_schedule) {
+         // <=128
+         if (threads_per_group == 1) {
+             LAUNCH_ALL_RMS_NORM(1, 1, maxThreads);
+         } else if (threads_per_group == 2) {
+             LAUNCH_ALL_RMS_NORM(1, 2, maxThreads);
+         } else if (threads_per_group == 4) {
+             LAUNCH_ALL_RMS_NORM(1, 4, maxThreads);
+         } else if (threads_per_group == 8) {
+             LAUNCH_ALL_RMS_NORM(1, 8, maxThreads);
+         } else if (threads_per_group == 16) {
+             LAUNCH_ALL_RMS_NORM(1, 16, maxThreads);
+         }
+     } else if (external_unRoll == 1) {
+         // 129 - 4096 elems
+         // (this can launch with 1-7 warps as well)
+         LAUNCH_ALL_RMS_NORM(1 * internalUnroll, maxThreads, maxThreads);
+     } else if (external_unRoll == 2) {
+         // 4097 - 8192 elems
+         LAUNCH_ALL_RMS_NORM(2 * internalUnroll, maxThreads, maxThreads);
+     } else if (external_unRoll == 3) {
+         // 8193 - 12288 elems
+         LAUNCH_ALL_RMS_NORM(3 * internalUnroll, maxThreads, maxThreads);
+     } else if (external_unRoll == 4) {
+         // 12289 - 16384 elems
+         LAUNCH_ALL_RMS_NORM(4 * internalUnroll, maxThreads, maxThreads);
+     }
+ }
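The dispatch in launch_rms_norm is easier to follow with the scheduling arithmetic written out. The sketch below reproduces that arithmetic in Python (a sketch only; it assumes 2-byte fp16/bf16 elements by default, and the helper names are illustrative, not part of the kernel API):

def next_pow2(x: int) -> int:
    # Smallest power of two >= x, matching the CUDA next_pow2 helper's intent.
    return 1 << max(x - 1, 0).bit_length()

def rms_norm_schedule(rows: int, elems_per_row: int, elem_bytes: int = 2) -> dict:
    t_per_load = 16 // elem_bytes                  # elements per 16-byte vector load
    max_threads = 256
    internal_unroll = 4 if elem_bytes == 4 else 2

    is_subblock = elems_per_row <= 128
    h_per_step = t_per_load if is_subblock else t_per_load * internal_unroll

    one_step_threads = next_pow2(-(-elems_per_row // h_per_step))
    threads_per_group = min(one_step_threads, max_threads)

    groups_per_block_max = -(-max_threads // threads_per_group) if is_subblock else 1
    groups_per_block = min(rows, groups_per_block_max)
    groups_launch = -(-rows // groups_per_block)   # ceil(rows / groups_per_block)

    external_unroll = -(-elems_per_row // (threads_per_group * h_per_step))
    return {"block": (threads_per_group, groups_per_block),
            "grid": groups_launch,
            "external_unroll": external_unroll}

# Example: a 4096-wide fp16 row uses a full 256-thread group with external_unroll == 1,
# i.e. the "129 - 4096 elems" branch above.
print(rms_norm_schedule(rows=32, elems_per_row=4096))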
+
+ #define INSTANTIATE_LAUNCH_RMS_NORM(T)                  \
+     template void launch_rms_norm<T>(T * norm_output,   \
+                                      T * res_output,    \
+                                      const T* vals,     \
+                                      const T* residual, \
+                                      const T* gamma,    \
+                                      float epsilon,     \
+                                      int rows,          \
+                                      int elems_per_row, \
+                                      cudaStream_t stream);
+
+ INSTANTIATE_LAUNCH_RMS_NORM(float)
+ INSTANTIATE_LAUNCH_RMS_NORM(__half)
+ #ifdef BF16_AVAILABLE
+ INSTANTIATE_LAUNCH_RMS_NORM(__nv_bfloat16)
+ #endif
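A quick consistency check between the two reference sketches defined earlier (and, once the extension is built, a starting point for validating the CUDA kernels against them) might look like this; the shapes, epsilon, and tolerance are arbitrary choices for illustration:

import torch

x = torch.randn(4, 4096, dtype=torch.float16)
res = torch.randn_like(x)
gamma = torch.ones(4096, dtype=torch.float16)

norm, res_out = pre_rms_norm_reference(x, res, gamma, epsilon=1e-5)
# With the residual folded in up front, the pre-norm path and plain RMSNorm agree.
assert torch.allclose(norm, rms_norm_reference(x + res, gamma, epsilon=1e-5), atol=1e-2)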