applied-ai-018 commited on
Commit
359a939
·
verified ·
1 Parent(s): 4429f2c

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/common/custom_cuda_kernel.cu +44 -0
  2. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/StopWatch.h +103 -0
  3. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/Timer.h +51 -0
  4. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/activation_type.h +17 -0
  5. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/compat.h +19 -0
  6. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/context.h +180 -0
  7. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/conversion_utils.h +640 -0
  8. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/cpu_adagrad.h +210 -0
  9. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/cpu_adam.h +327 -0
  10. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/cpu_lion.h +269 -0
  11. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/cublas_wrappers.h +95 -0
  12. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/custom_cuda_layers.h +327 -0
  13. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/dequantization_utils.h +177 -0
  14. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/dropout.h +81 -0
  15. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/ds_kernel_utils.h +58 -0
  16. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/ds_transformer_cuda.h +189 -0
  17. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/feed_forward.h +110 -0
  18. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/gelu.h +41 -0
  19. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/gemm_test.h +334 -0
  20. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/general_kernels.h +56 -0
  21. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/memory_access_utils.h +1144 -0
  22. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/normalize_layer.h +207 -0
  23. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/quantization.h +108 -0
  24. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/quantization_utils.h +468 -0
  25. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/quantizer.h +19 -0
  26. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/reduction_utils.h +826 -0
  27. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/simd.h +198 -0
  28. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/softmax.h +65 -0
  29. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/strided_batch_gemm.h +200 -0
  30. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/type_shim.h +124 -0
  31. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/lamb/fused_lamb_cuda.cpp +113 -0
  32. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/lamb/fused_lamb_cuda_kernel.cu +478 -0
  33. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/quantization/dequantize.cu +72 -0
  34. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/quantization/fake_quantizer.cu +1028 -0
  35. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/quantization/pt_binding.cpp +298 -0
  36. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/quantization/quant_reduce.cu +263 -0
  37. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/quantization/quantize.cu +151 -0
  38. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/quantization/quantize_intX.cu +281 -0
  39. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/quantization/swizzled_quantize.cu +196 -0
  40. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/gather_scatter.cu +186 -0
  41. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/pt_binding.cpp +216 -0
  42. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/slice_attn_masks.cu +128 -0
  43. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/token_sort.cu +194 -0
  44. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/sparse_attention/utils.cpp +127 -0
  45. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/gelu.cu +710 -0
  46. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/pointwise_ops.cu +74 -0
  47. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/rms_norm.cu +263 -0
  48. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/transform.cu +727 -0
  49. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/utils/flatten_unflatten.cpp +29 -0
  50. venv/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/adagrad/cpu_adagrad.cpp +196 -0
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/common/custom_cuda_kernel.cu ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "custom_cuda_layers.h"
7
+
8
+ __global__ void param_update_kernel(const float* input, __half* output, int size)
9
+ {
10
+ int id = blockIdx.x * blockDim.x + threadIdx.x;
11
+
12
+ if (id < size) { output[id] = (__half)input[id]; }
13
+ }
14
+
15
+ void launch_param_update(const float* input, __half* output, int size, cudaStream_t stream)
16
+ {
17
+ int threads = 1024;
18
+
19
+ dim3 grid_dim((size - 1) / threads + 1);
20
+ dim3 block_dim(threads);
21
+
22
+ param_update_kernel<<<grid_dim, block_dim, 0, stream>>>(input, output, size);
23
+ }
24
+
25
+ __global__ void param_update_kernel_half(const float* input, __half* output, int size)
26
+ {
27
+ int id = blockIdx.x * blockDim.x + threadIdx.x;
28
+ __half2* output_cast = reinterpret_cast<__half2*>(output);
29
+ if (id < size) {
30
+ float input_f = input[id];
31
+ __half2* input_h = reinterpret_cast<__half2*>(&input_f);
32
+ output_cast[id] = *input_h;
33
+ }
34
+ }
35
+
36
+ void launch_param_update_half(const float* input, __half* output, int size, cudaStream_t stream)
37
+ {
38
+ int threads = 1024;
39
+ size /= 2;
40
+ dim3 grid_dim((size - 1) / threads + 1);
41
+ dim3 block_dim(threads);
42
+
43
+ param_update_kernel_half<<<grid_dim, block_dim, 0, stream>>>(input, output, size);
44
+ }
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/StopWatch.h ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+ #ifdef _WIN32
8
+ #include <windows.h>
9
+ #else
10
+ #include <time.h>
11
+ #endif
12
+
13
+ #ifdef _WIN32
14
+
15
+ class Stopwatch {
16
+ private:
17
+ double m_total_time;
18
+ LARGE_INTEGER m_start_time;
19
+
20
+ public:
21
+ Stopwatch() { m_total_time = 0.0; }
22
+
23
+ ~Stopwatch() {}
24
+
25
+ void Reset() { m_total_time = 0.0; }
26
+
27
+ void Start() { QueryPerformanceCounter(&m_start_time); }
28
+
29
+ void Restart()
30
+ {
31
+ m_total_time = 0.0;
32
+ QueryPerformanceCounter(&m_start_time);
33
+ }
34
+
35
+ void Stop()
36
+ {
37
+ LARGE_INTEGER frequency;
38
+ LARGE_INTEGER stop_time;
39
+ QueryPerformanceFrequency(&frequency);
40
+ QueryPerformanceCounter(&stop_time);
41
+ m_total_time +=
42
+ ((double)(stop_time.QuadPart - m_start_time.QuadPart) / (double)frequency.QuadPart);
43
+ }
44
+
45
+ double GetTimeInSeconds() { return m_total_time; }
46
+ };
47
+
48
+ #else
49
+
50
+ class Stopwatch {
51
+ private:
52
+ double m_total_time;
53
+ struct timespec m_start_time;
54
+ bool m_is_started;
55
+
56
+ public:
57
+ Stopwatch()
58
+ {
59
+ m_total_time = 0.0;
60
+ m_is_started = false;
61
+ }
62
+
63
+ ~Stopwatch() {}
64
+
65
+ void Reset() { m_total_time = 0.0; }
66
+
67
+ void Start()
68
+ {
69
+ clock_gettime(CLOCK_MONOTONIC, &m_start_time);
70
+ m_is_started = true;
71
+ }
72
+
73
+ void Restart()
74
+ {
75
+ m_total_time = 0.0;
76
+ clock_gettime(CLOCK_MONOTONIC, &m_start_time);
77
+ m_is_started = true;
78
+ }
79
+
80
+ void Stop()
81
+ {
82
+ if (m_is_started) {
83
+ m_is_started = false;
84
+
85
+ struct timespec end_time;
86
+ clock_gettime(CLOCK_MONOTONIC, &end_time);
87
+
88
+ m_total_time += (double)(end_time.tv_sec - m_start_time.tv_sec) +
89
+ (double)(end_time.tv_nsec - m_start_time.tv_nsec) / 1e9;
90
+ }
91
+ }
92
+
93
+ double GetTimeInSeconds()
94
+ {
95
+ if (m_is_started) {
96
+ Stop();
97
+ Start();
98
+ }
99
+ return m_total_time;
100
+ }
101
+ };
102
+
103
+ #endif
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/Timer.h ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #ifndef __TIMER_H__
7
+ #define __TIMER_H__
8
+
9
+ #include <cuda_runtime.h>
10
+ #include <chrono>
11
+ #include "cuda.h"
12
+
13
+ class GPUTimer {
14
+ cudaEvent_t start, stop;
15
+
16
+ public:
17
+ GPUTimer()
18
+ {
19
+ cudaEventCreate(&start);
20
+ cudaEventCreate(&stop);
21
+ }
22
+ ~GPUTimer()
23
+ {
24
+ cudaEventDestroy(start);
25
+ cudaEventDestroy(stop);
26
+ }
27
+ inline void Record() { cudaEventRecord(start); }
28
+ inline void Elapsed(float& time_elapsed)
29
+ {
30
+ cudaEventRecord(stop);
31
+ cudaEventSynchronize(stop);
32
+ cudaEventElapsedTime(&time_elapsed, start, stop);
33
+ }
34
+ };
35
+
36
+ class CPUTimer {
37
+ std::chrono::high_resolution_clock::time_point start;
38
+
39
+ public:
40
+ CPUTimer() : start(std::chrono::high_resolution_clock::now()) {}
41
+ inline void Reset() { start = std::chrono::high_resolution_clock::now(); }
42
+ inline float Elapsed()
43
+ {
44
+ auto temp = start;
45
+ start = std::chrono::high_resolution_clock::now();
46
+ return (float)(std::chrono::duration_cast<std::chrono::microseconds>(start - temp).count() /
47
+ 1e3);
48
+ }
49
+ };
50
+
51
+ #endif
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/activation_type.h ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ enum ActivationType {
9
+ GELU = 0,
10
+ RELU = 1,
11
+ SILU = 2,
12
+ GEGLU = 3,
13
+ ReGLU = 4,
14
+ SiGLU = 5,
15
+ IDENTITY = 6,
16
+ InvalidType = -1
17
+ };
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/compat.h ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ /*
7
+ Copyright NVIDIA/apex
8
+ This file is adapted from fused adam in NVIDIA/apex, commit a109f85
9
+ */
10
+
11
+ #ifndef TORCH_CHECK
12
+ #define TORCH_CHECK AT_CHECK
13
+ #endif
14
+
15
+ #ifdef VERSION_GE_1_3
16
+ #define DATA_PTR data_ptr
17
+ #else
18
+ #define DATA_PTR data
19
+ #endif
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/context.h ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include <ATen/cuda/CUDAContext.h>
9
+ #include <cuda_runtime_api.h>
10
+ #include <cassert>
11
+ #include <iostream>
12
+ #include <vector>
13
+ #include "cublas_v2.h"
14
+ #include "cuda.h"
15
+ #include "curand.h"
16
+ #include "gemm_test.h"
17
+
18
+ #define WARP_SIZE 32
19
+
20
+ #define CUDA_CHECK(callstr) \
21
+ { \
22
+ cudaError_t error_code = callstr; \
23
+ if (error_code != cudaSuccess) { \
24
+ std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__; \
25
+ assert(0); \
26
+ } \
27
+ }
28
+
29
+ #define CUDA_1D_KERNEL_LOOP(i, n) \
30
+ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
31
+
32
+ #define CUDA_2D_KERNEL_LOOP(i, n, j, m) \
33
+ for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x) \
34
+ for (size_t j = blockIdx.y * blockDim.y + threadIdx.y; j < (m); j += blockDim.y * gridDim.y)
35
+
36
+ #define DS_CUDA_NUM_THREADS 512
37
+ #define DS_MAXIMUM_NUM_BLOCKS 262144
38
+
39
+ inline int DS_GET_BLOCKS(const int N)
40
+ {
41
+ return (std::max)(
42
+ (std::min)((N + DS_CUDA_NUM_THREADS - 1) / DS_CUDA_NUM_THREADS, DS_MAXIMUM_NUM_BLOCKS),
43
+ // Use at least 1 block, since CUDA does not allow empty block
44
+ 1);
45
+ }
46
+
47
+ class TrainingContext {
48
+ public:
49
+ TrainingContext() : _workspace(nullptr), _seed(42), _curr_offset(0)
50
+ {
51
+ curandCreateGenerator(&_gen, CURAND_RNG_PSEUDO_DEFAULT);
52
+ curandSetPseudoRandomGeneratorSeed(_gen, 123);
53
+ cublasStatus_t stat = cublasCreate(&_cublasHandle);
54
+ if (stat != CUBLAS_STATUS_SUCCESS) {
55
+ // It would be nice to use cublasGetStatusName and
56
+ // cublasGetStatusString, but they were only added in CUDA 11.4.2.
57
+ auto message = std::string("Failed to create cublas handle: cublasStatus_t was ") +
58
+ std::to_string(stat);
59
+ std::cerr << message << std::endl;
60
+ throw std::runtime_error(message);
61
+ }
62
+ }
63
+
64
+ virtual ~TrainingContext()
65
+ {
66
+ cublasDestroy(_cublasHandle);
67
+ cudaFree(_workspace);
68
+ }
69
+
70
+ static TrainingContext& Instance()
71
+ {
72
+ static TrainingContext _ctx;
73
+ return _ctx;
74
+ }
75
+
76
+ void SetWorkSpace(void* workspace)
77
+ {
78
+ if (!workspace) { throw std::runtime_error("Workspace is null."); }
79
+ _workspace = workspace;
80
+ }
81
+
82
+ void* GetWorkSpace() { return _workspace; }
83
+
84
+ curandGenerator_t& GetRandGenerator() { return _gen; }
85
+
86
+ cudaStream_t GetCurrentStream()
87
+ {
88
+ // get current pytorch stream.
89
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream();
90
+ return stream;
91
+ }
92
+
93
+ cudaStream_t GetNewStream() { return at::cuda::getStreamFromPool(); }
94
+
95
+ cublasHandle_t GetCublasHandle() { return _cublasHandle; }
96
+
97
+ std::pair<uint64_t, uint64_t> IncrementOffset(uint64_t offset_inc)
98
+ {
99
+ uint64_t offset = _curr_offset;
100
+ _curr_offset += offset_inc;
101
+ return std::pair<uint64_t, uint64_t>(_seed, offset);
102
+ }
103
+
104
+ void SetSeed(uint64_t new_seed) { _seed = new_seed; }
105
+
106
+ void TestGemmFP16(bool test_gemm, int batch_size, int seq_len, int head_num, int size_per_head)
107
+ {
108
+ // avoid rerun.
109
+ if (_gemm_algos.size() > 0) return;
110
+
111
+ if (test_gemm) {
112
+ cublasHandle_t handle = GetCublasHandle();
113
+
114
+ std::unique_ptr<GemmTest<__half>> test_qkv_fw(
115
+ new GemmTest<__half>(batch_size * seq_len, // M
116
+ head_num * size_per_head, // N
117
+ head_num * size_per_head, // K
118
+ CUBLAS_OP_T,
119
+ CUBLAS_OP_N,
120
+ handle));
121
+
122
+ std::unique_ptr<GemmTest<__half>> test_inter(
123
+ new GemmTest<__half>(batch_size * seq_len, // M
124
+ 4 * head_num * size_per_head, // N
125
+ head_num * size_per_head, // K
126
+ CUBLAS_OP_T,
127
+ CUBLAS_OP_N,
128
+ handle));
129
+
130
+ std::unique_ptr<GemmTest<__half>> test_output(
131
+ new GemmTest<__half>(batch_size * seq_len, // M
132
+ head_num * size_per_head, // N
133
+ 4 * head_num * size_per_head, // K
134
+ CUBLAS_OP_T,
135
+ CUBLAS_OP_N,
136
+ handle));
137
+
138
+ std::unique_ptr<StridedGemmTest<__half>> test_attn_scores(
139
+ new StridedGemmTest<__half>(batch_size * head_num, // batch
140
+ seq_len, // M
141
+ seq_len, // N
142
+ size_per_head, // K
143
+ CUBLAS_OP_T,
144
+ CUBLAS_OP_N,
145
+ handle));
146
+
147
+ std::unique_ptr<StridedGemmTest<__half>> test_attn_context(
148
+ new StridedGemmTest<__half>(batch_size * head_num, // batch
149
+ size_per_head, // M
150
+ seq_len, // N
151
+ seq_len, // K
152
+ CUBLAS_OP_N,
153
+ CUBLAS_OP_N,
154
+ handle));
155
+
156
+ _gemm_algos.push_back(test_qkv_fw->TestAlgo(100));
157
+ _gemm_algos.push_back(test_inter->TestAlgo(100));
158
+ _gemm_algos.push_back(test_output->TestAlgo(100));
159
+ _gemm_algos.push_back(test_attn_scores->TestAlgo(100));
160
+ _gemm_algos.push_back(test_attn_context->TestAlgo(100));
161
+ } else {
162
+ // Use default algo.
163
+ _gemm_algos.push_back(std::array<int, 3>({99, 99, 99}));
164
+ _gemm_algos.push_back(std::array<int, 3>({99, 99, 99}));
165
+ _gemm_algos.push_back(std::array<int, 3>({99, 99, 99}));
166
+ _gemm_algos.push_back(std::array<int, 3>({99, 99, 99}));
167
+ _gemm_algos.push_back(std::array<int, 3>({99, 99, 99}));
168
+ }
169
+ }
170
+
171
+ const std::vector<std::array<int, 3>>& GetGemmAlgos() const { return _gemm_algos; }
172
+
173
+ private:
174
+ curandGenerator_t _gen;
175
+ cublasHandle_t _cublasHandle;
176
+ void* _workspace;
177
+ uint64_t _seed;
178
+ uint64_t _curr_offset;
179
+ std::vector<std::array<int, 3>> _gemm_algos;
180
+ };
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/conversion_utils.h ADDED
@@ -0,0 +1,640 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include "ds_kernel_utils.h"
9
+
10
+ #include <stdint.h>
11
+
12
+ #ifdef BF16_AVAILABLE
13
+ #include <cuda_bf16.h>
14
+ #endif
15
+
16
+ namespace conversion {
17
+
18
+ // Basic primitive for constructing conversions
19
+ template <typename TO, typename FROM>
20
+ DS_D_INLINE TO to(FROM val)
21
+ {
22
+ return to(val);
23
+ }
24
+
25
+ // Specializations
26
+
27
+ /********************* Identity Conversions *********************/
28
+ /*
29
+ Identity conversions are useful in templated functions where we might have
30
+ a fixed destination type. For example, I might have a kernel that accepts
31
+ __half, __nv_bfloat16, and float but always want to do the core computation
32
+ at floating point:
33
+
34
+ T mem_value = input[idx];
35
+ float compute_value = conversion::to<float, T>(mem_value);
36
+
37
+ In practice, we should be able to elide the second template parameter:
38
+ float compute_val = conversion::to<float>(mem_value);
39
+
40
+ In this case, we need an implementation to handle the T = float case
41
+
42
+ NOTE: The type inferencing system appears to be unable to handle inferring the first
43
+ template parameter, even in the trivial case.
44
+ */
45
+
46
+ // Floating point types
47
+ template <>
48
+ DS_D_INLINE double to(double val)
49
+ {
50
+ return val;
51
+ }
52
+ template <>
53
+ DS_D_INLINE float to(float val)
54
+ {
55
+ return val;
56
+ }
57
+ template <>
58
+ DS_D_INLINE __half to(__half val)
59
+ {
60
+ return val;
61
+ }
62
+ #ifdef BF16_AVAILABLE
63
+ template <>
64
+ DS_D_INLINE __nv_bfloat16 to(__nv_bfloat16 val)
65
+ {
66
+ return val;
67
+ }
68
+ #endif
69
+
70
+ // Integer types
71
+ template <>
72
+ DS_D_INLINE int8_t to(int8_t val)
73
+ {
74
+ return val;
75
+ }
76
+ template <>
77
+ DS_D_INLINE uint8_t to(uint8_t val)
78
+ {
79
+ return val;
80
+ }
81
+ template <>
82
+ DS_D_INLINE int16_t to(int16_t val)
83
+ {
84
+ return val;
85
+ }
86
+ template <>
87
+ DS_D_INLINE uint16_t to(uint16_t val)
88
+ {
89
+ return val;
90
+ }
91
+ template <>
92
+ DS_D_INLINE int32_t to(int32_t val)
93
+ {
94
+ return val;
95
+ }
96
+ template <>
97
+ DS_D_INLINE uint32_t to(uint32_t val)
98
+ {
99
+ return val;
100
+ }
101
+ template <>
102
+ DS_D_INLINE int64_t to(int64_t val)
103
+ {
104
+ return val;
105
+ }
106
+ template <>
107
+ DS_D_INLINE uint64_t to(uint64_t val)
108
+ {
109
+ return val;
110
+ }
111
+
112
+ // TODO: evaluate if we want bools
113
+
114
+ /********************* To Double Conversions *********************/
115
+
116
+ // * to double variants
117
+
118
+ // Would normally like to not use C cast, but this is an important enough conversion
119
+ // to keep
120
+ template <>
121
+ DS_D_INLINE double to(float val)
122
+ {
123
+ #ifdef PTX_AVAILABLE
124
+ double ret_val;
125
+ asm("ctv.rn.f64.f32 %0, %1;\n" : "=d"(ret_val) : "f"(val));
126
+ return ret_val;
127
+ #else
128
+ return double(val);
129
+ #endif
130
+ }
131
+ // Note: there is a CVT instruction for __half -> double, but there's no inline interface
132
+ // for passing a single half value
133
+ template <>
134
+ DS_D_INLINE double to(__half val)
135
+ {
136
+ return to<double>(__half2float(val));
137
+ }
138
+ template <>
139
+ DS_D_INLINE double to(int64_t val)
140
+ {
141
+ return __ll2double_rn(val);
142
+ }
143
+ template <>
144
+ DS_D_INLINE double to(int32_t val)
145
+ {
146
+ return __int2double_rn(val);
147
+ }
148
+ template <>
149
+ DS_D_INLINE double to(int16_t val)
150
+ {
151
+ return __int2double_rn(val);
152
+ }
153
+ template <>
154
+ DS_D_INLINE double to(int8_t val)
155
+ {
156
+ return __int2double_rn(val);
157
+ }
158
+ template <>
159
+ DS_D_INLINE double to(uint64_t val)
160
+ {
161
+ return __ull2double_rn(val);
162
+ }
163
+ template <>
164
+ DS_D_INLINE double to(uint32_t val)
165
+ {
166
+ return __uint2double_rn(val);
167
+ }
168
+ template <>
169
+ DS_D_INLINE double to(uint16_t val)
170
+ {
171
+ return __uint2double_rn(val);
172
+ }
173
+ template <>
174
+ DS_D_INLINE double to(uint8_t val)
175
+ {
176
+ return __uint2double_rn(val);
177
+ }
178
+
179
+ // Same applies here
180
+ #ifdef BF16_AVAILABLE
181
+ template <>
182
+ DS_D_INLINE double to(__nv_bfloat16 val)
183
+ {
184
+ return to<double>(__bfloat162float(val));
185
+ }
186
+ #endif
187
+
188
+ /********************* To Float Conversions *********************/
189
+
190
+ template <>
191
+ DS_D_INLINE float to(double val)
192
+ {
193
+ return __double2float_rn(val);
194
+ }
195
+ template <>
196
+ DS_D_INLINE float to(__half val)
197
+ {
198
+ return __half2float(val);
199
+ }
200
+ template <>
201
+ DS_D_INLINE float to(int64_t val)
202
+ {
203
+ return __ll2float_rn(val);
204
+ }
205
+ template <>
206
+ DS_D_INLINE float to(int32_t val)
207
+ {
208
+ return __int2float_rn(val);
209
+ }
210
+ template <>
211
+ DS_D_INLINE float to(int16_t val)
212
+ {
213
+ return __int2float_rn(val);
214
+ }
215
+ template <>
216
+ DS_D_INLINE float to(int8_t val)
217
+ {
218
+ return __int2float_rn(val);
219
+ }
220
+ template <>
221
+ DS_D_INLINE float to(uint64_t val)
222
+ {
223
+ return __ull2float_rn(val);
224
+ }
225
+ template <>
226
+ DS_D_INLINE float to(uint32_t val)
227
+ {
228
+ return __uint2float_rn(val);
229
+ }
230
+ template <>
231
+ DS_D_INLINE float to(uint16_t val)
232
+ {
233
+ return __uint2float_rn(val);
234
+ }
235
+ template <>
236
+ DS_D_INLINE float to(uint8_t val)
237
+ {
238
+ return __uint2float_rn(val);
239
+ }
240
+
241
+ #ifdef BF16_AVAILABLE
242
+ template <>
243
+ DS_D_INLINE float to(__nv_bfloat16 val)
244
+ {
245
+ return __bfloat162float(val);
246
+ }
247
+ #endif
248
+
249
+ /********************* To Float2 Conversions *********************/
250
+ template <>
251
+ DS_D_INLINE float2 to(__half2 val)
252
+ {
253
+ return __half22float2(val);
254
+ }
255
+
256
+ #ifdef BF16_AVAILABLE
257
+ template <>
258
+ DS_D_INLINE float2 to(__nv_bfloat162 val)
259
+ {
260
+ return __bfloat1622float2(val);
261
+ }
262
+ #endif
263
+
264
+ /********************* To Half Conversions *********************/
265
+ template <>
266
+ DS_D_INLINE __half to(double val)
267
+ {
268
+ #ifdef __HIP_PLATFORM_AMD__
269
+ float val_f = __double2float_rn(val);
270
+ return __float2half(val_f);
271
+ #else
272
+ return __double2half(val);
273
+ #endif
274
+ }
275
+ template <>
276
+ DS_D_INLINE __half to(float val)
277
+ {
278
+ return __float2half(val);
279
+ }
280
+ template <>
281
+ DS_D_INLINE __half to(int64_t val)
282
+ {
283
+ return __ll2half_rn(val);
284
+ }
285
+ template <>
286
+ DS_D_INLINE __half to(int32_t val)
287
+ {
288
+ return __int2half_rn(val);
289
+ }
290
+ template <>
291
+ DS_D_INLINE __half to(int16_t val)
292
+ {
293
+ return __short2half_rn(val);
294
+ }
295
+ template <>
296
+ DS_D_INLINE __half to(int8_t val)
297
+ {
298
+ return __int2half_rn(val);
299
+ }
300
+ template <>
301
+ DS_D_INLINE __half to(uint64_t val)
302
+ {
303
+ return __ull2half_rn(val);
304
+ }
305
+ template <>
306
+ DS_D_INLINE __half to(uint32_t val)
307
+ {
308
+ return __uint2half_rn(val);
309
+ }
310
+ template <>
311
+ DS_D_INLINE __half to(uint16_t val)
312
+ {
313
+ return __ushort2half_rn(val);
314
+ }
315
+ template <>
316
+ DS_D_INLINE __half to(uint8_t val)
317
+ {
318
+ return __uint2half_rn(val);
319
+ }
320
+
321
+ #ifdef BF16_AVAILABLE
322
+ // No direct conversion
323
+ template <>
324
+ DS_D_INLINE __half to(__nv_bfloat16 val)
325
+ {
326
+ return to<__half>(to<float>(val));
327
+ }
328
+ #endif
329
+
330
+ /********************* To Half2 Conversions *********************/
331
+ template <>
332
+ DS_D_INLINE __half2 to(float2 val)
333
+ {
334
+ return __float22half2_rn(val);
335
+ }
336
+ template <>
337
+ DS_D_INLINE __half2 to(float val)
338
+ {
339
+ return __float2half2_rn(val);
340
+ }
341
+
342
+ #ifdef BF16_AVAILABLE
343
+ // No direct conversion
344
+ template <>
345
+ DS_D_INLINE __half2 to(__nv_bfloat162 val)
346
+ {
347
+ return to<__half2>(to<float2>(val));
348
+ }
349
+ #endif
350
+
351
+ /********************* To BF16 Conversions *********************/
352
+ #ifdef BF16_AVAILABLE
353
+ template <>
354
+ DS_D_INLINE __nv_bfloat16 to(double val)
355
+ {
356
+ return __double2bfloat16(val);
357
+ }
358
+ template <>
359
+ DS_D_INLINE __nv_bfloat16 to(float val)
360
+ {
361
+ return __float2bfloat16(val);
362
+ }
363
+ template <>
364
+ DS_D_INLINE __nv_bfloat16 to(int64_t val)
365
+ {
366
+ return __ll2bfloat16_rn(val);
367
+ }
368
+ template <>
369
+ DS_D_INLINE __nv_bfloat16 to(int32_t val)
370
+ {
371
+ return __int2bfloat16_rn(val);
372
+ }
373
+ template <>
374
+ DS_D_INLINE __nv_bfloat16 to(int16_t val)
375
+ {
376
+ return __short2bfloat16_rn(val);
377
+ }
378
+ template <>
379
+ DS_D_INLINE __nv_bfloat16 to(int8_t val)
380
+ {
381
+ return __int2bfloat16_rn(val);
382
+ }
383
+ template <>
384
+ DS_D_INLINE __nv_bfloat16 to(uint64_t val)
385
+ {
386
+ return __ull2bfloat16_rn(val);
387
+ }
388
+ template <>
389
+ DS_D_INLINE __nv_bfloat16 to(uint32_t val)
390
+ {
391
+ return __uint2bfloat16_rn(val);
392
+ }
393
+ template <>
394
+ DS_D_INLINE __nv_bfloat16 to(uint16_t val)
395
+ {
396
+ return __ushort2bfloat16_rn(val);
397
+ }
398
+ template <>
399
+ DS_D_INLINE __nv_bfloat16 to(uint8_t val)
400
+ {
401
+ return __uint2bfloat16_rn(val);
402
+ }
403
+ #endif
404
+
405
+ /********************* To BF162 Conversions *********************/
406
+ #ifdef BF16_AVAILABLE
407
+ template <>
408
+ DS_D_INLINE __nv_bfloat162 to(float2 val)
409
+ {
410
+ return __float22bfloat162_rn(val);
411
+ }
412
+ template <>
413
+ DS_D_INLINE __nv_bfloat162 to(float val)
414
+ {
415
+ return __float2bfloat162_rn(val);
416
+ }
417
+ template <>
418
+ DS_D_INLINE __nv_bfloat162 to(__half2 val)
419
+ {
420
+ return to<__nv_bfloat162>(to<float2>(val));
421
+ }
422
+ #endif
423
+
424
+ /********************* To INT64_T Conversions *********************/
425
+ template <>
426
+ DS_D_INLINE int64_t to(double val)
427
+ {
428
+ return __double2ll_rn(val);
429
+ }
430
+ template <>
431
+ DS_D_INLINE int64_t to(float val)
432
+ {
433
+ return __float2ll_rn(val);
434
+ }
435
+ template <>
436
+ DS_D_INLINE int64_t to(__half val)
437
+ {
438
+ return __half2ll_rn(val);
439
+ }
440
+ // No direct support for integer casts at the C++ level and I don't feel they're so important
441
+ // to demand an PTX at this time
442
+
443
+ #ifdef BF16_AVAILABLE
444
+ template <>
445
+ DS_D_INLINE int64_t to(__nv_bfloat16 val)
446
+ {
447
+ return __bfloat162ll_rn(val);
448
+ }
449
+ #endif
450
+
451
+ /********************* To INT32_T Conversions *********************/
452
+ template <>
453
+ DS_D_INLINE int32_t to(double val)
454
+ {
455
+ return __double2int_rn(val);
456
+ }
457
+ template <>
458
+ DS_D_INLINE int32_t to(float val)
459
+ {
460
+ return __float2int_rn(val);
461
+ }
462
+ template <>
463
+ DS_D_INLINE int32_t to(__half val)
464
+ {
465
+ return __half2int_rn(val);
466
+ }
467
+ // No direct support for integer casts at the C++ level and I don't feel they're so important
468
+ // to demand an PTX at this time
469
+
470
+ #ifdef BF16_AVAILABLE
471
+ template <>
472
+ DS_D_INLINE int32_t to(__nv_bfloat16 val)
473
+ {
474
+ return __bfloat162int_rn(val);
475
+ }
476
+ #endif
477
+
478
+ /********************* To INT16_T Conversions *********************/
479
+ template <>
480
+ DS_D_INLINE int16_t to(double val)
481
+ {
482
+ return __double2int_rn(val);
483
+ }
484
+ template <>
485
+ DS_D_INLINE int16_t to(float val)
486
+ {
487
+ return __float2int_rn(val);
488
+ }
489
+ template <>
490
+ DS_D_INLINE int16_t to(__half val)
491
+ {
492
+ return __half2int_rn(val);
493
+ }
494
+ // No direct support for integer casts at the C++ level and I don't feel they're so important
495
+ // to demand an PTX at this time
496
+
497
+ #ifdef BF16_AVAILABLE
498
+ template <>
499
+ DS_D_INLINE int16_t to(__nv_bfloat16 val)
500
+ {
501
+ return __bfloat162int_rn(val);
502
+ }
503
+ #endif
504
+
505
+ /********************* To INT8_T Conversions *********************/
506
+ template <>
507
+ DS_D_INLINE int8_t to(double val)
508
+ {
509
+ return __double2int_rn(val);
510
+ }
511
+ template <>
512
+ DS_D_INLINE int8_t to(float val)
513
+ {
514
+ return __float2int_rn(val);
515
+ }
516
+ template <>
517
+ DS_D_INLINE int8_t to(__half val)
518
+ {
519
+ return __half2int_rn(val);
520
+ }
521
+ // No direct support for integer casts at the C++ level and I don't feel they're so important
522
+ // to demand an PTX at this time
523
+
524
+ #ifdef BF16_AVAILABLE
525
+ template <>
526
+ DS_D_INLINE int8_t to(__nv_bfloat16 val)
527
+ {
528
+ return __bfloat162int_rn(val);
529
+ }
530
+ #endif
531
+
532
+ /********************* To UINT64_T Conversions *********************/
533
+ template <>
534
+ DS_D_INLINE uint64_t to(double val)
535
+ {
536
+ return __double2ull_rn(val);
537
+ }
538
+ template <>
539
+ DS_D_INLINE uint64_t to(float val)
540
+ {
541
+ return __float2ull_rn(val);
542
+ }
543
+ template <>
544
+ DS_D_INLINE uint64_t to(__half val)
545
+ {
546
+ return __half2ull_rn(val);
547
+ }
548
+ // No direct support for integer casts at the C++ level and I don't feel they're so important
549
+ // to demand an PTX at this time
550
+
551
+ #ifdef BF16_AVAILABLE
552
+ template <>
553
+ DS_D_INLINE uint64_t to(__nv_bfloat16 val)
554
+ {
555
+ return __bfloat162ull_rn(val);
556
+ }
557
+ #endif
558
+
559
+ /********************* To UINT32_T Conversions *********************/
560
+ template <>
561
+ DS_D_INLINE uint32_t to(double val)
562
+ {
563
+ return __double2uint_rn(val);
564
+ }
565
+ template <>
566
+ DS_D_INLINE uint32_t to(float val)
567
+ {
568
+ return __float2uint_rn(val);
569
+ }
570
+ template <>
571
+ DS_D_INLINE uint32_t to(__half val)
572
+ {
573
+ return __half2uint_rn(val);
574
+ }
575
+ // No direct support for integer casts at the C++ level and I don't feel they're so important
576
+ // to demand an PTX at this time
577
+
578
+ #ifdef BF16_AVAILABLE
579
+ template <>
580
+ DS_D_INLINE uint32_t to(__nv_bfloat16 val)
581
+ {
582
+ return __bfloat162uint_rn(val);
583
+ }
584
+ #endif
585
+
586
+ /********************* To UINT16_T Conversions *********************/
587
+ template <>
588
+ DS_D_INLINE uint16_t to(double val)
589
+ {
590
+ return __double2uint_rn(val);
591
+ }
592
+ template <>
593
+ DS_D_INLINE uint16_t to(float val)
594
+ {
595
+ return __float2uint_rn(val);
596
+ }
597
+ template <>
598
+ DS_D_INLINE uint16_t to(__half val)
599
+ {
600
+ return __half2uint_rn(val);
601
+ }
602
+ // No direct support for integer casts at the C++ level and I don't feel they're so important
603
+ // to demand an PTX at this time
604
+
605
+ #ifdef BF16_AVAILABLE
606
+ template <>
607
+ DS_D_INLINE uint16_t to(__nv_bfloat16 val)
608
+ {
609
+ return __bfloat162uint_rn(val);
610
+ }
611
+ #endif
612
+
613
+ /********************* To UINT8_T Conversions *********************/
614
+ template <>
615
+ DS_D_INLINE uint8_t to(double val)
616
+ {
617
+ return __double2uint_rn(val);
618
+ }
619
+ template <>
620
+ DS_D_INLINE uint8_t to(float val)
621
+ {
622
+ return __float2uint_rn(val);
623
+ }
624
+ template <>
625
+ DS_D_INLINE uint8_t to(__half val)
626
+ {
627
+ return __half2uint_rn(val);
628
+ }
629
+ // No direct support for integer casts at the C++ level and I don't feel they're so important
630
+ // to demand an PTX at this time
631
+
632
+ #ifdef BF16_AVAILABLE
633
+ template <>
634
+ DS_D_INLINE uint8_t to(__nv_bfloat16 val)
635
+ {
636
+ return __bfloat162uint_rn(val);
637
+ }
638
+ #endif
639
+
640
+ } // namespace conversion
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/cpu_adagrad.h ADDED
@@ -0,0 +1,210 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #define NOMINMAX // Windows idiosyncrasy
9
+ // https://stackoverflow.com/questions/4913922/possible-problems-with-nominmax-on-visual-c
10
+
11
+ #include <stdio.h>
12
+ #include <cassert>
13
+ #include "simd.h"
14
+
15
+ #if defined(__ENABLE_CUDA__)
16
+ #include <cuda_fp16.h>
17
+ #include <cuda_runtime_api.h>
18
+ #include "cuda.h"
19
+ #include "custom_cuda_layers.h"
20
+ typedef __half ds_half_precision_t;
21
+ #elif defined(__ENABLE_CANN__)
22
+ #include "acl/acl.h"
23
+ #include "torch_npu/csrc/core/npu/NPUStream.h"
24
+ typedef c10::Half ds_half_precision_t;
25
+ #else
26
+ typedef unsigned short ds_half_precision_t;
27
+ #endif
28
+
29
+ #define STEP(SPAN) \
30
+ void Step_##SPAN(float* _params, \
31
+ float* grads, \
32
+ float* _exp_avg_sq, \
33
+ size_t _param_size, \
34
+ ds_half_precision_t* dev_param = nullptr, \
35
+ bool half_precision = false);
36
+
37
+ class Adagrad_Optimizer {
38
+ public:
39
+ Adagrad_Optimizer(float alpha = 1e-2, float eps = 1e-8, float weight_decay = 0)
40
+ : _alpha(alpha), _eps(eps), _weight_decay(weight_decay)
41
+ {
42
+ #if defined(__ENABLE_CUDA__)
43
+ cudaMallocHost((void**)_doubled_buffer, TILE * sizeof(float));
44
+ cudaMallocHost((void**)(_doubled_buffer + 1), TILE * sizeof(float));
45
+
46
+ _streams[0] = TrainingContext::Instance().GetCurrentStream();
47
+ _streams[1] = TrainingContext::Instance().GetNewStream();
48
+ _buf_index = false;
49
+ #elif defined(__ENABLE_CANN__)
50
+ aclrtMallocHost((void**)_doubled_buffer, TILE * sizeof(float));
51
+ aclrtMallocHost((void**)(_doubled_buffer + 1), TILE * sizeof(float));
52
+
53
+ _buf_index = false;
54
+ #endif
55
+ }
56
+ ~Adagrad_Optimizer()
57
+ {
58
+ #if defined(__ENABLE_CUDA__)
59
+ cudaFreeHost(_doubled_buffer[0]);
60
+ cudaFreeHost(_doubled_buffer[1]);
61
+ #elif defined(__ENABLE_CANN__)
62
+ aclrtFreeHost(_doubled_buffer[0]);
63
+ aclrtFreeHost(_doubled_buffer[1]);
64
+ #endif
65
+ }
66
+ #if defined(__AVX512__) or defined(__AVX256__)
67
+ template <int span>
68
+ void Step_AVX(size_t* rounded_size,
69
+ float* _params,
70
+ float* grads,
71
+ float* _exp_avg_sq,
72
+ size_t param_size,
73
+ ds_half_precision_t* dev_param = nullptr,
74
+ bool half_precision = false);
75
+ #endif
76
+ STEP(1)
77
+ STEP(4)
78
+ STEP(8)
79
+ #if defined(__ENABLE_CUDA__)
80
+ inline void SynchronizeStreams()
81
+ {
82
+ for (int i = 0; i < 2; i++) cudaStreamSynchronize(_streams[i]);
83
+ }
84
+ #elif defined(__ENABLE_CANN__)
85
+ inline void SynchronizeStreams()
86
+ {
87
+ for (int i = 0; i < 2; i++) aclrtSynchronizeStream(_streams[i].stream());
88
+ }
89
+ #endif
90
+ inline void IncrementStep(size_t step)
91
+ {
92
+ _step++;
93
+ if (_step != step) { _step = step; }
94
+ }
95
+ inline void update_state(float lr, float epsilon, float weight_decay)
96
+ {
97
+ _alpha = lr;
98
+ _eps = epsilon;
99
+ _weight_decay = weight_decay;
100
+ }
101
+
102
+ private:
103
+ float _alpha;
104
+ float _eps;
105
+ float _weight_decay;
106
+
107
+ float _betta1_t;
108
+ float _betta2_t;
109
+ size_t _step;
110
+
111
+ #if defined(__ENABLE_CUDA__)
112
+ bool _buf_index;
113
+ float* _doubled_buffer[2];
114
+ cudaStream_t _streams[2];
115
+ #elif defined(__ENABLE_CANN__)
116
+ float* _doubled_buffer[2];
117
+ c10_npu::NPUStream _streams[2] = {c10_npu::getCurrentNPUStream(),
118
+ c10_npu::getNPUStreamFromPool()};
119
+ bool _buf_index;
120
+ #endif
121
+ };
122
+
123
+ #if defined(__AVX512__) or defined(__AVX256__)
124
+ template <int span>
125
+ void Adagrad_Optimizer::Step_AVX(size_t* rounded_size,
126
+ float* _params,
127
+ float* grads,
128
+ float* _exp_avg_sq,
129
+ size_t _param_size,
130
+ ds_half_precision_t* dev_params,
131
+ bool half_precision)
132
+ {
133
+ size_t new_rounded_size = 0;
134
+ AVX_Data eps_4;
135
+ eps_4.data = SIMD_SET(_eps);
136
+
137
+ float step_size = -1 * _alpha;
138
+ AVX_Data step_size_4;
139
+ step_size_4.data = SIMD_SET(step_size);
140
+
141
+ AVX_Data weight_decay4;
142
+ if (_weight_decay > 0) weight_decay4.data = SIMD_SET(_weight_decay);
143
+ new_rounded_size = ROUND_DOWN(_param_size, SIMD_WIDTH * span);
144
+ for (size_t t = 0; t < new_rounded_size; t += TILE) {
145
+ size_t copy_size = TILE;
146
+ if ((t + TILE) > new_rounded_size) copy_size = new_rounded_size - t;
147
+ size_t offset = copy_size + t;
148
+ #if defined(__ENABLE_CUDA__)
149
+ if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); }
150
+ #elif defined(__ENABLE_CANN__)
151
+ if ((t / TILE) >= 2) { aclrtSynchronizeStream(_streams[_buf_index].stream()); }
152
+ #endif
153
+ #pragma omp parallel for
154
+ for (size_t i = t; i < offset; i += SIMD_WIDTH * span) {
155
+ AVX_Data grad_4[span];
156
+ simd_load<span>(grad_4, grads + i, half_precision);
157
+
158
+ AVX_Data momentum_4[span];
159
+ simd_load<span>(momentum_4, grads + i, false);
160
+
161
+ AVX_Data variance_4[span];
162
+ simd_load<span>(variance_4, _exp_avg_sq + i, false);
163
+
164
+ AVX_Data param_4[span];
165
+ simd_load<span>(param_4, _params + i, half_precision);
166
+
167
+ if (_weight_decay > 0) { simd_fma<span>(grad_4, param_4, weight_decay4, grad_4); }
168
+
169
+ simd_fma<span>(variance_4, grad_4, grad_4, variance_4);
170
+ simd_sqrt<span>(grad_4, variance_4);
171
+ simd_add<span>(grad_4, grad_4, eps_4);
172
+ simd_div<span>(grad_4, momentum_4, grad_4);
173
+ simd_fma<span>(param_4, grad_4, step_size_4, param_4);
174
+
175
+ simd_store<span>(_params + i, param_4, half_precision);
176
+ #if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__)
177
+ if (dev_params) {
178
+ simd_store<span>(_doubled_buffer[_buf_index] + (i - t), param_4, half_precision);
179
+ }
180
+ #endif
181
+ simd_store<span>(_exp_avg_sq + i, variance_4, false);
182
+ }
183
+ #if defined(__ENABLE_CUDA__)
184
+ if (dev_params) {
185
+ if (half_precision)
186
+ launch_param_update_half(
187
+ _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]);
188
+ else
189
+ launch_param_update(
190
+ _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]);
191
+
192
+ _buf_index = !_buf_index;
193
+ }
194
+ #elif defined(__ENABLE_CANN__)
195
+ if (dev_params) {
196
+ size_t memcpy_size = copy_size * sizeof(_doubled_buffer[_buf_index][0]);
197
+ if (half_precision) memcpy_size /= 2;
198
+ aclrtMemcpy(dev_params + t,
199
+ memcpy_size,
200
+ _doubled_buffer[_buf_index],
201
+ memcpy_size,
202
+ aclrtMemcpyKind::ACL_MEMCPY_HOST_TO_DEVICE);
203
+
204
+ _buf_index = !_buf_index;
205
+ }
206
+ #endif
207
+ }
208
+ *rounded_size = new_rounded_size;
209
+ }
210
+ #endif
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/cpu_adam.h ADDED
@@ -0,0 +1,327 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #define NOMINMAX // Windows idiosyncrasy
9
+ // https://stackoverflow.com/questions/4913922/possible-problems-with-nominmax-on-visual-c
10
+
11
+ #include <stdio.h>
12
+ #include <torch/extension.h>
13
+ #include <cassert>
14
+ #include "simd.h"
15
+
16
+ #if defined(__ENABLE_CUDA__)
17
+ #include <cuda_fp16.h>
18
+ #include <cuda_runtime_api.h>
19
+ #include "cuda.h"
20
+ #include "custom_cuda_layers.h"
21
+ typedef __half ds_half_precision_t;
22
+ #elif defined(__ENABLE_CANN__)
23
+ #include "acl/acl.h"
24
+ #include "torch_npu/csrc/core/npu/NPUStream.h"
25
+ typedef c10::Half ds_half_precision_t;
26
+ #else
27
+ #include <cmath>
28
+ typedef unsigned short ds_half_precision_t;
29
+ #endif
30
+
31
+ #define STEP(SPAN) \
32
+ void Step_##SPAN(float* _params, \
33
+ float* grads, \
34
+ float* _exp_avg, \
35
+ float* _exp_avg_sq, \
36
+ size_t _param_size, \
37
+ ds_half_precision_t* dev_param = nullptr, \
38
+ bool half_precision = false);
39
+
40
+ class Adam_Optimizer {
41
+ public:
42
+ Adam_Optimizer(float alpha = 1e-3,
43
+ float betta1 = 0.9,
44
+ float betta2 = 0.999,
45
+ float eps = 1e-8,
46
+ float weight_decay = 0,
47
+ bool adamw_mode = true)
48
+ : _alpha(alpha),
49
+ _betta1(betta1),
50
+ _betta2(betta2),
51
+ _eps(eps),
52
+ _weight_decay(weight_decay),
53
+ _betta1_t(1.0),
54
+ _betta2_t(1.0),
55
+ _step(0),
56
+ _adamw_mode(adamw_mode)
57
+ {
58
+ #if defined(__ENABLE_CUDA__)
59
+ cudaMallocHost((void**)_doubled_buffer, TILE * sizeof(float));
60
+ cudaMallocHost((void**)(_doubled_buffer + 1), TILE * sizeof(float));
61
+
62
+ _streams[0] = TrainingContext::Instance().GetCurrentStream();
63
+ _streams[1] = TrainingContext::Instance().GetNewStream();
64
+ _buf_index = false;
65
+ #elif defined(__ENABLE_CANN__)
66
+ aclrtMallocHost((void**)_doubled_buffer, TILE * sizeof(float));
67
+ aclrtMallocHost((void**)(_doubled_buffer + 1), TILE * sizeof(float));
68
+
69
+ _buf_index = false;
70
+ #endif
71
+ }
72
+ ~Adam_Optimizer()
73
+ {
74
+ #if defined(__ENABLE_CUDA__)
75
+ cudaFreeHost(_doubled_buffer[0]);
76
+ cudaFreeHost(_doubled_buffer[1]);
77
+ #elif defined(__ENABLE_CANN__)
78
+ aclrtFreeHost(_doubled_buffer[0]);
79
+ aclrtFreeHost(_doubled_buffer[1]);
80
+ #endif
81
+ }
82
+
83
+ #if defined(__AVX512__) or defined(__AVX256__)
84
+ template <int span>
85
+ void Step_AVX(size_t* rounded_size,
86
+ float* _params,
87
+ float* grads,
88
+ float* _exp_avg,
89
+ float* _exp_avg_sq,
90
+ size_t param_size,
91
+ ds_half_precision_t* dev_param = nullptr,
92
+ bool half_precision = false);
93
+ #endif
94
+ STEP(1)
95
+ STEP(4)
96
+ STEP(8)
97
+ #if defined(__ENABLE_CUDA__)
98
+ inline void SynchronizeStreams()
99
+ {
100
+ for (int i = 0; i < 2; i++) cudaStreamSynchronize(_streams[i]);
101
+ }
102
+ #elif defined(__ENABLE_CANN__)
103
+ inline void SynchronizeStreams()
104
+ {
105
+ for (int i = 0; i < 2; i++) aclrtSynchronizeStream(_streams[i].stream());
106
+ }
107
+ #endif
108
+ inline void IncrementStep(size_t step, float beta1, float beta2)
109
+ {
110
+ if (beta1 != _betta1 || beta2 != _betta2) {
111
+ _step = step;
112
+ _betta1 = beta1;
113
+ _betta2 = beta2;
114
+ _betta1_t = std::pow(_betta1, step);
115
+ _betta2_t = std::pow(_betta2, step);
116
+ } else {
117
+ _step++;
118
+ if (_step != step) {
119
+ _betta1_t = std::pow(_betta1, step);
120
+ _betta2_t = std::pow(_betta2, step);
121
+ _step = step;
122
+ } else {
123
+ _betta1_t *= _betta1;
124
+ _betta2_t *= _betta2;
125
+ }
126
+ }
127
+ }
128
+ inline void update_state(float lr, float epsilon, float weight_decay, bool bias_correction)
129
+ {
130
+ _alpha = lr;
131
+ _eps = epsilon;
132
+ _weight_decay = weight_decay;
133
+
134
+ _bias_correction1 = 1.0f;
135
+ _bias_correction2 = 1.0f;
136
+ if (bias_correction == 1) {
137
+ _bias_correction1 = 1 - _betta1_t;
138
+ _bias_correction2 = 1 / sqrt(1 - _betta2_t);
139
+ }
140
+ }
141
+
142
+ private:
143
+ float _alpha;
144
+ float _betta1;
145
+ float _betta2;
146
+ float _eps;
147
+ float _weight_decay;
148
+
149
+ float _betta1_t;
150
+ float _betta2_t;
151
+ size_t _step;
152
+
153
+ float _bias_correction1;
154
+ float _bias_correction2;
155
+
156
+ bool _adamw_mode;
157
+
158
+ #if defined(__ENABLE_CUDA__)
159
+ float* _doubled_buffer[2];
160
+ cudaStream_t _streams[2];
161
+ bool _buf_index;
162
+ #elif defined(__ENABLE_CANN__)
163
+ float* _doubled_buffer[2];
164
+ c10_npu::NPUStream _streams[2] = {c10_npu::getCurrentNPUStream(),
165
+ c10_npu::getNPUStreamFromPool()};
166
+ bool _buf_index;
167
+ #endif
168
+ };
169
+
170
+ #if defined(__AVX512__) or defined(__AVX256__)
171
+ template <int span>
172
+ void Adam_Optimizer::Step_AVX(size_t* rounded_size,
173
+ float* _params,
174
+ float* grads,
175
+ float* _exp_avg,
176
+ float* _exp_avg_sq,
177
+ size_t _param_size,
178
+ ds_half_precision_t* dev_params,
179
+ bool half_precision)
180
+ {
181
+ size_t new_rounded_size = 0;
182
+ int rshft = half_precision ? 1 : 0;
183
+
184
+ AVX_Data betta1_4;
185
+ betta1_4.data = SIMD_SET(_betta1);
186
+ AVX_Data betta2_4;
187
+ betta2_4.data = SIMD_SET(_betta2);
188
+
189
+ float betta1_minus1 = 1 - _betta1;
190
+ float betta2_minus1 = 1 - _betta2;
191
+ AVX_Data betta1_minus1_4;
192
+ betta1_minus1_4.data = SIMD_SET(betta1_minus1);
193
+ AVX_Data betta2_minus1_4;
194
+ betta2_minus1_4.data = SIMD_SET(betta2_minus1);
195
+
196
+ AVX_Data bias2_sqrt;
197
+ bias2_sqrt.data = SIMD_SET(_bias_correction2);
198
+
199
+ AVX_Data eps_4;
200
+ eps_4.data = SIMD_SET(_eps);
201
+
202
+ float step_size = -1 * _alpha / _bias_correction1;
203
+ AVX_Data step_size_4;
204
+ step_size_4.data = SIMD_SET(step_size);
205
+
206
+ float w_decay = -1 * _alpha * _weight_decay;
207
+ AVX_Data weight_decay4;
208
+ if (_weight_decay > 0)
209
+ weight_decay4.data = (_adamw_mode ? SIMD_SET(w_decay) : SIMD_SET(_weight_decay));
210
+ new_rounded_size = ROUND_DOWN(_param_size, SIMD_WIDTH * span);
211
+ for (size_t t = 0; t < new_rounded_size; t += TILE) {
212
+ size_t copy_size = TILE;
213
+ if ((t + TILE) > new_rounded_size) copy_size = new_rounded_size - t;
214
+ size_t offset = copy_size + t;
215
+ #if defined(__ENABLE_CUDA__)
216
+ if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); }
217
+ #elif defined(__ENABLE_CANN__)
218
+ if ((t / TILE) >= 2) { aclrtSynchronizeStream(_streams[_buf_index].stream()); }
219
+ #endif
220
+ #pragma omp parallel for
221
+ for (size_t i = t; i < offset; i += SIMD_WIDTH * span) {
222
+ AVX_Data grad_4[span];
223
+ simd_load<span>(grad_4, grads + (i >> rshft), half_precision);
224
+
225
+ AVX_Data momentum_4[span];
226
+ simd_load<span>(momentum_4, _exp_avg + i, false);
227
+
228
+ AVX_Data variance_4[span];
229
+ simd_load<span>(variance_4, _exp_avg_sq + i, false);
230
+
231
+ AVX_Data param_4[span];
232
+ simd_load<span>(param_4, _params + (i >> rshft), half_precision);
233
+
234
+ if (_weight_decay > 0 && !_adamw_mode) {
235
+ simd_fma<span>(grad_4, param_4, weight_decay4, grad_4);
236
+ }
237
+
238
+ simd_mul<span>(momentum_4, momentum_4, betta1_4);
239
+ simd_fma<span>(momentum_4, grad_4, betta1_minus1_4, momentum_4);
240
+ simd_mul<span>(variance_4, variance_4, betta2_4);
241
+ simd_mul<span>(grad_4, grad_4, grad_4);
242
+ simd_fma<span>(variance_4, grad_4, betta2_minus1_4, variance_4);
243
+ simd_sqrt<span>(grad_4, variance_4);
244
+ simd_fma<span>(grad_4, grad_4, bias2_sqrt, eps_4);
245
+ simd_div<span>(grad_4, momentum_4, grad_4);
246
+
247
+ if (_weight_decay > 0 && _adamw_mode) {
248
+ simd_fma<span>(param_4, param_4, weight_decay4, param_4);
249
+ }
250
+
251
+ simd_fma<span>(param_4, grad_4, step_size_4, param_4);
252
+
253
+ simd_store<span>(_params + (i >> rshft), param_4, half_precision);
254
+ #if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__)
255
+ if (dev_params) {
256
+ simd_store<span>(_doubled_buffer[_buf_index] + (i - t), param_4, half_precision);
257
+ }
258
+ #endif
259
+ simd_store<span>(_exp_avg + i, momentum_4, false);
260
+ simd_store<span>(_exp_avg_sq + i, variance_4, false);
261
+ }
262
+ #if defined(__ENABLE_CUDA__)
263
+ if (dev_params) {
264
+ if (half_precision)
265
+ launch_param_update_half(
266
+ _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]);
267
+ else
268
+ launch_param_update(
269
+ _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]);
270
+
271
+ _buf_index = !_buf_index;
272
+ }
273
+ #elif defined(__ENABLE_CANN__)
274
+ if (dev_params) {
275
+ size_t memcpy_size = copy_size * sizeof(_doubled_buffer[_buf_index][0]);
276
+ if (half_precision) memcpy_size /= 2;
277
+ aclrtMemcpy(dev_params + t,
278
+ memcpy_size,
279
+ _doubled_buffer[_buf_index],
280
+ memcpy_size,
281
+ aclrtMemcpyKind::ACL_MEMCPY_HOST_TO_DEVICE);
282
+
283
+ _buf_index = !_buf_index;
284
+ }
285
+ #endif
286
+ }
287
+ *rounded_size = new_rounded_size;
288
+ }
289
+ #endif
290
+
291
+ int create_adam_optimizer(int optimizer_id,
292
+ float alpha = 1e-3,
293
+ float betta1 = 0.9,
294
+ float betta2 = 0.999,
295
+ float eps = 1e-8,
296
+ float weight_decay = 0,
297
+ bool adamw_mode = true,
298
+ bool should_log = false);
299
+
300
+ int ds_adam_step(int optimizer_id,
301
+ size_t step,
302
+ float lr,
303
+ float beta1,
304
+ float beta2,
305
+ float epsilon,
306
+ float weight_decay,
307
+ bool bias_correction,
308
+ torch::Tensor& params,
309
+ torch::Tensor& grads,
310
+ torch::Tensor& exp_avg,
311
+ torch::Tensor& exp_avg_sq);
312
+
313
+ int ds_adam_step_plus_copy(int optimizer_id,
314
+ size_t step,
315
+ float lr,
316
+ float beta1,
317
+ float beta2,
318
+ float epsilon,
319
+ float weight_decay,
320
+ bool bias_correction,
321
+ torch::Tensor& params,
322
+ torch::Tensor& grads,
323
+ torch::Tensor& exp_avg,
324
+ torch::Tensor& exp_avg_sq,
325
+ torch::Tensor& gpu_params);
326
+
327
+ int destroy_adam_optimizer(int optimizer_id);
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/cpu_lion.h ADDED
@@ -0,0 +1,269 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #define NOMINMAX // Windows idiosyncrasy
9
+ // https://stackoverflow.com/questions/4913922/possible-problems-with-nominmax-on-visual-c
10
+
11
+ #include <stdio.h>
12
+ #include <torch/extension.h>
13
+ #include <cassert>
14
+ #include "simd.h"
15
+
16
+ #if defined(__ENABLE_CUDA__)
17
+ #include <cuda_fp16.h>
18
+ #include <cuda_runtime_api.h>
19
+ #include "cuda.h"
20
+ #include "custom_cuda_layers.h"
21
+ typedef __half ds_half_precision_t;
22
+ #elif defined(__ENABLE_CANN__)
23
+ #include "acl/acl.h"
24
+ #include "torch_npu/csrc/core/npu/NPUStream.h"
25
+ typedef c10::Half ds_half_precision_t;
26
+ #else
27
+ #include <cmath>
28
+ typedef unsigned short ds_half_precision_t;
29
+ #endif
30
+
31
+ #define STEP(SPAN) \
32
+ void Step_##SPAN(float* _params, \
33
+ float* grads, \
34
+ float* _exp_avg, \
35
+ size_t _param_size, \
36
+ ds_half_precision_t* dev_param = nullptr, \
37
+ bool half_precision = false);
38
+
39
+ class Lion_Optimizer {
40
+ public:
41
+ Lion_Optimizer(float alpha = 1e-3,
42
+ float betta1 = 0.9,
43
+ float betta2 = 0.999,
44
+ float weight_decay = 0)
45
+ : _alpha(alpha), _betta1(betta1), _betta2(betta2), _weight_decay(weight_decay), _step(0)
46
+ {
47
+ #if defined(__ENABLE_CUDA__)
48
+ cudaMallocHost((void**)_doubled_buffer, TILE * sizeof(float));
49
+ cudaMallocHost((void**)(_doubled_buffer + 1), TILE * sizeof(float));
50
+
51
+ _streams[0] = TrainingContext::Instance().GetCurrentStream();
52
+ _streams[1] = TrainingContext::Instance().GetNewStream();
53
+ _buf_index = false;
54
+ #elif defined(__ENABLE_CANN__)
55
+ aclrtMallocHost((void**)_doubled_buffer, TILE * sizeof(float));
56
+ aclrtMallocHost((void**)(_doubled_buffer + 1), TILE * sizeof(float));
57
+
58
+ _buf_index = false;
59
+ #endif
60
+ }
61
+ ~Lion_Optimizer()
62
+ {
63
+ #if defined(__ENABLE_CUDA__)
64
+ cudaFreeHost(_doubled_buffer[0]);
65
+ cudaFreeHost(_doubled_buffer[1]);
66
+ #elif defined(__ENABLE_CANN__)
67
+ aclrtFreeHost(_doubled_buffer[0]);
68
+ aclrtFreeHost(_doubled_buffer[1]);
69
+ #endif
70
+ }
71
+
72
+ #if defined(__AVX512__) or defined(__AVX256__)
73
+ template <int span>
74
+ void Step_AVX(size_t* rounded_size,
75
+ float* _params,
76
+ float* grads,
77
+ float* _exp_avg,
78
+ size_t param_size,
79
+ ds_half_precision_t* dev_param = nullptr,
80
+ bool half_precision = false);
81
+ #endif
82
+ STEP(1)
83
+ STEP(4)
84
+ STEP(8)
85
+ #if defined(__ENABLE_CUDA__)
86
+ inline void SynchronizeStreams()
87
+ {
88
+ for (int i = 0; i < 2; i++) cudaStreamSynchronize(_streams[i]);
89
+ }
90
+ #elif defined(__ENABLE_CANN__)
91
+ inline void SynchronizeStreams()
92
+ {
93
+ for (int i = 0; i < 2; i++) aclrtSynchronizeStream(_streams[i].stream());
94
+ }
95
+ #endif
96
+ inline void IncrementStep(size_t step, float beta1, float beta2)
97
+ {
98
+ _step++;
99
+ if (_step != step || beta1 != _betta1 || beta2 != _betta2) {
100
+ _step = step;
101
+ _betta1 = beta1;
102
+ _betta2 = beta2;
103
+ }
104
+ }
105
+ inline void update_state(float lr, float weight_decay)
106
+ {
107
+ _alpha = lr;
108
+ _weight_decay = weight_decay;
109
+ }
110
+
111
+ private:
112
+ float _alpha;
113
+ float _betta1;
114
+ float _betta2;
115
+ float _weight_decay;
116
+ size_t _step;
117
+
118
+ #if defined(__ENABLE_CUDA__)
119
+ float* _doubled_buffer[2];
120
+ cudaStream_t _streams[2];
121
+ bool _buf_index;
122
+ #elif defined(__ENABLE_CANN__)
123
+ float* _doubled_buffer[2];
124
+ c10_npu::NPUStream _streams[2] = {c10_npu::getCurrentNPUStream(),
125
+ c10_npu::getNPUStreamFromPool()};
126
+ bool _buf_index;
127
+ #endif
128
+ };
129
+
130
+ #if defined(__AVX512__) or defined(__AVX256__)
131
+ template <int span>
132
+ void Lion_Optimizer::Step_AVX(size_t* rounded_size,
133
+ float* _params,
134
+ float* grads,
135
+ float* _exp_avg,
136
+ size_t _param_size,
137
+ ds_half_precision_t* dev_params,
138
+ bool half_precision)
139
+ {
140
+ size_t new_rounded_size = 0;
141
+ int rshft = half_precision ? 1 : 0;
142
+
143
+ constexpr float neg1 = -1.0f;
144
+ AVX_Data neg1_4;
145
+ neg1_4.data = SIMD_SET(neg1);
146
+
147
+ AVX_Data betta1_4;
148
+ betta1_4.data = SIMD_SET(_betta1);
149
+ AVX_Data betta2_4;
150
+ betta2_4.data = SIMD_SET(_betta2);
151
+
152
+ float betta1_minus1 = 1 - _betta1;
153
+ float betta2_minus1 = 1 - _betta2;
154
+ AVX_Data betta1_minus1_4;
155
+ betta1_minus1_4.data = SIMD_SET(betta1_minus1);
156
+ AVX_Data betta2_minus1_4;
157
+ betta2_minus1_4.data = SIMD_SET(betta2_minus1);
158
+
159
+ float step_size = -_alpha;
160
+ AVX_Data step_size_4;
161
+ step_size_4.data = SIMD_SET(step_size);
162
+
163
+ float after_decay = 1.0f - _alpha * _weight_decay;
164
+ AVX_Data after_decay_4;
165
+ if (_weight_decay > 0) after_decay_4.data = SIMD_SET(after_decay);
166
+
167
+ new_rounded_size = ROUND_DOWN(_param_size, SIMD_WIDTH * span);
168
+ for (size_t t = 0; t < new_rounded_size; t += TILE) {
169
+ size_t copy_size = TILE;
170
+ if ((t + TILE) > new_rounded_size) copy_size = new_rounded_size - t;
171
+ size_t offset = copy_size + t;
172
+ #if defined(__ENABLE_CUDA__)
173
+ if ((t / TILE) >= 2) { cudaStreamSynchronize(_streams[_buf_index]); }
174
+ #elif defined(__ENABLE_CANN__)
175
+ if ((t / TILE) >= 2) { aclrtSynchronizeStream(_streams[_buf_index].stream()); }
176
+ #endif
177
+ #pragma omp parallel for
178
+ for (size_t i = t; i < offset; i += SIMD_WIDTH * span) {
179
+ AVX_Data grad_4[span];
180
+ simd_load<span>(grad_4, grads + (i >> rshft), half_precision);
181
+
182
+ AVX_Data momentum_4[span];
183
+ simd_load<span>(momentum_4, _exp_avg + i, false);
184
+
185
+ AVX_Data param_4[span];
186
+ simd_load<span>(param_4, _params + (i >> rshft), half_precision);
187
+
188
+ AVX_Data tmp_4[span];
189
+
190
+ simd_mul<span>(tmp_4, momentum_4, betta1_4);
191
+ simd_fma<span>(tmp_4, grad_4, betta1_minus1_4, tmp_4);
192
+ // We already used intrinsics, so consider the machine representation fixed.
193
+ simd_and<span>(tmp_4, tmp_4, neg1_4);
194
+ simd_xor<span>(tmp_4, tmp_4, step_size_4);
195
+ if (_weight_decay > 0) {
196
+ simd_fma<span>(param_4, param_4, after_decay_4, tmp_4);
197
+ } else {
198
+ simd_add<span>(param_4, param_4, tmp_4);
199
+ }
200
+
201
+ simd_mul<span>(momentum_4, momentum_4, betta2_4);
202
+ simd_fma<span>(momentum_4, grad_4, betta2_minus1_4, momentum_4);
203
+
204
+ simd_store<span>(_params + (i >> rshft), param_4, half_precision);
205
+ #if defined(__ENABLE_CUDA__) or defined(__ENABLE_CANN__)
206
+ if (dev_params) {
207
+ simd_store<span>(_doubled_buffer[_buf_index] + (i - t), param_4, half_precision);
208
+ }
209
+ #endif
210
+ simd_store<span>(_exp_avg + i, momentum_4, false);
211
+ }
212
+ #if defined(__ENABLE_CUDA__)
213
+ if (dev_params) {
214
+ if (half_precision)
215
+ launch_param_update_half(
216
+ _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]);
217
+ else
218
+ launch_param_update(
219
+ _doubled_buffer[_buf_index], dev_params + t, copy_size, _streams[_buf_index]);
220
+
221
+ _buf_index = !_buf_index;
222
+ }
223
+ #elif defined(__ENABLE_CANN__)
224
+ if (dev_params) {
225
+ size_t memcpy_size = copy_size * sizeof(_doubled_buffer[_buf_index][0]);
226
+ if (half_precision) memcpy_size /= 2;
227
+ aclrtMemcpy(dev_params + t,
228
+ memcpy_size,
229
+ _doubled_buffer[_buf_index],
230
+ memcpy_size,
231
+ aclrtMemcpyKind::ACL_MEMCPY_HOST_TO_DEVICE);
232
+
233
+ _buf_index = !_buf_index;
234
+ }
235
+ #endif
236
+ }
237
+ *rounded_size = new_rounded_size;
238
+ }
239
+ #endif
240
+
241
+ int create_lion_optimizer(int optimizer_id,
242
+ float alpha = 1e-3,
243
+ float betta1 = 0.9,
244
+ float betta2 = 0.999,
245
+ float weight_decay = 0,
246
+ bool should_log = false);
247
+
248
+ int ds_lion_step(int optimizer_id,
249
+ size_t step,
250
+ float lr,
251
+ float beta1,
252
+ float beta2,
253
+ float weight_decay,
254
+ torch::Tensor& params,
255
+ torch::Tensor& grads,
256
+ torch::Tensor& exp_avg);
257
+
258
+ int ds_lion_step_plus_copy(int optimizer_id,
259
+ size_t step,
260
+ float lr,
261
+ float beta1,
262
+ float beta2,
263
+ float weight_decay,
264
+ torch::Tensor& params,
265
+ torch::Tensor& grads,
266
+ torch::Tensor& exp_avg,
267
+ torch::Tensor& gpu_params);
268
+
269
+ int destroy_lion_optimizer(int optimizer_id);
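For orientation, the vectorized Step_AVX above and the CUDA copy path both implement the sign-based Lion update. A minimal scalar reference is sketched below (plain C++, illustrative only; the function name and the exact handling of sign(0) are ours, not part of the extension):

    #include <cstddef>

    // Scalar sketch of one Lion step over a float parameter vector.
    // lr ~ _alpha, beta1/beta2 ~ _betta1/_betta2, wd ~ _weight_decay.
    void lion_step_reference(float* params, const float* grads, float* exp_avg,
                             std::size_t n, float lr, float beta1, float beta2, float wd)
    {
        for (std::size_t i = 0; i < n; ++i) {
            // Interpolated update direction: c = beta1 * m + (1 - beta1) * g
            const float c = beta1 * exp_avg[i] + (1.0f - beta1) * grads[i];
            const float sign_c = (c > 0.0f) ? 1.0f : (c < 0.0f) ? -1.0f : 0.0f;
            // Decoupled weight decay, then a fixed-magnitude signed step.
            params[i] = params[i] * (1.0f - lr * wd) - lr * sign_c;
            // Momentum update: m = beta2 * m + (1 - beta2) * g
            exp_avg[i] = beta2 * exp_avg[i] + (1.0f - beta2) * grads[i];
        }
    }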
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/cublas_wrappers.h ADDED
@@ -0,0 +1,95 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include <assert.h>
9
+ #include <cublas_v2.h>
10
+ #include <cuda.h>
11
+ #include <cuda_fp16.h>
12
+ #include <cuda_runtime.h>
13
+ #ifndef __HIP_PLATFORM_AMD__
14
+ #include <mma.h>
15
+ #endif
16
+ #ifdef __HIP_PLATFORM_AMD__
17
+ #include <rocblas/rocblas.h>
18
+ #endif
19
+ #include <stdio.h>
20
+
21
+ int cublas_gemm_ex(cublasHandle_t handle,
22
+ cublasOperation_t transa,
23
+ cublasOperation_t transb,
24
+ int m,
25
+ int n,
26
+ int k,
27
+ const float* alpha,
28
+ const float* beta,
29
+ const float* A,
30
+ const float* B,
31
+ float* C,
32
+ #ifdef __HIP_PLATFORM_AMD__
33
+ rocblas_gemm_algo algo = rocblas_gemm_algo_standard);
34
+ #else
35
+ cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT);
36
+ #endif
37
+
38
+ int cublas_gemm_ex(cublasHandle_t handle,
39
+ cublasOperation_t transa,
40
+ cublasOperation_t transb,
41
+ int m,
42
+ int n,
43
+ int k,
44
+ const float* alpha,
45
+ const float* beta,
46
+ const __half* A,
47
+ const __half* B,
48
+ __half* C,
49
+ #ifdef __HIP_PLATFORM_AMD__
50
+ rocblas_gemm_algo algo = rocblas_gemm_algo_standard);
51
+ #else
52
+ cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP);
53
+ #endif
54
+
55
+ int cublas_strided_batched_gemm(cublasHandle_t handle,
56
+ int m,
57
+ int n,
58
+ int k,
59
+ const float* alpha,
60
+ const float* beta,
61
+ const float* A,
62
+ const float* B,
63
+ float* C,
64
+ cublasOperation_t op_A,
65
+ cublasOperation_t op_B,
66
+ int stride_A,
67
+ int stride_B,
68
+ int stride_C,
69
+ int batch,
70
+ #ifdef __HIP_PLATFORM_AMD__
71
+ rocblas_gemm_algo algo = rocblas_gemm_algo_standard);
72
+ #else
73
+ cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT);
74
+ #endif
75
+
76
+ int cublas_strided_batched_gemm(cublasHandle_t handle,
77
+ int m,
78
+ int n,
79
+ int k,
80
+ const float* alpha,
81
+ const float* beta,
82
+ const __half* A,
83
+ const __half* B,
84
+ __half* C,
85
+ cublasOperation_t op_A,
86
+ cublasOperation_t op_B,
87
+ int stride_A,
88
+ int stride_B,
89
+ int stride_C,
90
+ int batch,
91
+ #ifdef __HIP_PLATFORM_AMD__
92
+ rocblas_gemm_algo algo = rocblas_gemm_algo_standard);
93
+ #else
94
+ cublasGemmAlgo_t algo = CUBLAS_GEMM_DEFAULT_TENSOR_OP);
95
+ #endif
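The float overload above is a thin wrapper over a cuBLAS GEMM call. The host-side sketch below shows one plausible way to drive it for C = alpha * A^T * B in cuBLAS's column-major convention; the sizes, buffer names, and the assumption that the default algorithm argument is acceptable are all illustrative, and the return-code semantics live in the corresponding .cu implementation:

    #include <cublas_v2.h>
    #include <cuda_runtime.h>
    #include "cublas_wrappers.h"

    int main()
    {
        const int m = 128, n = 64, k = 256;       // illustrative sizes
        float *d_A = nullptr, *d_B = nullptr, *d_C = nullptr;
        cudaMalloc(&d_A, sizeof(float) * k * m);  // A stored k x m, used transposed
        cudaMalloc(&d_B, sizeof(float) * k * n);  // B stored k x n
        cudaMalloc(&d_C, sizeof(float) * m * n);  // C stored m x n

        cublasHandle_t handle;
        cublasCreate(&handle);

        const float alpha = 1.0f, beta = 0.0f;
        // C = alpha * op(A) * op(B) + beta * C with op(A) = A^T, default algorithm.
        int rc = cublas_gemm_ex(handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, k,
                                &alpha, &beta, d_A, d_B, d_C);
        (void)rc;  // wrapper's return code; see the .cu sources for its meaning

        cublasDestroy(handle);
        cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
        return 0;
    }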
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/custom_cuda_layers.h ADDED
@@ -0,0 +1,327 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include "ds_kernel_utils.h"
9
+
10
+ #include <cuda.h>
11
+ #include <cuda_fp16.h>
12
+ #include <curand_kernel.h>
13
+ #include <stdio.h>
14
+ #include <stdlib.h>
15
+
16
+ #include "context.h"
17
+ #include "cublas_wrappers.h"
18
+
19
+ #define CUDA_CHECK(callstr) \
20
+ { \
21
+ cudaError_t error_code = callstr; \
22
+ if (error_code != cudaSuccess) { \
23
+ std::cerr << "CUDA error " << error_code << " at " << __FILE__ << ":" << __LINE__; \
24
+ assert(0); \
25
+ } \
26
+ }
27
+
28
+ #define MAX_THREADS 1024
29
+ #define THREADS 256
30
+
31
+ #define MAX_THREAD_STRIDE 32
32
+ #define TILE_DIM 32
33
+
34
+ // Maximum sequence-length support based on the number of threads (2048) allowed in each block;
36
+ // this MAX covers 8K. Higher sequence lengths need a larger MAX, e.g. 32 for 64K.
36
+ #define MAX_THREAD_ITERATIONS 8 // Maximum 8K
37
+ #define MAX_WARP_NUM 32
38
+
39
+ #define MAX_REGISTERS 256
40
+
41
+ #define MAX_REG 256
42
+
43
+ #define WARP_SIZE_BITS 5
44
+
45
+ // Fused bias add with gelu activation
46
+ template <typename T>
47
+ void launch_bias_gelu(const T* input,
48
+ const T* bias,
49
+ T* output,
50
+ int intermediate_size,
51
+ int batch_size,
52
+ cudaStream_t stream);
53
+
54
+ template <typename T>
55
+ void launch_gelu(const T* input,
56
+ T* output,
57
+ int intermediate_size,
58
+ int batch_size,
59
+ cudaStream_t stream);
60
+
61
+ template <typename T>
62
+ void launch_d_gelu(T* d_output,
63
+ const T* input,
64
+ const T* bias,
65
+ int intermediate_size,
66
+ int batch_size,
67
+ cudaStream_t stream);
68
+
69
+ // Custom fused bias add with layer normalization
70
+ template <typename T>
71
+ void launch_bias_residual_layer_norm(T* vals,
72
+ const T* residual,
73
+ const T* gamma,
74
+ const T* beta,
75
+ float epsilon,
76
+ int batch_size,
77
+ int hidden_dim,
78
+ cudaStream_t stream,
79
+ bool preLayerNorm,
80
+ bool training,
81
+ T* vars,
82
+ T* means);
83
+
84
+ template <typename T>
85
+ void launch_bias_residual_layer_norm(T* vals,
86
+ const T* residual,
87
+ const T* gamma,
88
+ const T* beta,
89
+ float epsilon,
90
+ int batch_size,
91
+ int hidden_dim,
92
+ cudaStream_t stream,
93
+ bool preLayerNorm,
94
+ bool training,
95
+ T* vars);
96
+
97
+ template <typename T>
98
+ void launch_layerNorm_backward_fused_add(const T* out_grad1,
99
+ const T* out_grad2,
100
+ const T* X_data,
101
+ const T* vars,
102
+ const T* means,
103
+ const T* gamma,
104
+ T* gamma_grad,
105
+ T* betta_grad,
106
+ T* inp_grad,
107
+ int batch_size,
108
+ int hidden_dim,
109
+ cudaStream_t stream[2]);
110
+ template <typename T>
111
+ void launch_layerNorm_backward_fused_add(const T* out_grad1,
112
+ const T* out_grad2,
113
+ const T* vals_hat,
114
+ const T* vars,
115
+ const T* gamma,
116
+ T* gamma_grad,
117
+ T* betta_grad,
118
+ T* inp_grad,
119
+ int batch_size,
120
+ int hidden_dim,
121
+ cudaStream_t stream[2],
122
+ bool invertible = false,
123
+ const T* betta = nullptr);
124
+
125
+ template <typename T>
126
+ void launch_layerNorm_backward(const T* out_grad,
127
+ const T* X_data,
128
+ const T* vars,
129
+ const T* means,
130
+ const T* gamma,
131
+ T* gamma_grad,
132
+ T* betta_grad,
133
+ T* inp_grad,
134
+ int batch_size,
135
+ int hidden_dim,
136
+ cudaStream_t stream[2]);
137
+
138
+ template <typename T>
139
+ void launch_layerNorm_backward(const T* out_grad,
140
+ const T* vals_hat,
141
+ const T* vars,
142
+ const T* gamma,
143
+ T* gamma_grad,
144
+ T* betta_grad,
145
+ T* inp_grad,
146
+ int batch_size,
147
+ int hidden_dim,
148
+ cudaStream_t stream[2],
149
+ bool invertible = false,
150
+ const T* betta = nullptr);
151
+
152
+ template <typename T>
153
+ void launch_layerNorm_backward_nreversible(const T* out_grad,
154
+ const T* vals,
155
+ const T* out_grad_trans,
156
+ const T* vals_trans,
157
+ const T* means,
158
+ const T* vars,
159
+ const T* gamma,
160
+ T* gamma_grad,
161
+ T* betta_grad,
162
+ T* inp_grad,
163
+ int batch_size,
164
+ int hidden_dim,
165
+ cudaStream_t stream[2]);
166
+
167
+ template <typename T>
168
+ void Transpose(const T* inp_mat, T* out_mat, int rows, int cols, cudaStream_t stream);
169
+
170
+ template <typename T>
171
+ void launch_attn_softmax_backward(T* out_grad,
172
+ const T* soft_inp,
173
+ int batch_size,
174
+ int heads,
175
+ int seq_length,
176
+ cudaStream_t stream);
177
+
178
+ template <typename T>
179
+ void launch_attn_softmax_backward_v2(T* out_grad,
180
+ const T* soft_inp,
181
+ int batch_size,
182
+ int heads,
183
+ int seq_length,
184
+ cudaStream_t stream);
185
+
186
+ // Custom softmax with scaling and attention mask addition
187
+ template <typename T>
188
+ void launch_attn_softmax(T* vals,
189
+ const T* attn_mask,
190
+ int batch_size,
191
+ int heads,
192
+ int sequence_length,
193
+ cudaStream_t stream);
194
+
195
+ template <typename T>
196
+ void launch_transform_0213(T* output,
197
+ const T* vals,
198
+ int batch_size,
199
+ int seq_length,
200
+ int hidden_dim,
201
+ int heads,
202
+ cudaStream_t stream);
203
+
204
+ // Custom bias add
205
+ template <typename T>
206
+ void launch_bias_add_transform_0213(T* outputs,
207
+ const T* vals,
208
+ const T* bias,
209
+ int batch_size,
210
+ int seq_length,
211
+ int hidden_dim,
212
+ int heads,
213
+ cudaStream_t stream,
214
+ int trans_count);
215
+
216
+ // 4D transform [0, 1, 2, 3] -> [0, 2, 1, 3]
217
+ template <typename T>
218
+ void launch_transform4d_0213(T* out,
219
+ const T* in,
220
+ int batch_size,
221
+ int heads,
222
+ int seq_length,
223
+ int hidden_dim,
224
+ cudaStream_t stream,
225
+ int trans_count);
226
+
227
+ template <typename T>
228
+ void launch_dropout(T* vals,
229
+ const T* bias,
230
+ uint8_t* mask,
231
+ int batch,
232
+ int dim,
233
+ float ratio,
234
+ cudaStream_t stream);
235
+
236
+ template <typename T>
237
+ void launch_dropout(T* vals_out,
238
+ const T* vals,
239
+ uint8_t* mask,
240
+ int total_count,
241
+ int dim,
242
+ float ratio,
243
+ cudaStream_t stream,
244
+ bool bwd = false);
245
+
246
+ template <typename T>
247
+ void launch_dropout(T* out,
248
+ const T* vals,
249
+ const T* residual,
250
+ const T* bias,
251
+ uint8_t* mask,
252
+ int batch,
253
+ int dim,
254
+ float ratio,
255
+ cudaStream_t stream);
256
+
257
+ template <typename T>
258
+ void launch_dropout_grad(T* vals, uint8_t* mask, int total_count, float ratio, cudaStream_t stream);
259
+
260
+ template <typename T>
261
+ void launch_dropout_grad(T* vals_out,
262
+ const T* vals,
263
+ uint8_t* mask,
264
+ int total_count,
265
+ float ratio,
266
+ cudaStream_t stream);
267
+
268
+ template <typename T>
269
+ void launch_fuse_transpose_bias_kernel(const T* inp,
270
+ T* out,
271
+ int rows,
272
+ int cols,
273
+ cudaStream_t stream);
274
+
275
+ void launch_param_update(const float* input, __half* output, int size, cudaStream_t stream);
276
+ void launch_param_update_half(const float* input, __half* output, int size, cudaStream_t stream);
277
+
278
+ void launch_token_sort(int32_t* indices,
279
+ int layers,
280
+ int batch_size,
281
+ int reserved_size,
282
+ int original_tokens,
283
+ cudaStream_t stream);
284
+
285
+ template <typename T>
286
+ void launch_gather_tokens(T* retained_tokens,
287
+ T* activations,
288
+ int32_t* gather_indices,
289
+ int32_t batch_size,
290
+ int32_t sampled_tokens,
291
+ int32_t channels,
292
+ int32_t read_batch_stride,
293
+ int32_t read_seq_stride,
294
+ int32_t write_batch_stride,
295
+ int32_t write_seq_stride,
296
+ cudaStream_t stream);
297
+
298
+ template <typename T>
299
+ void launch_scatter_tokens(T* all_activations,
300
+ T* layer_activations,
301
+ int32_t* gather_indices,
302
+ int32_t batch_size,
303
+ int32_t sampled_tokens,
304
+ int32_t channels,
305
+ int32_t read_batch_stride,
306
+ int32_t read_seq_stride,
307
+ int32_t write_batch_stride,
308
+ int32_t write_seq_stride,
309
+ cudaStream_t stream);
310
+
311
+ template <typename T>
312
+ void launch_slice_gpt_mask(T* output_mask,
313
+ const T* input_mask,
314
+ int batch_size,
315
+ int truncated_seq_len,
316
+ int orig_seq_len,
317
+ cudaStream_t stream);
318
+
319
+ template <typename T>
320
+ void launch_slice_bert_mask(T* output_mask,
321
+ const T* input_mask,
322
+ const int32_t* retained_indices,
323
+ int32_t layers,
324
+ int32_t batch_size,
325
+ int32_t truncated_seq_len,
326
+ int32_t orig_seq_len,
327
+ cudaStream_t stream);
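The CUDA_CHECK macro defined at the top of this header is the error-handling convention expected around these launchers. A small, illustrative host-side helper (names and shapes are assumptions) combining it with the fused bias-GeLU launcher declared above:

    #include "custom_cuda_layers.h"

    // Allocate an output buffer and run the fused bias + GeLU launcher.
    // d_input and d_bias are assumed to be device pointers of matching shapes.
    float* bias_gelu_forward(const float* d_input, const float* d_bias,
                             int intermediate_size, int batch_size, cudaStream_t stream)
    {
        float* d_output = nullptr;
        CUDA_CHECK(cudaMalloc((void**)&d_output,
                              sizeof(float) * (size_t)intermediate_size * batch_size));
        launch_bias_gelu<float>(d_input, d_bias, d_output, intermediate_size, batch_size, stream);
        return d_output;
    }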
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/dequantization_utils.h ADDED
@@ -0,0 +1,177 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "conversion_utils.h"
7
+ #include "ds_kernel_utils.h"
8
+ #include "quantization.h"
9
+ #include "quantization_utils.h"
10
+
11
+ namespace cg = cooperative_groups;
12
+
13
+ #pragma once
14
+
15
+ namespace dequantize {
16
+ using Type = quantize::Type;
17
+
18
+ template <Type qType, int numBits>
19
+ using Params = quantize::Params<qType, numBits>;
20
+
21
+ constexpr int granularity = quantize::granularity;
22
+ using PackedInt4 = quantize::PackedInt4;
23
+
24
+ constexpr int h_per_chunk = granularity / sizeof(__half);
25
+ constexpr int h2_per_chunk = granularity / sizeof(__half2);
26
+
27
+ /*
28
+ Device function that reads quantized data from global memory, dequantizes
29
+ it, and stores it to global memory.
30
+ Template Arguments :
31
+ numBits - Number of bits in quantized element. int: 4, 8
32
+ qType - Type of quantization to perform. Type::Symmetric or Type::Asymmetric
33
+ unroll - Number of load steps to internally unroll int
34
+ threads - Number of threads to perform dequant int
35
+ Function arguments:
36
+ global_output - __half pointer in global memory
37
+ data - Quantized data in global memory
38
+ global_params - Quantization parameters in global memory
39
+ elems_per_group - Number of elements in each quantization group
40
+ total_elems - Tensor size (note, does not need to be multiple of elems_per_group)
41
+ */
42
+ template <int numBits, Type qType, int unroll, int threads>
43
+ DS_D_INLINE void to_global(__half* global_output,
44
+ const int8_t* data,
45
+ const float* global_params,
46
+ const int elems_per_group,
47
+ const int total_elems);
48
+
49
+ /*
50
+ Device function that quantizes 16 bytes of __half type input data.
51
+ Template Arguments :
52
+ numBits - Number of bits in quantized element. int : 8 or 4
53
+ qType - Type of quantization to perform. Type::Symmetric or Type::Asymmetric
54
+ Function Arguments :
55
+ local_output - Local array to store dequantized data __half* or __half2*
56
+ data - Pointer to quantized input data. int8_t*
57
+ Params - Parameters for quantization. Params<qType, numBits>
58
+ */
59
+ template <int numBits, Type qType>
60
+ DS_D_INLINE void chunk(__half2* local_output, const int8_t* data, Params<qType, numBits> q_params);
61
+
62
+ template <typename T, int numBits, Type qType>
63
+ DS_D_INLINE void chunk(T* local_output, const int8_t* data, Params<qType, numBits> q_params);
64
+
65
+ /**************** Implementations ******************/
66
+
67
+ template <typename T, int numBits, Type qType>
68
+ DS_D_INLINE void chunk(T* local_output, const int8_t* data, Params<qType, numBits> q_params)
69
+ {
70
+ constexpr int32_t num_elems_packed = 8 / numBits;
71
+ constexpr int32_t iters = h_per_chunk / num_elems_packed;
72
+
73
+ #pragma unroll
74
+ for (int i = 0; i < iters; i++) {
75
+ if constexpr (num_elems_packed == 1) {
76
+ local_output[i] = q_params.template dequantize<T>(data[i]);
77
+ } else {
78
+ auto accessible_data = *(PackedInt4*)(&data[i]);
79
+ local_output[2 * i] = q_params.template dequantize<T>(accessible_data.low);
80
+ local_output[2 * i + 1] = q_params.template dequantize<T>(accessible_data.high);
81
+ }
82
+ }
83
+ }
84
+
85
+ template <int numBits, Type qType>
86
+ DS_D_INLINE void chunk(__half2* local_output, const int8_t* data, Params<qType, numBits> q_params)
87
+ {
88
+ __half* local_output_cast = reinterpret_cast<__half*>(local_output);
89
+ chunk<__half, numBits>(local_output_cast, data, q_params);
90
+ }
91
+
92
+ template <typename T, int numBits, Type qType, int unroll, int threads>
93
+ DS_D_INLINE void _to_global(T* global_output,
94
+ const int8_t* data,
95
+ const float* global_params,
96
+ const int elems_per_group,
97
+ const int total_elems)
98
+ {
99
+ cg::thread_block tb = cg::this_thread_block();
100
+ cg::thread_block_tile<hw_warp_size> warp = cg::tiled_partition<hw_warp_size>(tb);
101
+
102
+ // Load constants
103
+ // TODO(cmikeh2): Refactor into functions?
104
+ constexpr int load_granularity = (granularity / (sizeof(T))) / (numBits == 8 ? 1 : 2);
105
+ constexpr int load_step_stride = load_granularity * threads;
106
+ constexpr int load_block_stride = load_step_stride * unroll;
107
+
108
+ // Store constants
109
+ constexpr int T_per_chunk = granularity / sizeof(T);
110
+ constexpr int store_step_stride = T_per_chunk * threads;
111
+ constexpr int store_block_stride = store_step_stride * unroll;
112
+
113
+ // Load offsets
114
+ const int load_block_offset = tb.group_index().x * load_block_stride;
115
+ // Note: we can use `load_granularity` since the dtype is `int8_t`.
116
+ const int load_thread_offset = tb.thread_index().x * load_granularity;
117
+ const int8_t* load_base = data + load_block_offset + load_thread_offset;
118
+
119
+ // Store offsets
120
+ const int store_block_offset = tb.group_index().x * store_block_stride;
121
+ const int store_thread_offset = tb.thread_index().x * T_per_chunk;
122
+ const int elem_id_base = store_block_offset + store_thread_offset;
123
+
124
+ int8_t local_load_buffer[load_granularity * unroll];
125
+ T local_dequant_buffer[T_per_chunk * unroll];
126
+
127
+ /*
128
+ Note: Splitting this loop in half gave about 3-5% performance increase for reasons that aren't
129
+ totally clear to me, so this is a deliberately weird code structure.
130
+ */
131
+ #pragma unroll
132
+ for (int i = 0; i < unroll; i++) {
133
+ const int elem_id_iter = elem_id_base + i * store_step_stride;
134
+
135
+ if (elem_id_iter < total_elems) {
136
+ mem_access::load_global<load_granularity>(local_load_buffer + i * load_granularity,
137
+ load_base + i * load_step_stride);
138
+ }
139
+ }
140
+
141
+ #pragma unroll
142
+ for (int i = 0; i < unroll; i++) {
143
+ const int elem_id_iter = elem_id_base + i * store_step_stride;
144
+ if (elem_id_iter < total_elems) {
145
+ // TODO(cmikeh2): Can we amortize this division? Perform once on the first iteration and
146
+ // use indexing math to do division free interpolation of the successive groups?
147
+ const int group_index = elem_id_iter / elems_per_group;
148
+ Params<qType, numBits> q_params(global_params, group_index);
149
+
150
+ chunk<T, numBits, qType>(local_dequant_buffer + i * T_per_chunk,
151
+ local_load_buffer + i * load_granularity,
152
+ q_params);
153
+ mem_access::store_global<granularity>(global_output + elem_id_iter,
154
+ local_dequant_buffer + i * T_per_chunk);
155
+ }
156
+ }
157
+ }
158
+
159
+ template <typename T, int numBits, Type qType, int unroll, int threads>
160
+ DS_D_INLINE void to_global(T* global_output,
161
+ const int8_t* data,
162
+ const float* global_params,
163
+ const int elems_per_group,
164
+ const int total_elems)
165
+ {
166
+ if constexpr (numBits == 4 || numBits == 8) {
167
+ _to_global<T, numBits, qType, unroll, threads>(
168
+ global_output, data, global_params, elems_per_group, total_elems);
169
+ } else if constexpr (numBits == 3) {
170
+ // TODO(cmikeh2): Need this implementation
171
+ assert(false);
172
+ } else {
173
+ assert(false);
174
+ }
175
+ }
176
+
177
+ } // namespace dequantize
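As a rough scalar picture of what chunk() does in the 4-bit case: each byte holds two signed 4-bit values that are sign-extended and rescaled by the group's quantization parameters. The snippet below is an illustration only; the packing order is assumed, and the real code obtains the scale through Params<qType, numBits> rather than a bare float:

    #include <cstdint>

    // Unpack one byte into two signed 4-bit values and dequantize them
    // symmetrically (x = scale * q). Assumed nibble order: low then high.
    inline void dequant_int4_pair(int8_t packed, float scale, float* out_lo, float* out_hi)
    {
        const int lo = (int)((int8_t)(packed << 4)) >> 4;  // sign-extend low nibble
        const int hi = (int)packed >> 4;                   // sign-extend high nibble
        *out_lo = scale * (float)lo;
        *out_hi = scale * (float)hi;
    }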
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/dropout.h ADDED
@@ -0,0 +1,81 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include <cuda.h>
9
+ #include <cuda_fp16.h>
10
+ #include <stdio.h>
11
+
12
+ template <typename T>
13
+ class Dropout {
14
+ public:
15
+ struct Config {
16
+ float ratio;
17
+ uint32_t dim;
18
+ bool training;
19
+
20
+ Config(float r, uint32_t d) : ratio(r), dim(d), training(true) {}
21
+
22
+ float RATIO() const { return training ? ratio : 0.0; }
23
+ inline void SetDim(uint32_t d) { dim = d; }
24
+ };
25
+
26
+ Dropout(const Config& config) : _config(config), _mask(nullptr) {}
27
+
28
+ virtual ~Dropout() {}
29
+
30
+ void Forward(int bsz, T* out, const T* vals, cudaStream_t stream, bool bwd = false)
31
+ {
32
+ launch_dropout<T>(
33
+ out, vals, _mask, bsz * _config.dim, _config.dim, _config.RATIO(), stream, bwd);
34
+ }
35
+
36
+ void ForwardWithBias(int bsz, T* vals, const T* bias, cudaStream_t stream)
37
+ {
38
+ launch_dropout<T>(vals, bias, _mask, bsz, _config.dim, _config.RATIO(), stream);
39
+ }
40
+
41
+ void ForwardWithBias(int bsz,
42
+ T* out,
43
+ const T* vals,
44
+ const T* residual,
45
+ const T* bias,
46
+ cudaStream_t stream)
47
+ {
48
+ launch_dropout<T>(
49
+ out, vals, residual, bias, _mask, bsz, _config.dim, _config.RATIO(), stream);
50
+ }
51
+
52
+ void Backward(int bsz, T* d_vals, cudaStream_t stream)
53
+ {
54
+ launch_dropout_grad<T>(d_vals, _mask, bsz * _config.dim, _config.RATIO(), stream);
55
+ }
56
+
57
+ void Backward(int bsz, T* d_vals_out, const T* d_vals, cudaStream_t stream)
58
+ {
59
+ launch_dropout_grad<T>(
60
+ d_vals_out, d_vals, _mask, bsz * _config.dim, _config.RATIO(), stream);
61
+ }
62
+
63
+ bool HasDropout() const { return _config.RATIO() > 0.0; }
64
+
65
+ void SetTrainingMode(bool training) { _config.training = training; }
66
+
67
+ void SetMask(uint8_t* mask)
68
+ {
69
+ if (!mask) { throw std::runtime_error("Dropout mask is null."); }
70
+
71
+ _mask = mask;
72
+ }
73
+
74
+ Config GetConfig() const { return _config; }
75
+
76
+ inline void SetDimension(uint32_t dim) { _config.SetDim(dim); }
77
+
78
+ private:
79
+ uint8_t* _mask;
80
+ Config _config;
81
+ };
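A minimal host-side wiring of this module, using only the API declared above; the mask, stream, and buffers are assumed to be valid device allocations of size bsz * dim, and the kernel launchers (launch_dropout and friends) are assumed to be visible as they are in the transformer op sources:

    #include "dropout.h"

    void run_dropout(float* d_out, const float* d_in, uint8_t* d_mask,
                     int bsz, uint32_t dim, cudaStream_t stream, bool training)
    {
        Dropout<float> dropout(Dropout<float>::Config(0.1f, dim));  // 10% drop ratio
        dropout.SetTrainingMode(training);  // RATIO() falls back to 0.0 in eval mode
        dropout.SetMask(d_mask);            // mask buffer consumed by the kernels
        dropout.Forward(bsz, d_out, d_in, stream);
    }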
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/ds_kernel_utils.h ADDED
@@ -0,0 +1,58 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ /*
7
+ Centralized header file for preprocessor macros and constants
8
+ used throughout the codebase.
9
+ */
10
+
11
+ #pragma once
12
+
13
+ #include <cuda.h>
14
+ #include <cuda_fp16.h>
15
+
16
+ #ifdef BF16_AVAILABLE
17
+ #include <cuda_bf16.h>
18
+ #endif
19
+
20
+ #define DS_HD_INLINE __host__ __device__ __forceinline__
21
+ #define DS_D_INLINE __device__ __forceinline__
22
+
23
+ #ifdef __HIP_PLATFORM_AMD__
24
+
25
+ // constexpr variant of warpSize for templating
26
+ constexpr int hw_warp_size = 64;
27
+ #define HALF_PRECISION_AVAILABLE = 1
28
+ #include <hip/hip_cooperative_groups.h>
29
+ #include <hip/hip_fp16.h>
30
+
31
+ #else // !__HIP_PLATFORM_AMD__
32
+
33
+ // constexpr variant of warpSize for templating
34
+ constexpr int hw_warp_size = 32;
35
+
36
+ #if __CUDA_ARCH__ >= 530
37
+ #define HALF_PRECISION_AVAILABLE = 1
38
+ #define PTX_AVAILABLE
39
+ #endif // __CUDA_ARCH__ >= 530
40
+
41
+ #if __CUDA_ARCH__ >= 800
42
+ #define ASYNC_COPY_AVAILABLE
43
+ #endif // __CUDA_ARCH__ >= 800
44
+
45
+ #include <cooperative_groups.h>
46
+ #include <cuda_fp16.h>
47
+
48
+ #endif //__HIP_PLATFORM_AMD__
49
+
50
+ inline int next_pow2(const int val)
51
+ {
52
+ int rounded_val = val - 1;
53
+ rounded_val |= rounded_val >> 1;
54
+ rounded_val |= rounded_val >> 2;
55
+ rounded_val |= rounded_val >> 4;
56
+ rounded_val |= rounded_val >> 8;
57
+ return rounded_val + 1;
58
+ }
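next_pow2 above rounds an integer up to the nearest power of two via the usual bit-smearing trick. A quick host-side check of its behavior (compiled against the CUDA headers this file includes):

    #include <cassert>
    #include "ds_kernel_utils.h"

    int main()
    {
        assert(next_pow2(1) == 1);
        assert(next_pow2(5) == 8);
        assert(next_pow2(64) == 64);
        assert(next_pow2(100) == 128);
        return 0;
    }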
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/ds_transformer_cuda.h ADDED
@@ -0,0 +1,189 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include <cuda_runtime_api.h>
9
+ #include <curand.h>
10
+ #include <memory>
11
+ #include <vector>
12
+ #include "cublas_v2.h"
13
+ #include "cuda.h"
14
+ #include "dropout.h"
15
+ #include "feed_forward.h"
16
+ #include "gelu.h"
17
+ #include "general_kernels.h"
18
+ #include "normalize_layer.h"
19
+ #include "softmax.h"
20
+ #include "strided_batch_gemm.h"
21
+
22
+ struct BertGemmAlgos {
23
+ int m_gemm_qkv_algo;
24
+ int m_gemm_inter_algo;
25
+ int m_gemm_output_algo;
26
+ int m_gemm_batch1_algo;
27
+ int m_gemm_batch2_algo;
28
+
29
+ BertGemmAlgos()
30
+ : m_gemm_qkv_algo(-1),
31
+ m_gemm_inter_algo(-1),
32
+ m_gemm_output_algo(-1),
33
+ m_gemm_batch1_algo(-1),
34
+ m_gemm_batch2_algo(-1)
35
+ {
36
+ }
37
+ };
38
+
39
+ template <typename T>
40
+ class BertTransformerLayer {
41
+ public:
42
+ BertTransformerLayer(unsigned layer_id,
43
+ unsigned batch_size,
44
+ unsigned hidden_size,
45
+ unsigned num_heads,
46
+ unsigned intermediate_size,
47
+ unsigned seq_length,
48
+ float attn_dropout_ratio,
49
+ float hidden_output_dropout_ratio,
50
+ float layer_norm_eps,
51
+ bool pre_or_postLayerNorm,
52
+ const std::vector<std::array<int, 3>>& gemm_algos,
53
+ bool attn_dropout_checkpoint,
54
+ bool normalize_invertible,
55
+ bool gelu_checkpoint,
56
+ bool stochastic_mode);
57
+
58
+ virtual ~BertTransformerLayer();
59
+
60
+ void Forward(unsigned bsz,
61
+ const T* input_ptr,
62
+ const T* input_mask_ptr,
63
+ const T* attn_qkvw_ptr,
64
+ const T* attn_qkvb_ptr,
65
+ const T* attn_ow_ptr,
66
+ const T* attn_ob_ptr,
67
+ const T* attn_nw_ptr,
68
+ const T* attn_nb_ptr,
69
+ const T* inter_w_ptr,
70
+ const T* inter_b_ptr,
71
+ const T* output_w_ptr,
72
+ const T* output_b_ptr,
73
+ const T* norm_w_ptr,
74
+ const T* norm_b_ptr,
75
+ T* out_ptr,
76
+ T* inp_norm_ptr,
77
+ T* q_tf_ptr,
78
+ T* k_tf_ptr,
79
+ T* v_tf_ptr,
80
+ T* softmax_output_ptr,
81
+ T* ctx_bufB_ptr,
82
+ T* attn_o_inp_ptr,
83
+ T* add_res_ptr,
84
+ T* ff1_inp_ptr,
85
+ T* gelu_inp_ptr,
86
+ T* ff2_inp_ptr);
87
+
88
+ void Backward(unsigned bsz,
89
+ const T* grad_output_ptr,
90
+ const T* input_ptr,
91
+ const T* output_ptr,
92
+ const T* inp_norm_ptr,
93
+ const T* q_tf_ptr,
94
+ const T* k_tf_ptr,
95
+ const T* v_tf_ptr,
96
+ const T* softmax_output_ptr,
97
+ const T* ctx_bufB_ptr,
98
+ const T* attn_o_inp_ptr,
99
+ const T* add_res_ptr,
100
+ const T* ff1_inp_ptr,
101
+ const T* gelu_inp_ptr,
102
+ const T* ff2_inp_ptr,
103
+ const T* input_mask_ptr,
104
+ const T* attn_qkvw_ptr,
105
+ const T* attn_ow_ptr,
106
+ const T* attn_nw_ptr,
107
+ const T* attn_nb_ptr,
108
+ const T* inter_w_ptr,
109
+ const T* inter_b_ptr,
110
+ const T* output_w_ptr,
111
+ const T* norm_w_ptr,
112
+ const T* norm_b_ptr,
113
+
114
+ T* grad_input_ptr,
115
+ T* grad_attn_qkvw_ptr,
116
+ T* grad_attn_qkvb_ptr,
117
+ T* grad_attn_ow_ptr,
118
+ T* grad_attn_ob_ptr,
119
+ T* grad_attn_nw_ptr,
120
+ T* grad_attn_nb_ptr,
121
+ T* grad_inter_w_ptr,
122
+ T* grad_inter_b_ptr,
123
+ T* grad_output_w_ptr,
124
+ T* grad_output_b_ptr,
125
+ T* grad_norm_w_ptr,
126
+ T* grad_norm_b_ptr);
127
+
128
+ void SetIntermediateBuffers(uint8_t* attn_prob_dropout_mask_ptr,
129
+ uint8_t* attn_output_dropout_mask_ptr,
130
+ uint8_t* layer_output_dropout_mask_ptr,
131
+ T* layer_norm_var,
132
+ T* layer_norm_mean,
133
+ T* attn_layer_norm_var,
134
+ T* attn_layer_norm_mean);
135
+
136
+ inline unsigned GetBatchSize() const { return _batch_size; }
137
+ inline unsigned GetNumHeads() const { return _heads; }
138
+ inline unsigned GetSeqLength() const { return _seq_length; }
139
+ inline unsigned GetIntermediateSize() const { return _intermediate_size; }
140
+
141
+ void SetSeqLength(unsigned seq_len);
142
+ inline unsigned GetHiddenSize() const { return _hidden_size; }
143
+ void SetTrainingMode(bool training);
144
+ inline bool IsTrainingMode() const { return _training; }
145
+ inline bool GeluCheckpoint() const { return _gelu_checkpoint; }
146
+
147
+ private:
148
+ void Initialize();
149
+ size_t getWorkspaceSize(int maxBatchSize) const;
150
+
151
+ // Params
152
+ unsigned _layer_id;
153
+ unsigned _batch_size;
154
+ unsigned _hidden_size;
155
+ unsigned _heads;
156
+ unsigned _size_per_head;
157
+ unsigned _intermediate_size;
158
+ unsigned _seq_length;
159
+
160
+ bool _pre_or_postLayerNorm;
161
+
162
+ cublasHandle_t _cublasHandle;
163
+ cudaStream_t _stream;
164
+
165
+ // layers
166
+ FeedForward<T> _qkv_linear;
167
+ FeedForward<T> _attn_out_linear;
168
+ Normalize_Layer<T> _attn_layer_norm;
169
+ Normalize_Layer<T> _layer_norm;
170
+ Normalize_Layer<T>* _last_normalize;
171
+ FeedForward<T> _ff1, _ff2;
172
+ Softmax<T> _softmax;
173
+ Gelu<T> _gelu;
174
+ Dropout<T> _attn_prob_dropout;
175
+ Dropout<T> _attn_output_dropout;
176
+ Dropout<T> _layer_output_dropout;
177
+ StridedBatchGemm<T> _attn_scores;
178
+ StridedBatchGemm<T> _attn_context;
179
+
180
+ bool _training;
181
+
182
+ // Memory saving flags
183
+ bool _attn_dropout_checkpoint;
184
+ bool _normalize_invertible;
185
+ bool _gelu_checkpoint;
186
+
187
+ // High Performance flags
188
+ bool _stochastic_mode;
189
+ };
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/feed_forward.h ADDED
@@ -0,0 +1,110 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #ifndef __FEEDFORWARD_H__
7
+ #define __FEEDFORWARD_H__
8
+
9
+ #include <cuda.h>
10
+ #include <cuda_fp16.h>
11
+ #include <stdio.h>
12
+ #include "custom_cuda_layers.h"
13
+
14
+ template <typename T>
15
+ class FeedForward {
16
+ public:
17
+ struct Config {
18
+ int batchSize, outputSize;
19
+ int inputSize;
20
+ std::array<int, 3> gemm_algos;
21
+ Config(int batch, int outputs, int inputs, const std::array<int, 3>& algos)
22
+ : batchSize(batch), outputSize(outputs), inputSize(inputs), gemm_algos(algos)
23
+ {
24
+ }
25
+ };
26
+
27
+ FeedForward(Config config) : config_(config) {}
28
+
29
+ ~FeedForward() {}
30
+
31
+ void Forward(int bsz,
32
+ const T* input_ptr,
33
+ const T* weights,
34
+ T* out,
35
+ cublasHandle_t& _cublasHandle)
36
+ {
37
+ float alpha = T(1.);
38
+ float beta = T(0.);
39
+
40
+ cublas_gemm_ex(_cublasHandle,
41
+ CUBLAS_OP_T,
42
+ CUBLAS_OP_N,
43
+ config_.outputSize,
44
+ bsz,
45
+ config_.inputSize,
46
+ &alpha,
47
+ &beta,
48
+ weights,
49
+ input_ptr,
50
+ out,
51
+ #ifdef __HIP_PLATFORM_AMD__
52
+ rocblas_gemm_algo(config_.gemm_algos[0]));
53
+ #else
54
+ cublasGemmAlgo_t(config_.gemm_algos[0]));
55
+ #endif
56
+ }
57
+ void Backward(int bsz,
58
+ const T* out_grad,
59
+ const T* input_ptr,
60
+ const T* weights,
61
+ T* weights_grad,
62
+ T* bias_grad,
63
+ cublasHandle_t& _cublasHandle,
64
+ cudaStream_t& stream,
65
+ T* inp_grad_out = nullptr,
66
+ T* out_grad_trans_out = nullptr)
67
+ {
68
+ float alpha = (T)1.0, beta = (T)0.0;
69
+ cublas_gemm_ex(_cublasHandle,
70
+ CUBLAS_OP_N,
71
+ CUBLAS_OP_T,
72
+ config_.inputSize,
73
+ config_.outputSize,
74
+ bsz,
75
+ &alpha,
76
+ &beta,
77
+ input_ptr,
78
+ out_grad,
79
+ weights_grad,
80
+ #ifdef __HIP_PLATFORM_AMD__
81
+ rocblas_gemm_algo(config_.gemm_algos[1]));
82
+ #else
83
+ cublasGemmAlgo_t(config_.gemm_algos[1]));
84
+ #endif
85
+
86
+ cublas_gemm_ex(_cublasHandle,
87
+ CUBLAS_OP_N,
88
+ CUBLAS_OP_N,
89
+ config_.inputSize,
90
+ bsz,
91
+ config_.outputSize,
92
+ &alpha,
93
+ &beta,
94
+ weights,
95
+ out_grad,
96
+ inp_grad_out,
97
+ #ifdef __HIP_PLATFORM_AMD__
98
+ rocblas_gemm_algo(config_.gemm_algos[2]));
99
+ #else
100
+ cublasGemmAlgo_t(config_.gemm_algos[2]));
101
+ #endif
102
+
103
+ launch_fuse_transpose_bias_kernel<T>(out_grad, bias_grad, bsz, config_.outputSize, stream);
104
+ }
105
+
106
+ private:
107
+ Config config_;
108
+ };
109
+
110
+ #endif
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/gelu.h ADDED
@@ -0,0 +1,41 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include <cuda.h>
9
+ #include <cuda_fp16.h>
10
+ #include <stdio.h>
11
+ #include "custom_cuda_layers.h"
12
+
13
+ template <typename T>
14
+ class Gelu {
15
+ public:
16
+ struct Config {
17
+ uint32_t intermediate_size;
18
+ Config(uint32_t inter_size) : intermediate_size(inter_size) {}
19
+ };
20
+
21
+ Gelu(const Config& config) : _config(config) {}
22
+
23
+ virtual ~Gelu() {}
24
+
25
+ void ForwardWithBiasAdd(int bsz,
26
+ const T* input_buf,
27
+ const T* bias,
28
+ T* output,
29
+ cudaStream_t stream)
30
+ {
31
+ launch_bias_gelu<T>(input_buf, bias, output, _config.intermediate_size, bsz, stream);
32
+ }
33
+
34
+ void Backward(int bsz, T* d_output, const T* input_buf, const T* bias, cudaStream_t stream)
35
+ {
36
+ launch_d_gelu<T>(d_output, input_buf, bias, _config.intermediate_size, bsz, stream);
37
+ }
38
+
39
+ private:
40
+ Config _config;
41
+ };
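The actual activation math lives in the .cu kernels behind launch_bias_gelu / launch_d_gelu, not in this header. For reference, fused bias-GeLU kernels of this kind commonly use the tanh approximation of GeLU, sketched here as scalar code (treat the exact variant as defined by the kernel sources):

    #include <cmath>

    // Common tanh-approximation of GeLU; shown for reference only.
    inline float gelu_ref(float x)
    {
        const float k = 0.7978845608028654f;  // sqrt(2 / pi)
        return 0.5f * x * (1.0f + std::tanh(k * (x + 0.044715f * x * x * x)));
    }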
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/gemm_test.h ADDED
@@ -0,0 +1,334 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include <cuda_fp16.h>
9
+ #ifndef __HIP_PLATFORM_AMD__
10
+ #include <cuda_profiler_api.h>
11
+ #endif
12
+ #ifdef __HIP_PLATFORM_AMD__
13
+ #include <rocblas/rocblas.h>
14
+ #endif
15
+ #include <array>
16
+ #include <cstdio>
17
+ #include <cstdlib>
18
+ #include <ctime>
19
+ #include <limits>
20
+ #include <memory>
21
+ #include "StopWatch.h"
22
+ #include "cublas_wrappers.h"
23
+
24
+ template <typename T>
25
+ void check(T result, char const* const func, const char* const file, int const line)
26
+ {
27
+ if (result) {
28
+ std::cout << (std::string("CUDA runtime error: ") + +file + ":" + std::to_string(line) +
29
+ " \n");
30
+ }
31
+ }
32
+
33
+ #define check_cuda_error(val) check((val), #val, __FILE__, __LINE__)
34
+
35
+ template <typename T>
36
+ class GemmTest {
37
+ public:
38
+ GemmTest(int m, int n, int k, cublasOperation_t ta, cublasOperation_t tb, cublasHandle_t h)
39
+ : M(m), N(n), K(k), transa(ta), transb(tb), handle(h)
40
+ {
41
+ check_cuda_error(cudaMalloc((void**)&A, sizeof(T) * M * K));
42
+ check_cuda_error(cudaMalloc((void**)&B, sizeof(T) * K * N));
43
+ check_cuda_error(cudaMalloc((void**)&C, sizeof(T) * M * N));
44
+ }
45
+
46
+ ~GemmTest()
47
+ {
48
+ check_cuda_error(cudaFree(A));
49
+ check_cuda_error(cudaFree(B));
50
+ check_cuda_error(cudaFree(C));
51
+ }
52
+
53
+ std::array<int, 3> TestAlgo(int loops)
54
+ {
55
+ float alpha = (T)1.0f;
56
+ float beta = (T)0.0f;
57
+
58
+ int algo_fw = Run(loops, [=](int algo) {
59
+ cublas_gemm_ex(handle,
60
+ CUBLAS_OP_T,
61
+ CUBLAS_OP_N,
62
+ N,
63
+ M,
64
+ K,
65
+ &alpha,
66
+ &beta,
67
+ B,
68
+ A,
69
+ C,
70
+ #ifdef __HIP_PLATFORM_AMD__
71
+ static_cast<rocblas_gemm_algo>(algo));
72
+ #else
73
+ static_cast<cublasGemmAlgo_t>(algo));
74
+ #endif
75
+ });
76
+
77
+ int algo_bw1 = Run(loops, [=](int algo) {
78
+ cublas_gemm_ex(handle,
79
+ CUBLAS_OP_N,
80
+ CUBLAS_OP_T,
81
+ K,
82
+ N,
83
+ M,
84
+ &alpha,
85
+ &beta,
86
+ A,
87
+ C,
88
+ B,
89
+ #ifdef __HIP_PLATFORM_AMD__
90
+ static_cast<rocblas_gemm_algo>(algo));
91
+ #else
92
+ static_cast<cublasGemmAlgo_t>(algo));
93
+ #endif
94
+ });
95
+
96
+ int algo_bw2 = Run(loops, [=](int algo) {
97
+ cublas_gemm_ex(handle,
98
+ CUBLAS_OP_N,
99
+ CUBLAS_OP_N,
100
+ K,
101
+ M,
102
+ N,
103
+ &alpha,
104
+ &beta,
105
+ B,
106
+ C,
107
+ A,
108
+ #ifdef __HIP_PLATFORM_AMD__
109
+ static_cast<rocblas_gemm_algo>(algo));
110
+ #else
111
+ static_cast<cublasGemmAlgo_t>(algo));
112
+ #endif
113
+ });
114
+
115
+ return std::array<int, 3>({algo_fw, algo_bw1, algo_bw2});
116
+ }
117
+
118
+ template <typename Func>
119
+ int Run(int loops, Func f)
120
+ {
121
+ float fast_latency = (std::numeric_limits<float>::max)();
122
+ int fast_algo = 0;
123
+
124
+ #ifdef __HIP_PLATFORM_AMD__
125
+ for (int algo = (int)rocblas_gemm_algo_standard; algo <= (int)rocblas_gemm_algo_standard;
126
+ #else
127
+ for (int algo = (int)CUBLAS_GEMM_DEFAULT_TENSOR_OP;
128
+ algo <= (int)CUBLAS_GEMM_ALGO15_TENSOR_OP;
129
+ #endif
130
+ algo++) {
131
+ int warm_up = 5;
132
+ for (int i = 0; i < warm_up; ++i) f(algo);
133
+
134
+ cudaDeviceSynchronize();
135
+ Stopwatch timer;
136
+ timer.Restart();
137
+
138
+ for (int i = 0; i < loops; ++i) f(algo);
139
+
140
+ cudaDeviceSynchronize();
141
+ timer.Stop();
142
+
143
+ float avg_latency = (float)timer.GetTimeInSeconds() * 1000 / loops;
144
+
145
+ printf("algo-%d: %.3fms\n", algo, avg_latency);
146
+
147
+ if (avg_latency < fast_latency) {
148
+ fast_latency = avg_latency;
149
+ fast_algo = algo;
150
+ }
151
+ }
152
+
153
+ printf("fast_algo %d: %.3f ms\n", fast_algo, fast_latency);
154
+
155
+ return fast_algo;
156
+ }
157
+
158
+ private:
159
+ int M, N, K;
160
+ cublasHandle_t handle;
161
+ cublasOperation_t transa, transb;
162
+ T *A, *B, *C;
163
+ };
164
+
165
+ template <typename T>
166
+ class StridedGemmTest {
167
+ public:
168
+ StridedGemmTest(int b,
169
+ int m,
170
+ int n,
171
+ int k,
172
+ cublasOperation_t ta,
173
+ cublasOperation_t tb,
174
+ cublasHandle_t h)
175
+ : bsz(b), M(m), N(n), K(k), transa(ta), transb(tb), handle(h)
176
+ {
177
+ check_cuda_error(cudaMalloc((void**)&A, sizeof(T) * M * K * bsz));
178
+ check_cuda_error(cudaMalloc((void**)&B, sizeof(T) * K * N * bsz));
179
+ check_cuda_error(cudaMalloc((void**)&C, sizeof(T) * M * N * bsz));
180
+ }
181
+
182
+ ~StridedGemmTest()
183
+ {
184
+ check_cuda_error(cudaFree(A));
185
+ check_cuda_error(cudaFree(B));
186
+ check_cuda_error(cudaFree(C));
187
+ }
188
+
189
+ std::array<int, 3> TestAlgo(int loops)
190
+ {
191
+ float alpha = (T)1.0f;
192
+ float beta = (T)0.0f;
193
+
194
+ int algo_fw = Run(loops, [=](int algo) {
195
+ int stride_a = M * K;
196
+ int stride_b = N * K;
197
+ int stride_c = M * N;
198
+
199
+ cublas_strided_batched_gemm(handle,
200
+ M,
201
+ N,
202
+ K,
203
+ &alpha,
204
+ &beta,
205
+ A,
206
+ B,
207
+ C,
208
+ transa,
209
+ transb,
210
+ stride_a,
211
+ stride_b,
212
+ stride_c,
213
+ bsz,
214
+ #ifdef __HIP_PLATFORM_AMD__
215
+ static_cast<rocblas_gemm_algo>(algo));
216
+ #else
217
+ static_cast<cublasGemmAlgo_t>(algo));
218
+ #endif
219
+ });
220
+
221
+ int algo_bw1 = Run(loops, [=](int algo) {
222
+ int mb = (transa == CUBLAS_OP_T ? K : M);
223
+ int kb = (transa == CUBLAS_OP_T ? M : K);
224
+
225
+ int stride_a = mb * N;
226
+ int stride_b = N * kb;
227
+ int stride_c = M * K;
228
+
229
+ // B needs to be transposed.
230
+ cublasOperation_t op_b = (transb == CUBLAS_OP_T ? CUBLAS_OP_N : CUBLAS_OP_T);
231
+
232
+ // Calculate d_A.
233
+ cublas_strided_batched_gemm(handle,
234
+ mb,
235
+ kb,
236
+ N,
237
+ &alpha,
238
+ &beta,
239
+ (transa == CUBLAS_OP_T ? B : C),
240
+ (transa == CUBLAS_OP_T ? C : B),
241
+ A,
242
+ CUBLAS_OP_N,
243
+ op_b,
244
+ stride_a,
245
+ stride_b,
246
+ stride_c,
247
+ bsz,
248
+ #ifdef __HIP_PLATFORM_AMD__
249
+ static_cast<rocblas_gemm_algo>(algo));
250
+ #else
251
+ static_cast<cublasGemmAlgo_t>(algo));
252
+ #endif
253
+ });
254
+
255
+ int algo_bw2 = Run(loops, [=](int algo) {
256
+ // A needs to be transposed.
257
+ cublasOperation_t op_a = (transa == CUBLAS_OP_T ? CUBLAS_OP_N : CUBLAS_OP_T);
258
+
259
+ int stride_a = M * K;
260
+ int stride_b = M * N;
261
+ int stride_c = N * K;
262
+
263
+ // Calculate d_B.
264
+ cublas_strided_batched_gemm(handle,
265
+ K,
266
+ N,
267
+ M,
268
+ &alpha,
269
+ &beta,
270
+ A,
271
+ C,
272
+ B,
273
+ op_a,
274
+ CUBLAS_OP_N,
275
+ stride_a,
276
+ stride_b,
277
+ stride_c,
278
+ bsz,
279
+ #ifdef __HIP_PLATFORM_AMD__
280
+ static_cast<rocblas_gemm_algo>(algo));
281
+ #else
282
+ static_cast<cublasGemmAlgo_t>(algo));
283
+ #endif
284
+ });
285
+
286
+ return std::array<int, 3>({algo_fw, algo_bw1, algo_bw2});
287
+ }
288
+
289
+ template <typename Func>
290
+ int Run(int loops, Func f)
291
+ {
292
+ float fast_latency = (std::numeric_limits<float>::max)();
293
+ int fast_algo = 0;
294
+
295
+ #ifdef __HIP_PLATFORM_AMD__
296
+ for (int algo = (int)rocblas_gemm_algo_standard; algo <= (int)rocblas_gemm_algo_standard;
297
+ #else
298
+ for (int algo = (int)CUBLAS_GEMM_DEFAULT_TENSOR_OP;
299
+ algo <= (int)CUBLAS_GEMM_ALGO15_TENSOR_OP;
300
+ #endif
301
+ algo++) {
302
+ int warm_up = 5;
303
+ for (int i = 0; i < warm_up; ++i) f(algo);
304
+
305
+ cudaDeviceSynchronize();
306
+ Stopwatch timer;
307
+ timer.Restart();
308
+
309
+ for (int i = 0; i < loops; ++i) f(algo);
310
+
311
+ cudaDeviceSynchronize();
312
+ timer.Stop();
313
+
314
+ float avg_latency = (float)timer.GetTimeInSeconds() * 1000 / loops;
315
+
316
+ printf("algo-%d: %.3fms\n", algo, avg_latency);
317
+
318
+ if (avg_latency < fast_latency) {
319
+ fast_latency = avg_latency;
320
+ fast_algo = algo;
321
+ }
322
+ }
323
+
324
+ printf("fast_algo %d: %.3f ms\n", fast_algo, fast_latency);
325
+
326
+ return fast_algo;
327
+ }
328
+
329
+ private:
330
+ int bsz, M, N, K;
331
+ cublasHandle_t handle;
332
+ cublasOperation_t transa, transb;
333
+ T *A, *B, *C;
334
+ };
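Both Run() methods follow the same measurement pattern: warm up, synchronize, time `loops` calls per algorithm, and keep the fastest. The sketch below restates that pattern with std::chrono in place of the header's Stopwatch, purely for clarity (the helper name is ours):

    #include <chrono>
    #include <cstdio>
    #include <cuda_runtime.h>
    #include <limits>

    template <typename Func>
    int pick_fastest(int first_algo, int last_algo, int loops, Func f)
    {
        float best_ms = std::numeric_limits<float>::max();
        int best_algo = first_algo;
        for (int algo = first_algo; algo <= last_algo; ++algo) {
            for (int i = 0; i < 5; ++i) f(algo);  // warm-up iterations
            cudaDeviceSynchronize();
            auto t0 = std::chrono::high_resolution_clock::now();
            for (int i = 0; i < loops; ++i) f(algo);
            cudaDeviceSynchronize();
            auto t1 = std::chrono::high_resolution_clock::now();
            float ms = std::chrono::duration<float, std::milli>(t1 - t0).count() / loops;
            std::printf("algo-%d: %.3fms\n", algo, ms);
            if (ms < best_ms) { best_ms = ms; best_algo = algo; }
        }
        return best_algo;
    }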
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/general_kernels.h ADDED
@@ -0,0 +1,56 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include <cuda.h>
7
+ #include <cuda_fp16.h>
8
+ #include <stdio.h>
9
+ #include <stdlib.h>
10
+
11
+ #ifdef __HIP_PLATFORM_AMD__
12
+ #include <hip/hip_cooperative_groups.h>
13
+ #else
14
+ #include <cooperative_groups.h>
15
+ #endif
16
+ #include <curand_kernel.h>
17
+
18
+ #include "context.h"
19
+ #include "cublas_wrappers.h"
20
+
21
+ #define THREADS 256
22
+ #define TILE_DIM 32
23
+
24
+ #define minus_infinity -1 * std::numeric_limits<float>::infinity()
25
+
26
+ #define FINAL_MASK 0xffffffff
27
+
28
+ template <typename T>
29
+ void launch_fused_add2(T* out,
30
+ const T* inp1,
31
+ const T* inp2,
32
+ int batch_size,
33
+ int seq_length,
34
+ int hidden_size,
35
+ cudaStream_t& stream);
36
+
37
+ template <typename T>
38
+ void launch_fused_add4(T* out,
39
+ const T* inp1,
40
+ const T* inp2,
41
+ const T* inp3,
42
+ const T* inp4,
43
+ int batch_size,
44
+ int seq_length,
45
+ int hidden_size,
46
+ cudaStream_t& stream);
47
+
48
+ template <typename T>
49
+ void launch_fused_add3(T* out,
50
+ const T* inp1,
51
+ const T* inp2,
52
+ const T* inp3,
53
+ int batch_size,
54
+ int seq_length,
55
+ int hidden_size,
56
+ cudaStream_t& stream);
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/memory_access_utils.h ADDED
@@ -0,0 +1,1144 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include <cuda.h>
9
+ #include "ds_kernel_utils.h"
10
+
11
+ /////////////////////////////// Memory Access Utils ///////////////////////////////
12
+ namespace mem_access {
13
+
14
+ enum class LoadPolicy {
15
+ CacheAll, // Cache at all levels
16
+ CacheGlobal, // Cache at L2 only
17
+ CacheStreaming // Cache with evict first policy
18
+ };
19
+
20
+ enum class StorePolicy {
21
+ Writeback, // Cache in L1, write-back on eviction
22
+ CacheGlobal, // Bypass L1, write-back on eviction
23
+ CacheStreaming // Allocate cache line with evict first policy
24
+ };
25
+
26
+ template <int AccessSize, LoadPolicy policy = LoadPolicy::CacheAll>
27
+ __device__ __forceinline__ void load_global(void* dst, const void* src);
28
+
29
+ template <int AccessSize, LoadPolicy policy = LoadPolicy::CacheAll>
30
+ __device__ __forceinline__ void load_global(void* dst, const void* src, bool do_access);
31
+
32
+ // Shared accesses have no cache policy
33
+ template <int AccessSize>
34
+ __device__ __forceinline__ void load_shared(void* dst, const void* src);
35
+
36
+ template <int AccessSize>
37
+ __device__ __forceinline__ void load_shared(void* dst, const void* src, bool do_access);
38
+
39
+ template <int AccessSize, StorePolicy policy = StorePolicy::Writeback>
40
+ __device__ __forceinline__ void store_global(void* dst, const void* src);
41
+
42
+ // Shared accesses have no cache policy
43
+ template <int AccessSize>
44
+ __device__ __forceinline__ void store_shared(void* dst, const void* src);
45
+
46
+ #ifdef ASYNC_COPY_AVAILABLE
47
+ template <int AccessSize>
48
+ __device__ __forceinline__ void memcpy_async(void* shr, const void* gbl);
49
+
50
+ template <int AccessSize>
51
+ __device__ __forceinline__ void memcpy_async_nop(void* shr, const void* gbl, bool predicate);
52
+
53
+ template <int AccessSize>
54
+ __device__ __forceinline__ void memcpy_async_zero(void* shr, const void* gbl, bool predicate);
55
+
56
+ __device__ __forceinline__ void memcpy_async_fence();
57
+
58
+ template <int stages>
59
+ __device__ __forceinline__ void memcpy_async_wait();
60
+
61
+ template <int stages>
62
+ __device__ __forceinline__ void tail_complete_wait(int remaining_stages);
63
+ #endif
64
+
65
+ // Util for tracking pipeline buffers
66
+ // TODO: Evaluate whether this should also be guarded by ASYNC_COPY_AVAILABLE
67
+ template <int max>
68
+ class BufferTracker {
69
+ public:
70
+ int current_state;
71
+
72
+ __device__ __forceinline__ BufferTracker() : current_state(0) {}
73
+
74
+ __device__ __forceinline__ int get()
75
+ {
76
+ int return_val = current_state++;
77
+ current_state = (current_state == max ? 0 : current_state);
78
+ return return_val;
79
+ }
80
+ };
81
+
82
+ __device__ __forceinline__ uint32_t lane_id()
83
+ {
84
+ #ifdef PTX_AVAILABLE
85
+ unsigned int lane_id;
86
+ asm volatile("mov.u32 %0, %%laneid;" : "=r"(lane_id));
87
+ return lane_id;
88
+ #else
89
+ return threadIdx.x & (warpSize - 1); // Portable
90
+ #endif
91
+ }
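The specializations that follow back these declarations with inline PTX. In practice a kernel pairs a vectorized load with a matching store, moving 16 bytes (e.g. 8 halfs) per thread per step; the fragment below is an illustrative device-side example, not a kernel from this file:

    __global__ void copy_half8(__half* out, const __half* in, int n_chunks)
    {
        const int chunk = blockIdx.x * blockDim.x + threadIdx.x;
        if (chunk < n_chunks) {
            __half buf[8];  // 16 bytes of per-thread storage
            mem_access::load_global<16>(buf, in + chunk * 8);
            mem_access::store_global<16>(out + chunk * 8, buf);
        }
    }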
92
+
93
+ /////////// Load Global ///////////
94
+ template <>
95
+ __device__ __forceinline__ void load_global<16>(void* dst, const void* src)
96
+ {
97
+ uint4* data = reinterpret_cast<uint4*>(dst);
98
+ #ifdef PTX_AVAILABLE
99
+ asm volatile("ld.global.ca.v4.u32 {%0, %1, %2, %3}, [%4];\n"
100
+ : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
101
+ : "l"(src));
102
+ #else
103
+ const uint4* src_cast = reinterpret_cast<const uint4*>(src);
104
+ data[0] = src_cast[0];
105
+ #endif
106
+ }
107
+
108
+ template <>
109
+ __device__ __forceinline__ void load_global<16>(void* dst, const void* src, bool do_access)
110
+ {
111
+ uint4* data = reinterpret_cast<uint4*>(dst);
112
+ #ifdef PTX_AVAILABLE
113
+ asm volatile(
114
+ "{\n"
115
+ "\t.reg .pred p;\n"
116
+ "\tsetp.ne.b32 p, %5, 0;\n"
117
+ "\tmov.b32 %0, 0;\n"
118
+ "\tmov.b32 %1, 0;\n"
119
+ "\tmov.b32 %2, 0;\n"
120
+ "\tmov.b32 %3, 0;\n"
121
+ "\t@p ld.global.v4.u32 {%0, %1, %2, %3}, [%4];\n"
122
+ "}\n"
123
+ : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
124
+ : "l"(src), "r"((int)do_access));
125
+ #else
126
+ const uint4* src_cast = reinterpret_cast<const uint4*>(src);
127
+ if (do_access) {
128
+ data[0] = src_cast[0];
129
+ } else {
130
+ data[0].x = 0;
131
+ data[0].y = 0;
132
+ data[0].z = 0;
133
+ data[0].w = 0;
134
+ }
135
+ #endif
136
+ }
137
+
138
+ template <>
139
+ __device__ __forceinline__ void load_global<16, LoadPolicy::CacheGlobal>(void* dst, const void* src)
140
+ {
141
+ uint4* data = reinterpret_cast<uint4*>(dst);
142
+ #ifdef PTX_AVAILABLE
143
+ asm volatile("ld.global.cg.v4.u32 {%0, %1, %2, %3}, [%4];\n"
144
+ : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
145
+ : "l"(src));
146
+ #else
147
+ const uint4* src_cast = reinterpret_cast<const uint4*>(src);
148
+ data[0] = src_cast[0];
149
+ #endif
150
+ }
151
+
152
+ template <>
153
+ __device__ __forceinline__ void load_global<16, LoadPolicy::CacheGlobal>(void* dst,
154
+ const void* src,
155
+ bool do_access)
156
+ {
157
+ uint4* data = reinterpret_cast<uint4*>(dst);
158
+ #ifdef PTX_AVAILABLE
159
+ asm volatile(
160
+ "{\n"
161
+ "\t.reg .pred p;\n"
162
+ "\tsetp.ne.b32 p, %5, 0;\n"
163
+ "\tmov.b32 %0, 0;\n"
164
+ "\tmov.b32 %1, 0;\n"
165
+ "\tmov.b32 %2, 0;\n"
166
+ "\tmov.b32 %3, 0;\n"
167
+ "\t@p ld.global.cg.v4.u32 {%0, %1, %2, %3}, [%4];\n"
168
+ "}\n"
169
+ : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
170
+ : "l"(src), "r"((int)do_access));
171
+ #else
172
+ const uint4* src_cast = reinterpret_cast<const uint4*>(src);
173
+ if (do_access) {
174
+ data[0] = src_cast[0];
175
+ } else {
176
+ data[0].x = 0;
177
+ data[0].y = 0;
178
+ data[0].z = 0;
179
+ data[0].w = 0;
180
+ }
181
+ #endif
182
+ }
183
+
184
+ template <>
185
+ __device__ __forceinline__ void load_global<16, LoadPolicy::CacheStreaming>(void* dst,
186
+ const void* src)
187
+ {
188
+ uint4* data = reinterpret_cast<uint4*>(dst);
189
+ #ifdef PTX_AVAILABLE
190
+ asm volatile("ld.global.cs.v4.u32 {%0, %1, %2, %3}, [%4];\n"
191
+ : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
192
+ : "l"(src));
193
+ #else
194
+ const uint4* src_cast = reinterpret_cast<const uint4*>(src);
195
+ data[0] = src_cast[0];
196
+ #endif
197
+ }
198
+
199
+ template <>
200
+ __device__ __forceinline__ void load_global<16, LoadPolicy::CacheStreaming>(void* dst,
201
+ const void* src,
202
+ bool do_access)
203
+ {
204
+ uint4* data = reinterpret_cast<uint4*>(dst);
205
+ #ifdef PTX_AVAILABLE
206
+ asm volatile(
207
+ "{\n"
208
+ "\t.reg .pred p;\n"
209
+ "\tsetp.ne.b32 p, %5, 0;\n"
210
+ "\tmov.b32 %0, 0;\n"
211
+ "\tmov.b32 %1, 0;\n"
212
+ "\tmov.b32 %2, 0;\n"
213
+ "\tmov.b32 %3, 0;\n"
214
+ "\t@p ld.global.cg.v4.u32 {%0, %1, %2, %3}, [%4];\n"
215
+ "}\n"
216
+ : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
217
+ : "l"(src), "r"((int)do_access));
218
+ #else
219
+ const uint4* src_cast = reinterpret_cast<const uint4*>(src);
220
+ if (do_access) {
221
+ data[0] = src_cast[0];
222
+ } else {
223
+ data[0].x = 0;
224
+ data[0].y = 0;
225
+ data[0].z = 0;
226
+ data[0].w = 0;
227
+ }
228
+ #endif
229
+ }
230
+
231
+ template <>
232
+ __device__ __forceinline__ void load_global<8>(void* dst, const void* src)
233
+ {
234
+ uint2* data = reinterpret_cast<uint2*>(dst);
235
+ #ifdef PTX_AVAILABLE
236
+ asm volatile("ld.global.ca.v2.u32 {%0, %1}, [%2];\n"
237
+ : "=r"(data[0].x), "=r"(data[0].y)
238
+ : "l"(src));
239
+ #else
240
+ const uint2* src_cast = reinterpret_cast<const uint2*>(src);
241
+ data[0] = src_cast[0];
242
+ #endif
243
+ }
244
+
245
+ template <>
246
+ __device__ __forceinline__ void load_global<8>(void* dst, const void* src, bool do_access)
247
+ {
248
+ uint2* data = reinterpret_cast<uint2*>(dst);
249
+ #ifdef PTX_AVAILABLE
250
+ asm volatile(
251
+ "{\n"
252
+ "\t.reg .pred p;\n"
253
+ "\tsetp.ne.b32 p, %3, 0;\n"
254
+ "\tmov.b32 %0, 0;\n"
255
+ "\tmov.b32 %1, 0;\n"
256
+ "\t@p ld.global.v2.u32 {%0, %1}, [%2];\n"
257
+ "}\n"
258
+ : "=r"(data[0].x), "=r"(data[0].y)
259
+ : "l"(src), "r"((int)do_access));
260
+ #else
261
+ const uint2* src_cast = reinterpret_cast<const uint2*>(src);
262
+ if (do_access) {
263
+ data[0] = src_cast[0];
264
+ } else {
265
+ data[0].x = 0;
266
+ data[0].y = 0;
267
+ }
268
+ #endif
269
+ }
270
+
271
+ template <>
272
+ __device__ __forceinline__ void load_global<8, LoadPolicy::CacheGlobal>(void* dst, const void* src)
273
+ {
274
+ uint2* data = reinterpret_cast<uint2*>(dst);
275
+ #ifdef PTX_AVAILABLE
276
+ asm volatile("ld.global.cg.v2.u32 {%0, %1}, [%2];\n"
277
+ : "=r"(data[0].x), "=r"(data[0].y)
278
+ : "l"(src));
279
+ #else
280
+ const uint2* src_cast = reinterpret_cast<const uint2*>(src);
281
+ data[0] = src_cast[0];
282
+ #endif
283
+ }
284
+
285
+ template <>
286
+ __device__ __forceinline__ void load_global<8, LoadPolicy::CacheGlobal>(void* dst,
287
+ const void* src,
288
+ bool do_access)
289
+ {
290
+ uint2* data = reinterpret_cast<uint2*>(dst);
291
+ #ifdef PTX_AVAILABLE
292
+ asm volatile(
293
+ "{\n"
294
+ "\t.reg .pred p;\n"
295
+ "\tsetp.ne.b32 p, %3, 0;\n"
296
+ "\tmov.b32 %0, 0;\n"
297
+ "\tmov.b32 %1, 0;\n"
298
+ "\t@p ld.global.cg.v2.u32 {%0, %1}, [%2];\n"
299
+ "}\n"
300
+ : "=r"(data[0].x), "=r"(data[0].y)
301
+ : "l"(src), "r"((int)do_access));
302
+ #else
303
+ const uint2* src_cast = reinterpret_cast<const uint2*>(src);
304
+ if (do_access) {
305
+ data[0] = src_cast[0];
306
+ } else {
307
+ data[0].x = 0;
308
+ data[0].y = 0;
309
+ }
310
+ #endif
311
+ }
312
+
313
+ template <>
314
+ __device__ __forceinline__ void load_global<8, LoadPolicy::CacheStreaming>(void* dst,
315
+ const void* src)
316
+ {
317
+ uint2* data = reinterpret_cast<uint2*>(dst);
318
+ #ifdef PTX_AVAILABLE
319
+ asm volatile("ld.global.cs.v2.u32 {%0, %1}, [%2];\n"
320
+ : "=r"(data[0].x), "=r"(data[0].y)
321
+ : "l"(src));
322
+ #else
323
+ const uint2* src_cast = reinterpret_cast<const uint2*>(src);
324
+ data[0] = src_cast[0];
325
+ #endif
326
+ }
327
+
328
+ template <>
329
+ __device__ __forceinline__ void load_global<8, LoadPolicy::CacheStreaming>(void* dst,
330
+ const void* src,
331
+ bool do_access)
332
+ {
333
+ uint2* data = reinterpret_cast<uint2*>(dst);
334
+ #ifdef PTX_AVAILABLE
335
+ asm volatile(
336
+ "{\n"
337
+ "\t.reg .pred p;\n"
338
+ "\tsetp.ne.b32 p, %3, 0;\n"
339
+ "\tmov.b32 %0, 0;\n"
340
+ "\tmov.b32 %1, 0;\n"
341
+ "\t@p ld.global.cs.v2.u32 {%0, %1}, [%2];\n"
342
+ "}\n"
343
+ : "=r"(data[0].x), "=r"(data[0].y)
344
+ : "l"(src), "r"((int)do_access));
345
+ #else
346
+ const uint2* src_cast = reinterpret_cast<const uint2*>(src);
347
+ if (do_access) {
348
+ data[0] = src_cast[0];
349
+ } else {
350
+ data[0].x = 0;
351
+ data[0].y = 0;
352
+ }
353
+ #endif
354
+ }
355
+
356
+ template <>
357
+ __device__ __forceinline__ void load_global<4>(void* dst, const void* src)
358
+ {
359
+ int32_t* data = reinterpret_cast<int32_t*>(dst);
360
+ #ifdef PTX_AVAILABLE
361
+ asm volatile("ld.global.ca.u32 {%0}, [%1];\n" : "=r"(*data) : "l"(src));
362
+ #else
363
+ const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
364
+ data[0] = src_cast[0];
365
+ #endif
366
+ }
367
+
368
+ template <>
369
+ __device__ __forceinline__ void load_global<4>(void* dst, const void* src, bool do_access)
370
+ {
371
+ int32_t* data = reinterpret_cast<int32_t*>(dst);
372
+ #ifdef PTX_AVAILABLE
373
+ asm volatile(
374
+ "{\n"
375
+ "\t.reg .pred p;\n"
376
+ "\tsetp.ne.b32 p, %2, 0;\n"
377
+ "\tmov.b32 %0, 0;\n"
378
+ "\t@p ld.global.u32 {%0}, [%1];\n"
379
+ "}\n"
380
+ : "=r"(data[0])
381
+ : "l"(src), "r"((int)do_access));
382
+ #else
383
+ const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
384
+ if (do_access) {
385
+ data[0] = src_cast[0];
386
+ } else {
387
+ data[0] = 0;
388
+ }
389
+ #endif
390
+ }
391
+
392
+ template <>
393
+ __device__ __forceinline__ void load_global<4, LoadPolicy::CacheGlobal>(void* dst, const void* src)
394
+ {
395
+ int32_t* data = reinterpret_cast<int32_t*>(dst);
396
+ #ifdef PTX_AVAILABLE
397
+ asm volatile("ld.global.cg.u32 {%0}, [%1];\n" : "=r"(*data) : "l"(src));
398
+ #else
399
+ const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
400
+ data[0] = src_cast[0];
401
+ #endif
402
+ }
403
+
404
+ template <>
405
+ __device__ __forceinline__ void load_global<4, LoadPolicy::CacheGlobal>(void* dst,
406
+ const void* src,
407
+ bool do_access)
408
+ {
409
+ int32_t* data = reinterpret_cast<int32_t*>(dst);
410
+ #ifdef PTX_AVAILABLE
411
+ asm volatile(
412
+ "{\n"
413
+ "\t.reg .pred p;\n"
414
+ "\tsetp.ne.b32 p, %2, 0;\n"
415
+ "\tmov.b32 %0, 0;\n"
416
+ "\t@p ld.global.cg.u32 {%0}, [%1];\n"
417
+ "}\n"
418
+ : "=r"(data[0])
419
+ : "l"(src), "r"((int)do_access));
420
+ #else
421
+ const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
422
+ if (do_access) {
423
+ data[0] = src_cast[0];
424
+ } else {
425
+ data[0] = 0;
426
+ }
427
+ #endif
428
+ }
429
+
430
+ template <>
431
+ __device__ __forceinline__ void load_global<4, LoadPolicy::CacheStreaming>(void* dst,
432
+ const void* src)
433
+ {
434
+ int32_t* data = reinterpret_cast<int32_t*>(dst);
435
+ #ifdef PTX_AVAILABLE
436
+ asm volatile("ld.global.cs.u32 {%0}, [%1];\n" : "=r"(*data) : "l"(src));
437
+ #else
438
+ const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
439
+ data[0] = src_cast[0];
440
+ #endif
441
+ }
442
+
443
+ template <>
444
+ __device__ __forceinline__ void load_global<4, LoadPolicy::CacheStreaming>(void* dst,
445
+ const void* src,
446
+ bool do_access)
447
+ {
448
+ int32_t* data = reinterpret_cast<int32_t*>(dst);
449
+ #ifdef PTX_AVAILABLE
450
+ asm volatile(
451
+ "{\n"
452
+ "\t.reg .pred p;\n"
453
+ "\tsetp.ne.b32 p, %2, 0;\n"
454
+ "\tmov.b32 %0, 0;\n"
455
+ "\t@p ld.global.cs.u32 {%0}, [%1];\n"
456
+ "}\n"
457
+ : "=r"(data[0])
458
+ : "l"(src), "r"((int)do_access));
459
+ #else
460
+ const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
461
+ if (do_access) {
462
+ data[0] = src_cast[0];
463
+ } else {
464
+ data[0] = 0;
465
+ }
466
+ #endif
467
+ }
468
+
469
+ template <>
470
+ __device__ __forceinline__ void load_global<2>(void* dst, const void* src)
471
+ {
472
+ int16_t* data = reinterpret_cast<int16_t*>(dst);
473
+ #ifdef PTX_AVAILABLE
474
+ asm volatile("ld.global.ca.u16 {%0}, [%1];\n" : "=h"(*data) : "l"(src));
475
+ #else
476
+ const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
477
+ data[0] = src_cast[0];
478
+ #endif
479
+ }
480
+
481
+ template <>
482
+ __device__ __forceinline__ void load_global<2>(void* dst, const void* src, bool do_access)
483
+ {
484
+ int16_t* data = reinterpret_cast<int16_t*>(dst);
485
+ #ifdef PTX_AVAILABLE
486
+ asm volatile(
487
+ "{\n"
488
+ "\t.reg .pred p;\n"
489
+ "\tsetp.ne.b32 p, %2, 0;\n"
490
+ "\tmov.u16 %0, 0;\n"
491
+ "\t@p ld.global.u16 {%0}, [%1];\n"
492
+ "}\n"
493
+ : "=h"(*data)
494
+ : "l"(src), "r"((int)do_access));
495
+ #else
496
+ const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
497
+ if (do_access) {
498
+ data[0] = src_cast[0];
499
+ } else {
500
+ data[0] = 0;
501
+ }
502
+ #endif
503
+ }
504
+
505
+ template <>
506
+ __device__ __forceinline__ void load_global<2, LoadPolicy::CacheGlobal>(void* dst, const void* src)
507
+ {
508
+ int16_t* data = reinterpret_cast<int16_t*>(dst);
509
+ #ifdef PTX_AVAILABLE
510
+ asm volatile("ld.global.cg.u16 {%0}, [%1];\n" : "=h"(*data) : "l"(src));
511
+ #else
512
+ const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
513
+ data[0] = src_cast[0];
514
+ #endif
515
+ }
516
+
517
+ template <>
518
+ __device__ __forceinline__ void load_global<2, LoadPolicy::CacheGlobal>(void* dst,
519
+ const void* src,
520
+ bool do_access)
521
+ {
522
+ int16_t* data = reinterpret_cast<int16_t*>(dst);
523
+ #ifdef PTX_AVAILABLE
524
+ asm volatile(
525
+ "{\n"
526
+ "\t.reg .pred p;\n"
527
+ "\tsetp.ne.b32 p, %2, 0;\n"
528
+ "\tmov.u16 %0, 0;\n"
529
+ "\t@p ld.global.cg.u16 {%0}, [%1];\n"
530
+ "}\n"
531
+ : "=h"(*data)
532
+ : "l"(src), "r"((int)do_access));
533
+ #else
534
+ const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
535
+ if (do_access) {
536
+ data[0] = src_cast[0];
537
+ } else {
538
+ data[0] = 0;
539
+ }
540
+ #endif
541
+ }
542
+
543
+ template <>
544
+ __device__ __forceinline__ void load_global<2, LoadPolicy::CacheStreaming>(void* dst,
545
+ const void* src)
546
+ {
547
+ int16_t* data = reinterpret_cast<int16_t*>(dst);
548
+ #ifdef PTX_AVAILABLE
549
+ asm volatile("ld.global.cs.u16 {%0}, [%1];\n" : "=h"(*data) : "l"(src));
550
+ #else
551
+ const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
552
+ data[0] = src_cast[0];
553
+ #endif
554
+ }
555
+
556
+ template <>
557
+ __device__ __forceinline__ void load_global<2, LoadPolicy::CacheStreaming>(void* dst,
558
+ const void* src,
559
+ bool do_access)
560
+ {
561
+ int16_t* data = reinterpret_cast<int16_t*>(dst);
562
+ #ifdef PTX_AVAILABLE
563
+ asm volatile(
564
+ "{\n"
565
+ "\t.reg .pred p;\n"
566
+ "\tsetp.ne.b32 p, %2, 0;\n"
567
+ "\tmov.u16 %0, 0;\n"
568
+ "\t@p ld.global.cs.u16 {%0}, [%1];\n"
569
+ "}\n"
570
+ : "=h"(*data)
571
+ : "l"(src), "r"((int)do_access));
572
+ #else
573
+ const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
574
+ if (do_access) {
575
+ data[0] = src_cast[0];
576
+ } else {
577
+ data[0] = 0;
578
+ }
579
+ #endif
580
+ }
581
+
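Taken together, the load_global/store_global specializations in this header form a small vectorized-access API: the caller picks an access width in bytes and, optionally, a cache policy, and the predicated overloads zero-fill the destination registers when the guard is false. A minimal sketch of how a kernel might drive it follows; the kernel name and launch shape are illustrative assumptions, not part of this header.

// Illustrative only: a bounds-guarded vectorized copy built on the primitives in this header.
__global__ void vectorized_copy(float* dst, const float* src, int num_elems)
{
    constexpr int access_bytes = 16;  // one uint4 per thread
    constexpr int elems_per_access = access_bytes / sizeof(float);

    const int idx = (blockIdx.x * blockDim.x + threadIdx.x) * elems_per_access;
    const bool in_bounds = (idx + elems_per_access) <= num_elems;

    float buffer[elems_per_access];
    // Predicated load: zero-fills `buffer` when `in_bounds` is false.
    mem_access::load_global<access_bytes>(buffer, src + idx, in_bounds);
    if (in_bounds) { mem_access::store_global<access_bytes>(dst + idx, buffer); }
}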
582
+ /////////// Load Shared ///////////
583
+ namespace internal {
584
+
585
+ #ifdef PTX_AVAILABLE
586
+ __device__ __forceinline__ unsigned convert_to_shared(const void* ptr)
587
+ {
588
+ #if __CUDACC_VER_MAJOR__ >= 11
589
+ // In CUDA 11 we have a builtin intrinsic
590
+ return __cvta_generic_to_shared(ptr);
591
+ #else
592
+ unsigned ret_val;
593
+ asm volatile(
594
+ "{\n"
595
+ "\t.reg .u64 p1;\n"
596
+ "\tcvta.to.shared.u64 p1, %1\n"
597
+ "\tcvt.u32.u64 %0, p1;\n"
598
+ "}\n"
599
+ : "=r"(ret_val)
600
+ : "l"(ptr));
601
+ return ret_val;
602
+ #endif
603
+ }
604
+ #endif
605
+
606
+ } // namespace internal
607
+
608
+ template <>
609
+ __device__ __forceinline__ void load_shared<16>(void* dst, const void* src)
610
+ {
611
+ uint4* data = reinterpret_cast<uint4*>(dst);
612
+ #ifdef PTX_AVAILABLE
613
+ unsigned src_shr = internal::convert_to_shared(src);
614
+
615
+ asm volatile("ld.shared.v4.u32 {%0, %1, %2, %3}, [%4];\n"
616
+ : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
617
+ : "r"(src_shr));
618
+ #else
619
+ const uint4* src_cast = reinterpret_cast<const uint4*>(src);
620
+ data[0] = src_cast[0];
621
+ #endif
622
+ }
623
+
624
+ template <>
625
+ __device__ __forceinline__ void load_shared<16>(void* dst, const void* src, bool do_access)
626
+ {
627
+ uint4* data = reinterpret_cast<uint4*>(dst);
628
+ #ifdef PTX_AVAILABLE
629
+ unsigned src_shr = internal::convert_to_shared(src);
630
+
631
+ asm volatile(
632
+ "{\n"
633
+ "\t.reg .pred p;\n"
634
+ "\tsetp.ne.b32 p, %5, 0;\n"
635
+ "\tmov.b32 %0, 0;\n"
636
+ "\tmov.b32 %1, 0;\n"
637
+ "\tmov.b32 %2, 0;\n"
638
+ "\tmov.b32 %3, 0;\n"
639
+ "\t@p ld.shared.v4.u32 {%0, %1, %2, %3}, [%4];\n"
640
+ "}\n"
641
+ : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
642
+ : "r"(src_shr), "r"((int)do_access));
643
+ #else
644
+ const uint4* src_cast = reinterpret_cast<const uint4*>(src);
645
+ if (do_access) {
646
+ data[0] = src_cast[0];
647
+ } else {
648
+ data[0].x = 0;
649
+ data[0].y = 0;
650
+ data[0].z = 0;
651
+ data[0].w = 0;
652
+ }
653
+ #endif
654
+ }
655
+
656
+ template <>
657
+ __device__ __forceinline__ void load_shared<8>(void* dst, const void* src)
658
+ {
659
+ uint2* data = reinterpret_cast<uint2*>(dst);
660
+ #ifdef PTX_AVAILABLE
661
+ unsigned src_shr = internal::convert_to_shared(src);
662
+
663
+ asm volatile("ld.shared.v2.u32 {%0, %1}, [%2];\n"
664
+ : "=r"(data[0].x), "=r"(data[0].y)
665
+ : "r"(src_shr));
666
+ #else
667
+ const uint2* src_cast = reinterpret_cast<const uint2*>(src);
668
+ data[0] = src_cast[0];
669
+ #endif
670
+ }
671
+
672
+ template <>
673
+ __device__ __forceinline__ void load_shared<8>(void* dst, const void* src, bool do_access)
674
+ {
675
+ uint2* data = reinterpret_cast<uint2*>(dst);
676
+ #ifdef PTX_AVAILABLE
677
+ unsigned src_shr = internal::convert_to_shared(src);
678
+
679
+ asm volatile(
680
+ "{\n"
681
+ "\t.reg .pred p;\n"
682
+ "\tsetp.ne.b32 p, %3, 0;\n"
683
+ "\tmov.b32 %0, 0;\n"
684
+ "\tmov.b32 %1, 0;\n"
685
+ "\t@p ld.shared.v2.u32 {%0, %1}, [%2];\n"
686
+ "}\n"
687
+ : "=r"(data[0].x), "=r"(data[0].y)
688
+ : "r"(src_shr), "r"((int)do_access));
689
+ #else
690
+ const uint2* src_cast = reinterpret_cast<const uint2*>(src);
691
+ if (do_access) {
692
+ data[0] = src_cast[0];
693
+ } else {
694
+ data[0].x = 0;
695
+ data[0].y = 0;
696
+ }
697
+ #endif
698
+ }
699
+
700
+ template <>
701
+ __device__ __forceinline__ void load_shared<4>(void* dst, const void* src)
702
+ {
703
+ int32_t* data = reinterpret_cast<int32_t*>(dst);
704
+ #ifdef PTX_AVAILABLE
705
+ unsigned src_shr = internal::convert_to_shared(src);
706
+
707
+ asm volatile("ld.shared.u32 {%0}, [%1];\n" : "=r"(*data) : "r"(src_shr));
708
+ #else
709
+ const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
710
+ data[0] = src_cast[0];
711
+ #endif
712
+ }
713
+
714
+ template <>
715
+ __device__ __forceinline__ void load_shared<4>(void* dst, const void* src, bool do_access)
716
+ {
717
+ int32_t* data = reinterpret_cast<int32_t*>(dst);
718
+ #ifdef PTX_AVAILABLE
719
+ unsigned src_shr = internal::convert_to_shared(src);
720
+
721
+ asm volatile(
722
+ "{\n"
723
+ "\t.reg .pred p;\n"
724
+ "\tsetp.ne.b32 p, %2, 0;\n"
725
+ "\tmov.b32 %0, 0;\n"
726
+ "\t@p ld.shared.u32 %0, [%1];\n"
727
+ "}\n"
728
+ : "=r"(data[0])
729
+ : "r"(src_shr), "r"((int)do_access));
730
+ #else
731
+ const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
732
+ if (do_access) {
733
+ data[0] = src_cast[0];
734
+ } else {
735
+ data[0] = 0;
736
+ }
737
+ #endif
738
+ }
739
+
740
+ /////////// Store Global ///////////
741
+
742
+ template <>
743
+ __device__ __forceinline__ void store_global<16>(void* dst, const void* src)
744
+ {
745
+ const uint4* data = reinterpret_cast<const uint4*>(src);
746
+ #ifdef PTX_AVAILABLE
747
+ asm volatile("st.global.wb.v4.u32 [%0], {%1, %2, %3, %4};\n"
748
+ :
749
+ : "l"(dst), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w)
750
+ : "memory");
751
+ #else
752
+ uint4* dst_cast = reinterpret_cast<uint4*>(dst);
753
+ dst_cast[0] = data[0];
754
+ #endif
755
+ }
756
+
757
+ template <>
758
+ __device__ __forceinline__ void store_global<16, StorePolicy::CacheGlobal>(void* dst,
759
+ const void* src)
760
+ {
761
+ const uint4* data = reinterpret_cast<const uint4*>(src);
762
+ #ifdef PTX_AVAILABLE
763
+ asm volatile("st.global.cg.v4.u32 [%0], {%1, %2, %3, %4};\n"
764
+ :
765
+ : "l"(dst), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w)
766
+ : "memory");
767
+ #else
768
+ uint4* dst_cast = reinterpret_cast<uint4*>(dst);
769
+ dst_cast[0] = data[0];
770
+ #endif
771
+ }
772
+
773
+ template <>
774
+ __device__ __forceinline__ void store_global<16, StorePolicy::CacheStreaming>(void* dst,
775
+ const void* src)
776
+ {
777
+ const uint4* data = reinterpret_cast<const uint4*>(src);
778
+ #ifdef PTX_AVAILABLE
779
+ asm volatile("st.global.cs.v4.u32 [%0], {%1, %2, %3, %4};\n"
780
+ :
781
+ : "l"(dst), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w)
782
+ : "memory");
783
+ #else
784
+ uint4* dst_cast = reinterpret_cast<uint4*>(dst);
785
+ dst_cast[0] = data[0];
786
+ #endif
787
+ }
788
+
789
+ template <>
790
+ __device__ __forceinline__ void store_global<8>(void* dst, const void* src)
791
+ {
792
+ const uint2* data = reinterpret_cast<const uint2*>(src);
793
+ #ifdef PTX_AVAILABLE
794
+ asm volatile("st.global.wb.v2.u32 [%0], {%1, %2};\n"
795
+ :
796
+ : "l"(dst), "r"(data[0].x), "r"(data[0].y));
797
+ #else
798
+ uint2* dst_cast = reinterpret_cast<uint2*>(dst);
799
+ dst_cast[0] = data[0];
800
+ #endif
801
+ }
802
+
803
+ template <>
804
+ __device__ __forceinline__ void store_global<8, StorePolicy::CacheGlobal>(void* dst,
805
+ const void* src)
806
+ {
807
+ const uint2* data = reinterpret_cast<const uint2*>(src);
808
+ #ifdef PTX_AVAILABLE
809
+ asm volatile("st.global.cg.v2.u32 [%0], {%1, %2};\n"
810
+ :
811
+ : "l"(dst), "r"(data[0].x), "r"(data[0].y));
812
+ #else
813
+ uint2* dst_cast = reinterpret_cast<uint2*>(dst);
814
+ dst_cast[0] = data[0];
815
+ #endif
816
+ }
817
+
818
+ template <>
819
+ __device__ __forceinline__ void store_global<8, StorePolicy::CacheStreaming>(void* dst,
820
+ const void* src)
821
+ {
822
+ const uint2* data = reinterpret_cast<const uint2*>(src);
823
+ #ifdef PTX_AVAILABLE
824
+ asm volatile("st.global.cs.v2.u32 [%0], {%1, %2};\n"
825
+ :
826
+ : "l"(dst), "r"(data[0].x), "r"(data[0].y));
827
+ #else
828
+ uint2* dst_cast = reinterpret_cast<uint2*>(dst);
829
+ dst_cast[0] = data[0];
830
+ #endif
831
+ }
832
+
833
+ template <>
834
+ __device__ __forceinline__ void store_global<4>(void* dst, const void* src)
835
+ {
836
+ const int32_t* data = reinterpret_cast<const int32_t*>(src);
837
+ #ifdef PTX_AVAILABLE
838
+ asm volatile("st.global.wb.u32 [%0], %1;\n" : : "l"(dst), "r"(*data));
839
+ #else
840
+ int32_t* dst_cast = reinterpret_cast<int32_t*>(dst);
841
+ dst_cast[0] = data[0];
842
+ #endif
843
+ }
844
+
845
+ template <>
846
+ __device__ __forceinline__ void store_global<4, StorePolicy::CacheGlobal>(void* dst,
847
+ const void* src)
848
+ {
849
+ const int32_t* data = reinterpret_cast<const int32_t*>(src);
850
+ #ifdef PTX_AVAILABLE
851
+ asm volatile("st.global.cg.u32 [%0], %1;\n" : : "l"(dst), "r"(*data));
852
+ #else
853
+ int32_t* dst_cast = reinterpret_cast<int32_t*>(dst);
854
+ dst_cast[0] = data[0];
855
+ #endif
856
+ }
857
+
858
+ template <>
859
+ __device__ __forceinline__ void store_global<4, StorePolicy::CacheStreaming>(void* dst,
860
+ const void* src)
861
+ {
862
+ const int32_t* data = reinterpret_cast<const int32_t*>(src);
863
+ #ifdef PTX_AVAILABLE
864
+ asm volatile("st.global.cs.u32 [%0], %1;\n" : : "l"(dst), "r"(*data));
865
+ #else
866
+ int32_t* dst_cast = reinterpret_cast<int32_t*>(dst);
867
+ dst_cast[0] = data[0];
868
+ #endif
869
+ }
870
+
871
+ template <>
872
+ __device__ __forceinline__ void store_global<2>(void* dst, const void* src)
873
+ {
874
+ const int16_t* data = reinterpret_cast<const int16_t*>(src);
875
+
876
+ int16_t* dst_cast = reinterpret_cast<int16_t*>(dst);
877
+ dst_cast[0] = data[0];
878
+ }
879
+
880
+ template <>
881
+ __device__ __forceinline__ void store_global<2, StorePolicy::CacheGlobal>(void* dst,
882
+ const void* src)
883
+ {
884
+ const int16_t* data = reinterpret_cast<const int16_t*>(src);
885
+
886
+ int16_t* dst_cast = reinterpret_cast<int16_t*>(dst);
887
+ dst_cast[0] = data[0];
888
+ }
889
+
890
+ template <>
891
+ __device__ __forceinline__ void store_global<2, StorePolicy::CacheStreaming>(void* dst,
892
+ const void* src)
893
+ {
894
+ const int16_t* data = reinterpret_cast<const int16_t*>(src);
895
+
896
+ int16_t* dst_cast = reinterpret_cast<int16_t*>(dst);
897
+ dst_cast[0] = data[0];
898
+ }
899
+
900
+ /////////// Store Shared ///////////
901
+
902
+ template <>
903
+ __device__ __forceinline__ void store_shared<16>(void* dst, const void* src)
904
+ {
905
+ const uint4* data = reinterpret_cast<const uint4*>(src);
906
+ #ifdef PTX_AVAILABLE
907
+ unsigned dst_int = internal::convert_to_shared(dst);
908
+
909
+ asm volatile("st.shared.v4.u32 [%0], {%1, %2, %3, %4};\n"
910
+ :
911
+ : "r"(dst_int), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w));
912
+ #else
913
+ uint4* dst_cast = reinterpret_cast<uint4*>(dst);
914
+ dst_cast[0] = data[0];
915
+ #endif
916
+ }
917
+
918
+ template <>
919
+ __device__ __forceinline__ void store_shared<8>(void* dst, const void* src)
920
+ {
921
+ const uint2* data = reinterpret_cast<const uint2*>(src);
922
+ #ifdef PTX_AVAILABLE
923
+ unsigned dst_int = internal::convert_to_shared(dst);
924
+
925
+ asm volatile("st.shared.v2.u32 [%0], {%1, %2};\n"
926
+ :
927
+ : "r"(dst_int), "r"(data[0].x), "r"(data[0].y));
928
+ #else
929
+ uint2* dst_cast = reinterpret_cast<uint2*>(dst);
930
+ dst_cast[0] = data[0];
931
+ #endif
932
+ }
933
+
934
+ template <>
935
+ __device__ __forceinline__ void store_shared<4>(void* dst, const void* src)
936
+ {
937
+ const int32_t* data = reinterpret_cast<const int32_t*>(src);
938
+ #ifdef PTX_AVAILABLE
939
+ unsigned dst_int = internal::convert_to_shared(dst);
940
+
941
+ asm volatile("st.shared.u32 [%0], %1;\n" : : "r"(dst_int), "r"(*data));
942
+ #else
943
+ int32_t* dst_cast = reinterpret_cast<int32_t*>(dst);
944
+ dst_cast[0] = data[0];
945
+ #endif
946
+ }
947
+
948
+ /////////// Asynchronous Memory Copy ///////////
949
+
950
+ #ifdef ASYNC_COPY_AVAILABLE
951
+ template <int AccessSize>
952
+ __device__ __forceinline__ void memcpy_async(void* shr, const void* gbl)
953
+ {
954
+ static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16));
955
+ unsigned shr_int = internal::convert_to_shared(shr);
956
+
957
+ asm volatile("cp.async.ca.shared.global [%0], [%1], %2;\n"
958
+ :
959
+ : "r"(shr_int), "l"(gbl), "n"(AccessSize));
960
+ }
961
+
962
+ template <int AccessSize>
963
+ __device__ __forceinline__ void memcpy_async_nop(void* shr, const void* gbl, bool predicate)
964
+ {
965
+ static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16));
966
+ unsigned shr_int = internal::convert_to_shared(shr);
967
+
968
+ asm volatile(
969
+ "{\n"
970
+ " .reg .pred p;\n"
971
+ " setp.ne.b32 p, %0, 0;\n"
972
+ " @p cp.async.ca.shared.global [%1], [%2], %3;\n"
973
+ "}\n"
974
+ :
975
+ : "r"((int)predicate), "r"(shr_int), "l"(gbl), "n"(AccessSize));
976
+ }
977
+
978
+ template <int AccessSize>
979
+ __device__ __forceinline__ void memcpy_async_zero(void* shr, const void* gbl, bool predicate)
980
+ {
981
+ static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16));
982
+ unsigned shr_int = internal::convert_to_shared(shr);
983
+ int bytes_to_copy = (predicate ? AccessSize : 0);
984
+
985
+ asm volatile("cp.async.ca.shared.global [%0], [%1], %2, %3;\n"
986
+ :
987
+ : "r"(shr_int), "l"(gbl), "n"(AccessSize), "r"(bytes_to_copy));
988
+ }
989
+
990
+ template <int AccessSize>
991
+ __device__ __forceinline__ void memcpy_async_zero_nop(void* shr,
992
+ const void* gbl,
993
+ bool zero_predicate,
994
+ bool nop_predicate)
995
+ {
996
+ static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16));
997
+ unsigned shr_int = internal::convert_to_shared(shr);
998
+ int bytes_to_copy = (zero_predicate ? AccessSize : 0);
999
+
1000
+ asm volatile(
1001
+ "{\n"
1002
+ " .reg .pred p;\n"
1003
+ " setp.ne.b32 p, %0, 0;\n"
1004
+ " @p cp.async.ca.shared.global [%1], [%2], %3, %4;\n"
1005
+ "}\n"
1006
+ :
1007
+ : "r"((int)nop_predicate), "r"(shr_int), "l"(gbl), "n"(AccessSize), "r"(bytes_to_copy));
1008
+ }
1009
+
1010
+ // Cache global variants. Separate interface to require deliberate use of them.
1011
+ __device__ __forceinline__ void memcpy_async_cg(void* shr, const void* gbl)
1012
+ {
1013
+ unsigned shr_int = internal::convert_to_shared(shr);
1014
+
1015
+ asm volatile("cp.async.cg.shared.global [%0], [%1], 16;\n" : : "r"(shr_int), "l"(gbl));
1016
+ }
1017
+
1018
+ __device__ __forceinline__ void memcpy_async_nop_cg(void* shr, const void* gbl, bool predicate)
1019
+ {
1020
+ unsigned shr_int = internal::convert_to_shared(shr);
1021
+
1022
+ asm volatile(
1023
+ "{\n"
1024
+ " .reg .pred p;\n"
1025
+ " setp.ne.b32 p, %0, 0;\n"
1026
+ " @p cp.async.cg.shared.global [%1], [%2], 16;\n"
1027
+ "}\n"
1028
+ :
1029
+ : "r"((int)predicate), "r"(shr_int), "l"(gbl));
1030
+ }
1031
+
1032
+ __device__ __forceinline__ void memcpy_async_zero_cg(void* shr, const void* gbl, bool predicate)
1033
+ {
1034
+ unsigned shr_int = internal::convert_to_shared(shr);
1035
+ int bytes_to_copy = (predicate ? 16 : 0);
1036
+
1037
+ asm volatile("cp.async.cg.shared.global [%0], [%1], 16, %2;\n"
1038
+ :
1039
+ : "r"(shr_int), "l"(gbl), "r"(bytes_to_copy));
1040
+ }
1041
+
1042
+ __device__ __forceinline__ void memcpy_async_zero_nop_cg(void* shr,
1043
+ const void* gbl,
1044
+ bool zero_predicate,
1045
+ bool nop_predicate)
1046
+ {
1047
+ unsigned shr_int = internal::convert_to_shared(shr);
1048
+ int bytes_to_copy = (zero_predicate ? 16 : 0);
1049
+
1050
+ asm volatile(
1051
+ "{\n"
1052
+ " .reg .pred p;\n"
1053
+ " setp.ne.b32 p, %0, 0;\n"
1054
+ " @p cp.async.cg.shared.global [%1], [%2], 16, %3;\n"
1055
+ "}\n"
1056
+ :
1057
+ : "r"((int)nop_predicate), "r"(shr_int), "l"(gbl), "r"(bytes_to_copy));
1058
+ }
1059
+
1060
+ __device__ __forceinline__ void memcpy_async_fence() { asm volatile("cp.async.commit_group;\n"); }
1061
+
1062
+ template <int stages>
1063
+ __device__ __forceinline__ void memcpy_async_wait()
1064
+ {
1065
+ static_assert(stages <= 8);
1066
+
1067
+ asm volatile("cp.async.wait_group %0;\n" : : "n"(stages));
1068
+ }
1069
+
1070
+ // TODO: The tail complete should be a known compile time artifact, should try and induce this
1071
+ // without all of the branches from the call-site. This is a hacky solution.
1072
+ template <>
1073
+ __device__ __forceinline__ void tail_complete_wait<1>(int remaining_stages)
1074
+ {
1075
+ if (remaining_stages == 0) memcpy_async_wait<0>();
1076
+ }
1077
+
1078
+ template <>
1079
+ __device__ __forceinline__ void tail_complete_wait<2>(int remaining_stages)
1080
+ {
1081
+ if (remaining_stages == 1)
1082
+ memcpy_async_wait<1>();
1083
+ else if (remaining_stages == 0)
1084
+ memcpy_async_wait<0>();
1085
+ }
1086
+
1087
+ template <>
1088
+ __device__ __forceinline__ void tail_complete_wait<3>(int remaining_stages)
1089
+ {
1090
+ if (remaining_stages == 2)
1091
+ memcpy_async_wait<2>();
1092
+ else if (remaining_stages == 1)
1093
+ memcpy_async_wait<1>();
1094
+ else if (remaining_stages == 0)
1095
+ memcpy_async_wait<0>();
1096
+ }
1097
+
1098
+ template <>
1099
+ __device__ __forceinline__ void tail_complete_wait<4>(int remaining_stages)
1100
+ {
1101
+ if (remaining_stages == 3)
1102
+ memcpy_async_wait<3>();
1103
+ else if (remaining_stages == 2)
1104
+ memcpy_async_wait<2>();
1105
+ else if (remaining_stages == 1)
1106
+ memcpy_async_wait<1>();
1107
+ else if (remaining_stages == 0)
1108
+ memcpy_async_wait<0>();
1109
+ }
1110
+
1111
+ template <>
1112
+ __device__ __forceinline__ void tail_complete_wait<5>(int remaining_stages)
1113
+ {
1114
+ if (remaining_stages == 4)
1115
+ memcpy_async_wait<4>();
1116
+ else if (remaining_stages == 3)
1117
+ memcpy_async_wait<3>();
1118
+ else if (remaining_stages == 2)
1119
+ memcpy_async_wait<2>();
1120
+ else if (remaining_stages == 1)
1121
+ memcpy_async_wait<1>();
1122
+ else if (remaining_stages == 0)
1123
+ memcpy_async_wait<0>();
1124
+ }
1125
+
1126
+ template <>
1127
+ __device__ __forceinline__ void tail_complete_wait<6>(int remaining_stages)
1128
+ {
1129
+ if (remaining_stages == 5)
1130
+ memcpy_async_wait<5>();
1131
+ else if (remaining_stages == 4)
1132
+ memcpy_async_wait<4>();
1133
+ else if (remaining_stages == 3)
1134
+ memcpy_async_wait<3>();
1135
+ else if (remaining_stages == 2)
1136
+ memcpy_async_wait<2>();
1137
+ else if (remaining_stages == 1)
1138
+ memcpy_async_wait<1>();
1139
+ else if (remaining_stages == 0)
1140
+ memcpy_async_wait<0>();
1141
+ }
1142
+ #endif
1143
+
1144
+ } // namespace mem_access
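The cp.async helpers above are intended to be composed into a software pipeline: issue a batch of asynchronous copies into shared memory, commit them with memcpy_async_fence(), and later block on memcpy_async_wait<N>() before consuming the staged tile. A rough double-buffered sketch is below; it is illustrative only, assumes a device with cp.async support (ASYNC_COPY_AVAILABLE), and assumes each thread stages exactly 16 bytes per tile (TILE_BYTES == blockDim.x * 16).

// Illustrative double-buffered consumer; not part of this header.
template <int TILE_BYTES>
__device__ void pipelined_consume(const char* global_src, int num_tiles)
{
    __shared__ char stage[2][TILE_BYTES];
    const int lane_bytes = threadIdx.x * 16;

    // Prime the pipeline with the first tile.
    mem_access::memcpy_async<16>(stage[0] + lane_bytes, global_src + lane_bytes);
    mem_access::memcpy_async_fence();

    for (int tile = 0; tile < num_tiles; tile++) {
        const int cur = tile & 1;
        const int nxt = cur ^ 1;

        // Prefetch the next tile while the current one completes.
        if (tile + 1 < num_tiles) {
            const char* next_src = global_src + (tile + 1) * TILE_BYTES;
            mem_access::memcpy_async<16>(stage[nxt] + lane_bytes, next_src + lane_bytes);
        }
        mem_access::memcpy_async_fence();

        // Allow at most one copy group (the prefetch just issued) to remain in flight.
        mem_access::memcpy_async_wait<1>();
        __syncthreads();

        // ... consume stage[cur] here ...

        __syncthreads();  // all threads finish reading before the buffer is overwritten
    }
}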
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/normalize_layer.h ADDED
@@ -0,0 +1,207 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include <cuda.h>
9
+ #include <cuda_fp16.h>
10
+ #include <stdio.h>
11
+ #include <fstream>
12
+ #include "custom_cuda_layers.h"
13
+
14
+ using namespace std;
15
+
16
+ template <typename T>
17
+ class Normalize_Layer {
18
+ public:
19
+ struct Config {
20
+ uint32_t batchSize;
21
+ uint32_t seqLength;
22
+ uint32_t hiddenDim;
23
+ float epsilon;
24
+ bool training;
25
+ bool useMean;
26
+ Config(uint32_t batch,
27
+ uint32_t seq,
28
+ uint32_t h,
29
+ float epsilon = 1e-12,
30
+ bool training = true,
31
+ bool useMean = true)
32
+ : batchSize(batch),
33
+ seqLength(seq),
34
+ hiddenDim(h),
35
+ epsilon(epsilon),
36
+ training(training),
37
+ useMean(useMean)
38
+ {
39
+ }
40
+ };
41
+
42
+ Normalize_Layer(Config config)
43
+ : config_(config), vars(nullptr), means(nullptr), vals_hat(nullptr)
44
+ {
45
+ }
46
+
47
+ ~Normalize_Layer() {}
48
+
49
+ void ForwardCheckpoint(int bsz, // batch * seq
50
+ T* vals,
51
+ const T* residual,
52
+ const T* gamma,
53
+ const T* betta,
54
+ cudaStream_t& stream,
55
+ bool preLayerNorm = false)
56
+ {
57
+ launch_bias_residual_layer_norm(vals,
58
+ residual,
59
+ gamma,
60
+ betta,
61
+ config_.epsilon,
62
+ bsz,
63
+ config_.hiddenDim,
64
+ stream,
65
+ preLayerNorm,
66
+ config_.training,
67
+ vars,
68
+ means);
69
+ }
70
+
71
+ void Forward(int bsz,
72
+ T* vals,
73
+ const T* residual,
74
+ const T* gamma,
75
+ const T* betta,
76
+ cudaStream_t& stream,
77
+ bool preLayerNorm = false)
78
+ {
79
+ launch_bias_residual_layer_norm(vals,
80
+ residual,
81
+ gamma,
82
+ betta,
83
+ config_.epsilon,
84
+ bsz,
85
+ config_.hiddenDim,
86
+ stream,
87
+ preLayerNorm,
88
+ config_.training,
89
+ vars);
90
+ }
91
+
92
+ void Backward(int bsz,
93
+ const T* out_grad,
94
+ const T* gamma,
95
+ T* gamma_grad,
96
+ T* betta_grad,
97
+ cudaStream_t stream[2],
98
+ T* inp_grad_out,
99
+ const T* norm_in = nullptr)
100
+ {
101
+ launch_layerNorm_backward(out_grad,
102
+ norm_in,
103
+ vars,
104
+ means,
105
+ gamma,
106
+ gamma_grad,
107
+ betta_grad,
108
+ inp_grad_out,
109
+ bsz,
110
+ config_.hiddenDim,
111
+ stream);
112
+ }
113
+
114
+ void Backward(int bsz,
115
+ const T* out_grad,
116
+ const T* gamma,
117
+ const T* betta,
118
+ T* gamma_grad,
119
+ T* betta_grad,
120
+ cudaStream_t stream[2],
121
+ T* inp_grad_out,
122
+ const T* norm_out)
123
+ {
124
+ launch_layerNorm_backward(out_grad,
125
+ norm_out,
126
+ vars,
127
+ gamma,
128
+ gamma_grad,
129
+ betta_grad,
130
+ inp_grad_out,
131
+ bsz,
132
+ config_.hiddenDim,
133
+ stream,
134
+ !config_.useMean,
135
+ betta);
136
+ }
137
+
138
+ void BackwardFusedAdd(int bsz,
139
+ const T* out_grad1,
140
+ const T* out_grad2,
141
+ const T* gamma,
142
+ T* gamma_grad,
143
+ T* betta_grad,
144
+ cudaStream_t stream[2],
145
+ T* inp_grad_out,
146
+ const T* norm_in = nullptr)
147
+ {
148
+ launch_layerNorm_backward_fused_add(out_grad1,
149
+ out_grad2,
150
+ norm_in,
151
+ vars,
152
+ means,
153
+ gamma,
154
+ gamma_grad,
155
+ betta_grad,
156
+ inp_grad_out,
157
+ bsz,
158
+ config_.hiddenDim,
159
+ stream);
160
+ }
161
+
162
+ void BackwardFusedAdd(int bsz,
163
+ const T* out_grad1,
164
+ const T* out_grad2,
165
+ const T* gamma,
166
+ const T* betta,
167
+ T* gamma_grad,
168
+ T* betta_grad,
169
+ cudaStream_t stream[2],
170
+ T* inp_grad_out,
171
+ const T* norm_out)
172
+ {
173
+ launch_layerNorm_backward_fused_add(out_grad1,
174
+ out_grad2,
175
+ norm_out,
176
+ vars,
177
+ gamma,
178
+ gamma_grad,
179
+ betta_grad,
180
+ inp_grad_out,
181
+ bsz,
182
+ config_.hiddenDim,
183
+ stream,
184
+ !config_.useMean,
185
+ betta);
186
+ }
187
+
188
+ inline bool UseMean() const { return config_.useMean; }
189
+
190
+ inline void SetVar(T* variance)
191
+ {
192
+ if (!variance) { throw std::runtime_error("Normalize variance is null."); }
193
+ vars = variance;
194
+ }
195
+
196
+ inline void SetMean(T* mean)
197
+ {
198
+ if (!mean) { throw std::runtime_error("Normalize mean is null."); }
199
+ means = mean;
200
+ }
201
+
202
+ private:
203
+ Config config_;
204
+ T* vars;
205
+ T* means;
206
+ T* vals_hat;
207
+ };
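A sketch of how this wrapper is typically driven from host code follows. The buffer names, sizes, and the float instantiation are assumptions for illustration; the mean/variance workspaces must hold batch * seq elements when checkpointing activations for backward.

// Illustrative host-side usage only; not part of this header.
void run_layer_norm(float* out,            // [batch * seq, hidden] output
                    const float* residual, // input (+ residual) to normalize
                    const float* gamma,
                    const float* beta,
                    float* vars_buf,       // device workspace, batch * seq elements
                    float* means_buf,      // device workspace, batch * seq elements
                    cudaStream_t stream)
{
    const uint32_t batch = 8, seq = 128, hidden = 1024;

    Normalize_Layer<float>::Config cfg(
        batch, seq, hidden, /*epsilon=*/1e-12f, /*training=*/true, /*useMean=*/true);
    Normalize_Layer<float> norm(cfg);

    norm.SetVar(vars_buf);    // variance workspace, consumed again in Backward
    norm.SetMean(means_buf);  // mean workspace, required when useMean is true

    norm.ForwardCheckpoint(batch * seq, out, residual, gamma, beta, stream);
}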
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/quantization.h ADDED
@@ -0,0 +1,108 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include <cuda_fp16.h>
9
+ #include "ds_kernel_utils.h"
10
+
11
+ namespace quantize {
12
+
13
+ enum class Type { Symmetric, Asymmetric };
14
+
15
+ struct PackedInt4 {
16
+ int8_t high : 4;
17
+ int8_t low : 4;
18
+ };
19
+
20
+ DS_HD_INLINE bool requires_offset(Type qType) { return qType == Type::Asymmetric; }
21
+
22
+ } // namespace quantize
23
+
24
+ void launch_quant(int8_t* output_data,
25
+ float* params,
26
+ const __half* input_data,
27
+ const int groups,
28
+ const int elems_per_group,
29
+ const int num_bits,
30
+ const quantize::Type quant_type,
31
+ cudaStream_t stream);
32
+
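For context, a host-side call for 8-bit symmetric quantization might look like the sketch below. The tensor shape, grouping, and buffer names are assumptions; one scale is written to the params buffer per group.

// Illustrative only: quantize a [4096 x 4096] __half tensor row-by-row to int8.
// All pointers are assumed to be device allocations of the appropriate size.
void quantize_activations(int8_t* q_out,    // 4096 * 4096 bytes
                          float* q_scales,  // one float per group (symmetric)
                          const __half* activations,
                          cudaStream_t stream)
{
    const int groups = 4096;           // one quantization group per row
    const int elems_per_group = 4096;  // row width
    launch_quant(q_out, q_scales, activations, groups, elems_per_group,
                 /*num_bits=*/8, quantize::Type::Symmetric, stream);
}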
33
+ template <typename T>
34
+ void launch_dequantize_kernel(T* dequant_data,
35
+ const int8_t* q_data,
36
+ const float* q_params,
37
+ quantize::Type q_type,
38
+ int num_bits,
39
+ int elems_per_group,
40
+ int total_elems,
41
+ cudaStream_t stream);
42
+
43
+ void launch_swizzled_quant(int8_t* q_data,
44
+ float* q_scales,
45
+ const __half* input_data,
46
+ int num_bits,
47
+ quantize::Type q_type,
48
+ int groups,
49
+ int elems_per_group,
50
+ int pipelining,
51
+ int nodes,
52
+ int devices_per_node,
53
+ cudaStream_t stream);
54
+
55
+ void launch_dequant_reduce(int8_t* reduced_data,
56
+ float* reduced_scales,
57
+ const int8_t* input_data,
58
+ const float* input_scales,
59
+ int num_gpus,
60
+ int num_bits,
61
+ quantize::Type quant_type,
62
+ int out_groups,
63
+ int elems_per_out_group,
64
+ int elems_per_in_tensor,
65
+ int groups_per_in_tensor,
66
+ int elems_per_in_group,
67
+ cudaStream_t stream);
68
+
69
+ template <typename T>
70
+ void launch_fake_quantize_kernel(T* vals,
71
+ int total_count,
72
+ int group_num,
73
+ int num_bits,
74
+ cudaStream_t stream);
75
+ template <typename T>
76
+ void launch_sr_fake_quantize_kernel(T* vals,
77
+ int total_count,
78
+ int group_num,
79
+ int num_bits,
80
+ cudaStream_t stream);
81
+ template <typename T>
82
+ void launch_fake_quantize_kernel_asym(T* vals,
83
+ int total_count,
84
+ int group_num,
85
+ int num_bits,
86
+ cudaStream_t stream);
87
+ template <typename T>
88
+ void launch_sr_fake_quantize_kernel_asym(T* vals,
89
+ int total_count,
90
+ int group_num,
91
+ int num_bits,
92
+ cudaStream_t stream);
93
+
94
+ void launch_dequantize_int4_to_half_experimental(uint8_t* data_in,
95
+ half* data_out,
96
+ half* scale_buffer,
97
+ half* min_val_buffer,
98
+ int num_group,
99
+ int group_size,
100
+ cudaStream_t stream);
101
+
102
+ void launch_dequantize_int8_to_half_experimental(uint8_t* data_in,
103
+ half* data_out,
104
+ half* scale_buffer,
105
+ half* min_val_buffer,
106
+ int num_group,
107
+ int group_size,
108
+ cudaStream_t stream);
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/quantization_utils.h ADDED
@@ -0,0 +1,468 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include <cassert>
7
+ #include "conversion_utils.h"
8
+ #include "ds_kernel_utils.h"
9
+ #include "memory_access_utils.h"
10
+ #include "quantization.h"
11
+ #include "reduction_utils.h"
12
+
13
+ #pragma once
14
+
15
+ using rop = reduce::ROpType;
16
+
17
+ namespace quantize {
18
+ constexpr int granularity = 16;
19
+ constexpr int h_per_load = granularity / sizeof(__half);
20
+ constexpr int h2_per_load = granularity / sizeof(__half2);
21
+ constexpr int max_threads = 1024;
22
+
23
+ /*
24
+ Class to hold the quantization parameters for a given tensor.
25
+ Holds the implementation of the quantization operation.
26
+ */
27
+ template <Type qType, int numBits>
28
+ class Params {
29
+ public:
30
+ /*
31
+ Quantization implementation, supports
32
+ 1) 4 Bit
33
+ 2) 8 Bit
34
+ 3) Symmetric
35
+ 4) Asymmetric
36
+ Function Arguments :
37
+ val : The __half value to quantize.
38
+ */
39
+ DS_D_INLINE int8_t quantize(__half val);
40
+
41
+ template <typename T>
42
+ DS_D_INLINE T dequantize(int8_t val);
43
+
44
+ DS_D_INLINE void store(float* params, int group_index);
45
+
46
+ // Initialize from memory
47
+ DS_D_INLINE Params(const float* params, int group_index);
48
+ };
49
+
50
+ template <int numBits>
51
+ class Params<Type::Symmetric, numBits> {
52
+ public:
53
+ float scale;
54
+
55
+ DS_D_INLINE Params(float max)
56
+ {
57
+ if (max == 0) {
58
+ scale = 1.0;
59
+ } else {
60
+ scale = (1 << numBits) / (2 * max);
61
+ }
62
+ }
63
+
64
+ DS_D_INLINE int8_t quantize(__half val)
65
+ {
66
+ constexpr int32_t q_min = -(1 << (numBits - 1));
67
+ constexpr int32_t q_max = (1 << (numBits - 1)) - 1;
68
+
69
+ float val_f = conversion::to<float>(val) * scale;
70
+ int32_t data_i32 = conversion::to<int32_t>(val_f);
71
+ data_i32 = min(max(data_i32, q_min), q_max);
72
+ return (int8_t)data_i32;
73
+ }
74
+
75
+ template <typename T>
76
+ DS_D_INLINE T dequantize(int8_t val)
77
+ {
78
+ const float val_deq_f = conversion::to<float>(val) * scale;
79
+ return conversion::to<T>(val_deq_f);
80
+ }
81
+
82
+ DS_D_INLINE void store(float* params, int group_index)
83
+ {
84
+ const float store_scale = 1 / scale;
85
+ mem_access::store_global<sizeof(float)>(params + group_index, &store_scale);
86
+ }
87
+
88
+ DS_D_INLINE Params(const float* params, int group_index)
89
+ {
90
+ mem_access::load_global<sizeof(float)>(&scale, params + group_index);
91
+ }
92
+ };
93
+
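As a numeric sanity check of the symmetric path above: with numBits = 8 and a group absmax of 2.0 the scale is 256 / (2 * 2.0) = 64, so 1.5 quantizes to 96 (well inside [-128, 127]), and store() writes the reciprocal scale 1/64 for later dequantization. Below is a host-side mirror of that arithmetic, for intuition only; note the device code converts through conversion::to<int32_t>, whose rounding may differ from the plain cast used here.

#include <algorithm>
#include <cstdint>

// Host-side mirror of the symmetric quantization arithmetic; illustrative only.
int8_t symmetric_quantize_ref(float val, float group_absmax, int num_bits = 8)
{
    const float scale =
        (group_absmax == 0.f) ? 1.f : (1 << num_bits) / (2.f * group_absmax);
    const int32_t q_min = -(1 << (num_bits - 1));
    const int32_t q_max = (1 << (num_bits - 1)) - 1;
    const int32_t q =
        std::min(std::max(static_cast<int32_t>(val * scale), q_min), q_max);
    return static_cast<int8_t>(q);  // e.g. val = 1.5, absmax = 2.0 -> 96
}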
94
+ template <int numBits>
95
+ class Params<Type::Asymmetric, numBits> {
96
+ public:
97
+ float scale;
98
+ float offset;
99
+
100
+ DS_D_INLINE Params(float max, float min)
101
+ {
102
+ if (max == min) {
103
+ scale = 1.0;
104
+ } else {
105
+ scale = ((1 << numBits)) / (max - min);
106
+ }
107
+ offset = (max + min) / 2;
108
+ }
109
+
110
+ DS_D_INLINE int8_t quantize(__half val)
111
+ {
112
+ constexpr int32_t q_min = -(1 << (numBits - 1));
113
+ constexpr int32_t q_max = (1 << (numBits - 1)) - 1;
114
+
115
+ float val_f = (conversion::to<float>(val) - offset) * scale;
116
+ int32_t data_i32 = conversion::to<int32_t>(val_f);
117
+ data_i32 = min(max(data_i32, q_min), q_max);
118
+ return (int8_t)data_i32;
119
+ }
120
+
121
+ template <typename T>
122
+ DS_D_INLINE T dequantize(int8_t val)
123
+ {
124
+ const float val_deq_f = ((conversion::to<float>(val)) * scale) + offset;
125
+ return conversion::to<T>(val_deq_f);
126
+ }
127
+
128
+ DS_D_INLINE void store(float* params, int group_index)
129
+ {
130
+ // Codegen should turn this into stg.64
131
+ const float store_scale = 1 / scale;
132
+ mem_access::store_global<sizeof(float)>(params + 2 * group_index, &store_scale);
133
+ mem_access::store_global<sizeof(float)>(params + 2 * group_index + 1, &offset);
134
+ }
135
+
136
+ DS_D_INLINE Params(const float* params, int group_index)
137
+ {
138
+ // Codegen should turn this into ldg.64
139
+ mem_access::load_global<sizeof(float)>(&scale, params + 2 * group_index);
140
+ mem_access::load_global<sizeof(float)>(&offset, params + 2 * group_index + 1);
141
+ }
142
+ };
143
+
144
+ /*
145
+ Group stats tracks the necessary statistics about the quantized group
146
+ to abstract the particulars for the main loop.
147
+ */
148
+ template <Type qType>
149
+ class GroupStats {
150
+ public:
151
+ DS_D_INLINE void update(__half2 val);
152
+
153
+ DS_D_INLINE void reduce(cg::thread_block& tb, cg::thread_block_tile<hw_warp_size>& warp);
154
+ };
155
+
156
+ template <>
157
+ class GroupStats<Type::Symmetric> {
158
+ public:
159
+ // Symmetric quantization only tracks the maximum absolute value
160
+ __half2 cur_max;
161
+ float max;
162
+
163
+ /*
164
+ Technically, this would give bad results if there
165
+ are 0 values to process since the reduction would
166
+ give -inf instead of 0. We do not consider this
167
+ to be a reasonable edge case.
168
+ */
169
+ DS_D_INLINE GroupStats() { cur_max = reduce::init<rop::Max, __half2>(); }
170
+
171
+ /*
172
+ Updates the running absmax used to calculate params.
173
+ Function Arguments :
174
+ val : The __half2 value to update the running min and max with.
175
+ */
176
+ DS_D_INLINE void update(__half2 val)
177
+ {
178
+ cur_max = reduce::element<rop::Max>(cur_max, __habs2(val));
179
+ }
180
+
181
+ /*
182
+ Function to return calculated quantization params.
183
+ Template Arguments :
184
+ numBits - Number of bits in quantized element. int : 8 or 4
185
+ Function Arguments :
186
+ tb - Threadblock object. cg::thread_block
187
+ warp - Warp object. cg::thread_block_tile<hw_warp_size>
188
+ */
189
+ template <int numBits, int threads_per_group>
190
+ DS_D_INLINE Params<Type::Symmetric, numBits> get_params(
191
+ cg::thread_block& tb,
192
+ cg::thread_block_tile<hw_warp_size>& warp)
193
+ {
194
+ const float2 partial_max = conversion::to<float2>(cur_max);
195
+ float max = reduce::element<rop::Max>(partial_max.x, partial_max.y);
196
+
197
+ reduce::partitioned_block<rop::Max, threads_per_group>(tb, warp, max);
198
+ Params<Type::Symmetric, numBits> params(max);
199
+
200
+ return params;
201
+ }
202
+ };
203
+
204
+ template <>
205
+ class GroupStats<Type::Asymmetric> {
206
+ public:
207
+ __half2 cur_max;
208
+ __half2 cur_min;
209
+
210
+ /*
211
+ Initialize cur_max to -inf, cur_min to inf since
212
+ we are doing a true range analysis.
213
+ */
214
+ DS_D_INLINE GroupStats()
215
+ {
216
+ cur_max = reduce::init<rop::Max, __half2>();
217
+ cur_min = reduce::init<rop::Min, __half2>();
218
+ }
219
+
220
+ /*
221
+ Updates the running min and max used to calculate params.
222
+ Function Arguments :
223
+ val : The __half2 value to update the running min and max with.
224
+ */
225
+ DS_D_INLINE void update(__half2 val)
226
+ {
227
+ cur_max = reduce::element<rop::Max>(cur_max, val);
228
+ cur_min = reduce::element<rop::Min>(cur_min, val);
229
+ }
230
+
231
+ /*
232
+ Function to return calculated quantization params.
233
+ Template Arguments :
234
+ numBits - Number of bits in quantized element. int : 8 or 4
235
+ Function Arguments :
236
+ tb - Threadblock object. cg::thread_block
237
+ warp - Warp object. cg::thread_block_tile<hw_warp_size>
238
+ */
239
+ template <int numBits, int threads_per_group>
240
+ DS_D_INLINE Params<Type::Asymmetric, numBits> get_params(
241
+ cg::thread_block& tb,
242
+ cg::thread_block_tile<hw_warp_size>& warp)
243
+ {
244
+ const float2 partial_max = conversion::to<float2>(cur_max);
245
+ float max = reduce::element<rop::Max>(partial_max.x, partial_max.y);
246
+
247
+ const float2 partial_min = conversion::to<float2>(cur_min);
248
+ float min = reduce::element<rop::Min>(partial_min.x, partial_min.y);
249
+
250
+ reduce::partitioned_block<rop::Max, rop::Min, threads_per_group>(tb, warp, max, min);
251
+
252
+ Params<Type::Asymmetric, numBits> params(max, min);
253
+
254
+ return params;
255
+ }
256
+ };
257
+
258
+ /*
259
+ Device function that quantizes 16 bytes of __half type input data.
260
+ Template Arguments :
261
+ numBits - Number of bits in quantized element. int : 8 or 4
262
+ qType - Type of quantization to perform. Type::Symmetric or Type::Asymmetric
263
+ Function Arguments :
264
+ local_output - Pointer to local memory to store quantized data. int8_t*
265
+ data - Pointer to input data. __half*
266
+ Params - Parameters for quantization. Params<qType, numBits>
267
+ */
268
+ template <int numBits, Type qType>
269
+ DS_D_INLINE void _chunk(int8_t* local_output, const __half* data, Params<qType, numBits> q_params);
270
+
271
+ /*
272
+ Device function that quantizes 16 bytes of __half2 type input data.
273
+ Template Arguments :
274
+ numBits - Number of bits in quantized element. int : 8 or 4
275
+ qType - Type of quantization to perform. Type::Symmetric or Type::Asymmetric
276
+ Function Arguments :
277
+ local_output - Pointer to local memory to store quantized data. int8_t*
278
+ data - Pointer to input data. __half2*
279
+ Params - Parameters for quantization. Params<qType, numBits>
280
+ */
281
+ template <int numBits, Type qType>
282
+ DS_D_INLINE void _chunk(int8_t* local_output, const __half2* data, Params<qType, numBits> q_params);
283
+
284
+ /*
285
+ Helper function to do serial reduction on register-file arrays.
286
+ Template Arguments :
287
+ qType - Type of quantization to perform. Type::Symmetric or Type::Asymmetric
288
+ numChunks - Number of chunks(16 bytes of Input data). int : 8 or 4
289
+ Function Arguments :
290
+ local_buffer - Pointer to memory with input half2 data to be quantized.
291
+ */
292
+ template <Type qType, int numChunks>
293
+ DS_D_INLINE GroupStats<qType> _local_serial_reduce(__half2* local_buffer);
294
+
295
+ /*
296
+ The main loop of the kernel that quantizes an array in local memory of __half2 type input data, when
297
+ Quantization parameters are pre-computed.
298
+ Template Arguments :
299
+ qType - Type of quantization to perform. Type::Symmetric or Type::Asymmetric
300
+ numBits - Number of bits in quantized element. int : 8 or 4
301
+ numChunks - Number of chunks(16 bytes of Input data). int : 8 or 4
302
+ Function Arguments :
303
+ local_buffer - Pointer to memory with input half2 data to be quantized.
304
+ scales - Pointer to output scales.
305
+ offsets - Pointer to output offsets.
306
+ output_data - Pointer to output data.
307
+ elems_per_group - Number of elements to quantize in a group.
308
+ q_params - Quantization parameters.
309
+ */
310
+ template <int numBits, Type qType, int numChunks, int threads_per_group, int max_threads>
311
+ DS_D_INLINE void local_array(cg::thread_block& tb,
312
+ cg::thread_block_tile<hw_warp_size>& warp,
313
+ __half2* local_buffer,
314
+ float* __restrict__ scales,
315
+ float* __restrict__ offsets,
316
+ int8_t* __restrict__ output_data,
317
+ const int& elems_per_group,
318
+ const int& groups,
319
+ Params<qType, numBits> q_params);
320
+
321
+ /*
322
+ The main loop of the kernel that quantizes an array in local memory of __half2 type input data.
323
+ This function computes quantization parameters for each group.
324
+ Template Arguments :
325
+ qType - Type of quantization to perform. Type::Symmetric or Type::Asymmetric
326
+ numBits - Number of bits in quantized element. int : 8 or 4
327
+ numChunks - Number of chunks(16 bytes of Input data). int : 8 or 4
328
+ Function Arguments :
329
+ local_buffer - Pointer to memory with input half2 data to be quantized.
330
+ scales - Pointer to output scales.
331
+ offsets - Pointer to output offsets.
332
+ output_data - Pointer to output data.
333
+ elems_per_group - Number of elements to quantize in a group.
334
+ */
335
+ template <Type qType, int numBits, int numChunks, int threads_per_group, int max_threads>
336
+ __device__ void local_array(__half2* local_buffer,
337
+ float* __restrict__ scales,
338
+ float* __restrict__ offsets,
339
+ int8_t* __restrict__ output_data,
340
+ const int& elems_per_group,
341
+ const int& groups);
342
+
343
+ template <int numBits, Type qType>
344
+ DS_D_INLINE void _chunk(int8_t* local_output, const __half* data, Params<qType, numBits> q_params)
345
+ {
346
+ constexpr int32_t elems = 16 / sizeof(__half);
347
+ constexpr int32_t num_elems_packed = 8 / numBits;
348
+
349
+ #pragma unroll
350
+ for (int i = 0, oi = 0; i < elems; i += num_elems_packed, oi++) {
351
+ if (num_elems_packed == 1) {
352
+ // TODO(cmikeh2): refactor to use conversion utils
353
+ local_output[i] = q_params.quantize(data[i]);
354
+ } else if (num_elems_packed == 2) {
355
+ int8_t data_i8_1 = q_params.quantize(data[i]);
356
+ int8_t data_i8_2 = q_params.quantize(data[i + 1]);
357
+ auto data_i8 = PackedInt4{data_i8_2, data_i8_1};
358
+ local_output[oi] = *((int8_t*)(&data_i8));
359
+ }
360
+ }
361
+ }
362
+
363
+ template <int numBits, Type qType>
364
+ DS_D_INLINE void _chunk(int8_t* local_output, const __half2* data, Params<qType, numBits> q_params)
365
+ {
366
+ const __half* data_cast = reinterpret_cast<const __half*>(data);
367
+ _chunk<numBits>(local_output, data_cast, q_params);
368
+ }
369
+
370
+ template <Type qType, int numChunks>
371
+ DS_D_INLINE GroupStats<qType> _local_serial_reduce(__half2* local_buffer)
372
+ {
373
+ GroupStats<qType> stats;
374
+ #pragma unroll
375
+ for (int i = 0; i < numChunks * h2_per_load; i++) { stats.update(local_buffer[i]); }
376
+
377
+ return stats;
378
+ }
379
+
380
+ template <Type qType, int numBits, int numChunks, int threads_per_group, int max_threads>
381
+ DS_D_INLINE void local_array(cg::thread_block& tb,
382
+ cg::thread_block_tile<hw_warp_size>& warp,
383
+ __half2* local_buffer,
384
+ float* __restrict__ global_params,
385
+ int8_t* __restrict__ output_data,
386
+ const int& elems_per_group,
387
+ const int& groups,
388
+ Params<qType, numBits> q_params)
389
+ {
390
+ constexpr int num_ele_int8 = 8 / numBits;
391
+ constexpr int num_int8_out = quantize::h_per_load / num_ele_int8;
392
+
393
+ // Indexing offsets
394
+ const int block_num =
395
+ (tb.group_index().x * max_threads / threads_per_group) + tb.thread_index().y;
396
+ const int block_offset = block_num * elems_per_group;
397
+ const int elem_offset = tb.thread_index().x * quantize::h_per_load;
398
+ const int base_offset = (block_offset + elem_offset) / num_ele_int8;
399
+ const int stride = tb.size() * quantize::h_per_load / num_ele_int8;
400
+
401
+ int8_t local_output[num_int8_out];
402
+
403
+ if (tb.thread_index().x == 0 && block_num < groups) {
404
+ q_params.store(
405
+ global_params,
406
+ (tb.group_index().x * max_threads / threads_per_group) + tb.thread_index().y);
407
+ }
408
+ #pragma unroll
409
+ for (int i = 0; i < numChunks; i++) {
410
+ if (elem_offset + i * stride * num_ele_int8 < elems_per_group && block_num < groups) {
411
+ quantize::_chunk<numBits, qType>(
412
+ local_output, local_buffer + i * quantize::h2_per_load, q_params);
413
+ mem_access::store_global<num_int8_out>(output_data + (base_offset + i * stride),
414
+ local_output);
415
+ }
416
+ }
417
+ }
418
+
419
+ template <Type qType, int numBits, int numChunks, int threads_per_group, int max_threads>
420
+ DS_D_INLINE void local_array(cg::thread_block& tb,
421
+ cg::thread_block_tile<hw_warp_size>& warp,
422
+ __half* local_buffer,
423
+ float* __restrict__ global_params,
424
+ int8_t* __restrict__ output_data,
425
+ const int& elems_per_group,
426
+ const int& groups,
427
+ Params<qType, numBits> q_params)
428
+ {
429
+ __half2* local_buffer_h2 = reinterpret_cast<__half2*>(local_buffer);
430
+
431
+ quantize::local_array<qType, numBits, numChunks, threads_per_group, max_threads>(
432
+ tb, warp, local_buffer_h2, global_params, output_data, elems_per_group, groups, q_params);
433
+ }
434
+
435
+ template <Type qType,
436
+ int numBits,
437
+ int numChunks,
438
+ int threads_per_group = max_threads,
439
+ int max_threads = 256>
440
+ __device__ void local_array(__half2* local_buffer,
441
+ float* __restrict__ global_params,
442
+ int8_t* __restrict__ output_data,
443
+ const int& elems_per_group,
444
+ const int& groups)
445
+ {
446
+ cg::thread_block tb = cg::this_thread_block();
447
+ cg::thread_block_tile<hw_warp_size> warp = cg::tiled_partition<hw_warp_size>(tb);
448
+
449
+ auto group_stats = _local_serial_reduce<qType, numChunks>(local_buffer);
450
+ auto params = group_stats.template get_params<numBits, threads_per_group>(tb, warp);
451
+
452
+ quantize::local_array<qType, numBits, numChunks, threads_per_group, max_threads>(
453
+ tb, warp, local_buffer, global_params, output_data, elems_per_group, groups, params);
454
+ }
455
+
456
+ template <Type qType, int numBits, int numChunks, int threads_per_group, int max_threads>
457
+ __device__ void local_array(__half* local_buffer,
458
+ float* __restrict__ global_params,
459
+ int8_t* __restrict__ output_data,
460
+ const int& elems_per_group,
461
+ const int& groups)
462
+ {
463
+ __half2* local_buffer_h2 = reinterpret_cast<__half2*>(local_buffer);
464
+ quantize::local_array<qType, numBits, numChunks, threads_per_group, max_threads>(
465
+ local_buffer_h2, global_params, output_data, elems_per_group, groups);
466
+ }
467
+
468
+ } // namespace quantize
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/quantizer.h ADDED
@@ -0,0 +1,19 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #ifdef __HIP_PLATFORM_AMD__
9
+ #include <hip/hip_cooperative_groups.h>
10
+ #else
11
+ #include <cooperative_groups.h>
12
+ #endif
13
+
14
+ #include <cuda.h>
15
+ #include <cuda_fp16.h>
16
+ #include <stdio.h>
17
+ #include <stdlib.h>
18
+ #include <cassert>
19
+ #include <iostream>
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/reduction_utils.h ADDED
@@ -0,0 +1,826 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include "conversion_utils.h"
9
+ #include "ds_kernel_utils.h"
10
+ #include "memory_access_utils.h"
11
+
12
+ namespace cg = cooperative_groups;
13
+
14
+ namespace reduce {
15
+
16
+ enum class ROpType {
17
+ // Addition
18
+ Add,
19
+
20
+ // Maximum reduction
21
+ Max,
22
+
23
+ // Minimum reduction
24
+ Min,
25
+ };
26
+
27
+ constexpr int max_threads = 1024;
28
+ constexpr int max_warps = max_threads / hw_warp_size;
29
+
30
+ /*
31
+ High level API. The API takes in a set of operations and variables
32
+ and performs that reduction operation on that variable. The reductions
33
+ of each of the arguments are completely independent of each other (
34
+ i.e., the val1-op1 combination has no impact on val2-op2).
35
+
36
+ Example usage:
37
+ ``` cpp
38
+ float max_val;
39
+ float min_val;
40
+ reduce::block<rop::Max, rop::Min>(tb, warp, max_val, min_val);
41
+ ```
42
+
43
+ TODO(cmikeh2): In theory, we might be able to do this sequentially with
44
+ device functions and rely on the assembler correctly behaving. My initial
45
+ instinct is this won't work, but if it does it would reduce implementation
46
+ cost significantly.
47
+
48
+ TODO(cmikeh2): We need to support sub-block reductions. The warp intrinsic
49
+ currently supports this (more incidentally than anything else). It is not
50
+ uncommon in something like softmax or a fused attention kernel to map multiple
51
+ reductions to a thread block, but each reduction itself is only scoped
52
+ to part of the threads (i.e., block size = 512, 128 threads per reduction).
53
+ */
54
+ template <ROpType Op, int warp_bound = max_warps>
55
+ DS_D_INLINE void block(cg::thread_block& tb, cg::thread_block_tile<hw_warp_size>& warp, float& val);
56
+
57
+ template <ROpType Op1, ROpType Op2, int warp_bound = max_warps>
58
+ DS_D_INLINE void block(cg::thread_block& tb,
59
+ cg::thread_block_tile<hw_warp_size>& warp,
60
+ float& val1,
61
+ float& val2);
62
+
63
+ template <ROpType Op1, ROpType Op2, ROpType Op3, int warp_bound = max_warps>
64
+ DS_D_INLINE void block(cg::thread_block& tb,
65
+ cg::thread_block_tile<hw_warp_size>& warp,
66
+ float& val1,
67
+ float& val2,
68
+ float& val3);
69
+
70
+ template <ROpType Op1, ROpType Op2, ROpType Op3, ROpType Op4, int warp_bound = max_warps>
71
+ DS_D_INLINE void block(cg::thread_block& tb,
72
+ cg::thread_block_tile<hw_warp_size>& warp,
73
+ float& val1,
74
+ float& val2,
75
+ float& val3,
76
+ float& val4);
77
+
78
+ /*
79
+ The partitioned block is a special case of the above wherein the warps of a threadblock are
80
+ partitioned into separate independent reductions. For example, I might have an 8 warp thread block
81
+ in which each pair of warps is processing an independent piece of data. I would then reduce that
82
+ data with something like the following:
83
+ ``` cpp
84
+ float max_val;
85
+ reduce::partitioned_block<rop::Max, 2>(tb, warp, max_val);
86
+ ```
87
+ After which, each pair of warps would have coherent data with each other. Note, this API will not
88
+ provide correct results if the number of warps per partition is not a power of 2.
89
+ */
90
+ template <ROpType Op, int num_threads>
91
+ DS_D_INLINE void partitioned_block(cg::thread_block& tb,
92
+ cg::thread_block_tile<hw_warp_size>& warp,
93
+ float& val);
94
+
95
+ template <ROpType Op1, ROpType Op2, int num_threads>
96
+ DS_D_INLINE void partitioned_block(cg::thread_block& tb,
97
+ cg::thread_block_tile<hw_warp_size>& warp,
98
+ float& val1,
99
+ float& val2);
100
+
101
+ template <ROpType Op1, ROpType Op2, ROpType Op3, int num_threads>
102
+ DS_D_INLINE void partitioned_block(cg::thread_block& tb,
103
+ cg::thread_block_tile<hw_warp_size>& warp,
104
+ float& val1,
105
+ float& val2,
106
+ float& val3);
107
+
108
+ template <ROpType Op1, ROpType Op2, ROpType Op3, ROpType Op4, int num_threads>
109
+ DS_D_INLINE void partitioned_block(cg::thread_block& tb,
110
+ cg::thread_block_tile<hw_warp_size>& warp,
111
+ float& val1,
112
+ float& val2,
113
+ float& val3,
114
+ float& val4);
115
+
116
+ /*
117
+ Single element reduction primitives. Used inside serial collection
118
+ loops.
119
+
120
+ Example usage:
121
+ using rop = reduce::ROpType;
122
+ float min = init<rop::Min>();
123
+ for (int i = 0; i < 4; i++) {
124
+ min = reduce::element<rop::Min>(min, data[i]);
125
+ }
126
+ */
127
+
128
+ template <ROpType Op, typename T>
129
+ DS_D_INLINE T element(const T lhs, const T rhs);
130
+
131
+ template <ROpType OType, typename T = float>
132
+ DS_D_INLINE T init();
133
+
134
+ /********************** Internal reduction APIs **********************/
135
+
136
+ /*
137
+ Single element "reductions". TODO(cmikeh2): this sort of "op" concept
138
+ should be refactored into its own implementation at some point. This interface
139
+ may be easily expanded for new types/operations, but the typical reductions
140
+ we need are covered with min/max/add on float.
141
+
142
+ NOTE: there is no mean reduction because that relies on knowledge of how
143
+ many values were already reduced into each scalar. Implementing this on top
144
+ of reduce should be straightforward (can just wrap the sum reduction) and
145
+ would be a good extension of the header.
146
+ */
147
+
148
+ DS_D_INLINE int _warp_rank()
149
+ {
150
+ const int thread_rank =
151
+ threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.x * blockDim.y;
152
+ return thread_rank / hw_warp_size;
153
+ }
154
+
155
+ /* Float element reduce implementations */
156
+ template <>
157
+ DS_D_INLINE float element<ROpType::Add>(const float lhs, const float rhs)
158
+ {
159
+ return lhs + rhs;
160
+ }
161
+
162
+ template <>
163
+ DS_D_INLINE double element<ROpType::Add>(const double lhs, const double rhs)
164
+ {
165
+ return lhs + rhs;
166
+ }
167
+
168
+ template <>
169
+ DS_D_INLINE float element<ROpType::Max>(const float lhs, const float rhs)
170
+ {
171
+ return fmaxf(lhs, rhs);
172
+ }
173
+
174
+ template <>
175
+ DS_D_INLINE float element<ROpType::Min>(const float lhs, const float rhs)
176
+ {
177
+ return fminf(lhs, rhs);
178
+ }
179
+
180
+ /* __half element reduce implementation */
181
+ template <>
182
+ DS_D_INLINE __half element<ROpType::Add>(const __half lhs, const __half rhs)
183
+ {
184
+ return lhs + rhs;
185
+ }
186
+
187
+ template <>
188
+ DS_D_INLINE __half element<ROpType::Max>(const __half lhs, const __half rhs)
189
+ {
190
+ #if __CUDA_ARCH__ >= 800
191
+ // Intrinsic limited to Ampere + newer
192
+ return __hmax(lhs, rhs);
193
+ #else
194
+ return (lhs > rhs) ? lhs : rhs;
195
+ #endif
196
+ }
197
+
198
+ #ifdef BF16_AVAILABLE
199
+ template <>
200
+ DS_D_INLINE __nv_bfloat16 element<ROpType::Max>(const __nv_bfloat16 lhs, const __nv_bfloat16 rhs)
201
+ {
202
+ #if __CUDA_ARCH__ >= 800
203
+ // Intrinsic limited to Ampere + newer
204
+ return __hmax(lhs, rhs);
205
+ #else
206
+ return (lhs > rhs) ? lhs : rhs;
207
+ #endif
208
+ }
209
+ #endif
210
+
211
+ template <>
212
+ DS_D_INLINE __half element<ROpType::Min>(const __half lhs, const __half rhs)
213
+ {
214
+ #if __CUDA_ARCH__ >= 800
215
+ // Intrinsic limited to Ampere + newer
216
+ return __hmin(lhs, rhs);
217
+ #else
218
+ return (lhs < rhs) ? lhs : rhs;
219
+ #endif
220
+ }
221
+
222
+ /* __half2 element reduce implementation */
223
+ template <>
224
+ DS_D_INLINE __half2 element<ROpType::Add>(const __half2 lhs, const __half2 rhs)
225
+ {
226
+ return lhs + rhs;
227
+ }
228
+
229
+ template <>
230
+ DS_D_INLINE __half2 element<ROpType::Max>(const __half2 lhs, const __half2 rhs)
231
+ {
232
+ #if __CUDA_ARCH__ >= 800
233
+ return __hmax2(lhs, rhs);
234
+ #else
235
+ __half2 ret_val;
236
+ ret_val.x = (lhs.x > rhs.x) ? lhs.x : rhs.x;
237
+ ret_val.y = (lhs.y > rhs.y) ? lhs.y : rhs.y;
238
+ return ret_val;
239
+ #endif
240
+ }
241
+
242
+ #ifdef BF16_AVAILABLE
243
+ template <>
244
+ DS_D_INLINE __nv_bfloat162 element<ROpType::Max>(const __nv_bfloat162 lhs, const __nv_bfloat162 rhs)
245
+ {
246
+ #if __CUDA_ARCH__ >= 800
247
+ return __hmax2(lhs, rhs);
248
+ #else
249
+ __nv_bfloat162 ret_val;
250
+ ret_val.x = (lhs.x > rhs.x) ? lhs.x : rhs.x;
251
+ ret_val.y = (lhs.y > rhs.y) ? lhs.y : rhs.y;
252
+ return ret_val;
253
+ #endif
254
+ }
255
+ #endif
256
+
257
+ template <>
258
+ DS_D_INLINE __half2 element<ROpType::Min>(const __half2 lhs, const __half2 rhs)
259
+ {
260
+ #if __CUDA_ARCH__ >= 800
261
+ return __hmin2(lhs, rhs);
262
+ #else
263
+ __half2 ret_val;
264
+ ret_val.x = (lhs.x < rhs.x) ? lhs.x : rhs.x;
265
+ ret_val.y = (lhs.y < rhs.y) ? lhs.y : rhs.y;
266
+ return ret_val;
267
+ #endif
268
+ }
269
+
270
+ template <>
271
+ DS_D_INLINE int32_t element<ROpType::Add>(const int32_t lhs, const int32_t rhs)
272
+ {
273
+ return lhs + rhs;
274
+ }
275
+
276
+ template <>
277
+ DS_D_INLINE int32_t element<ROpType::Max>(const int32_t lhs, const int32_t rhs)
278
+ {
279
+ return (lhs > rhs) ? lhs : rhs;
280
+ }
281
+
282
+ template <>
283
+ DS_D_INLINE int32_t element<ROpType::Min>(const int32_t lhs, const int32_t rhs)
284
+ {
285
+ return (lhs < rhs) ? lhs : rhs;
286
+ }
287
+
288
+ template <>
289
+ DS_D_INLINE uint32_t element<ROpType::Add>(const uint32_t lhs, const uint32_t rhs)
290
+ {
291
+ return lhs + rhs;
292
+ }
293
+
294
+ template <>
295
+ DS_D_INLINE uint32_t element<ROpType::Max>(const uint32_t lhs, const uint32_t rhs)
296
+ {
297
+ return (lhs > rhs) ? lhs : rhs;
298
+ }
299
+
300
+ template <>
301
+ DS_D_INLINE uint32_t element<ROpType::Min>(const uint32_t lhs, const uint32_t rhs)
302
+ {
303
+ return (lhs < rhs) ? lhs : rhs;
304
+ }
305
+
306
+ template <>
307
+ DS_D_INLINE int64_t element<ROpType::Add>(const int64_t lhs, const int64_t rhs)
308
+ {
309
+ return lhs + rhs;
310
+ }
311
+
312
+ template <>
313
+ DS_D_INLINE int64_t element<ROpType::Max>(const int64_t lhs, const int64_t rhs)
314
+ {
315
+ return (lhs > rhs) ? lhs : rhs;
316
+ }
317
+
318
+ template <>
319
+ DS_D_INLINE int64_t element<ROpType::Min>(const int64_t lhs, const int64_t rhs)
320
+ {
321
+ return (lhs < rhs) ? lhs : rhs;
322
+ }
323
+
324
+ /*
325
+ Reduction initialization primitives
326
+ */
327
+ template <>
328
+ DS_D_INLINE float init<ROpType::Add>()
329
+ {
330
+ return 0.0f;
331
+ }
332
+ template <>
333
+ DS_D_INLINE double init<ROpType::Add>()
334
+ {
335
+ return (double)0.0f;
336
+ }
337
+
338
+ template <>
339
+ DS_D_INLINE float init<ROpType::Min>()
340
+ {
341
+ // Positive infinity
342
+ return INFINITY;
343
+ }
344
+
345
+ template <>
346
+ DS_D_INLINE float init<ROpType::Max>()
347
+ {
348
+ // Negative infinity
349
+ return -INFINITY;
350
+ }
351
+
352
+ template <>
353
+ DS_D_INLINE __half init<ROpType::Add>()
354
+ {
355
+ constexpr __half_raw zero = {0x0000};
356
+ return __half(zero);
357
+ }
358
+
359
+ template <>
360
+ DS_D_INLINE __half init<ROpType::Min>()
361
+ {
362
+ constexpr __half_raw inf = {0x7C00};
363
+ return __half(inf);
364
+ }
365
+
366
+ template <>
367
+ DS_D_INLINE __half init<ROpType::Max>()
368
+ {
369
+ constexpr __half_raw neg_inf = {0xFC00};
370
+ return __half(neg_inf);
371
+ }
372
+
373
+ #ifdef BF16_AVAILABLE
374
+ template <>
375
+ DS_D_INLINE __nv_bfloat16 init<ROpType::Max>()
376
+ {
377
+ constexpr __nv_bfloat16_raw neg_inf = {0xFF80};
378
+ return __nv_bfloat16(neg_inf);
379
+ }
380
+ #endif
381
+
382
+ template <>
383
+ DS_D_INLINE __half2 init<ROpType::Add>()
384
+ {
385
+ #ifdef __HIP_PLATFORM_AMD__
386
+ return __half2{_Float16_2{0x0000, 0x0000}};
387
+ #else
388
+ constexpr __half2_raw zero = {0x0000, 0x0000};
389
+ return __half2(zero);
390
+ #endif
391
+ }
392
+
393
+ template <>
394
+ DS_D_INLINE __half2 init<ROpType::Min>()
395
+ {
396
+ #ifdef __HIP_PLATFORM_AMD__
397
+ return __half2{_Float16_2{0x7C00, 0x7C00}};
398
+ #else
399
+ constexpr __half2_raw inf = {0x7C00, 0x7C00};
400
+ return __half2(inf);
401
+ #endif
402
+ }
403
+
404
+ template <>
405
+ DS_D_INLINE __half2 init<ROpType::Max>()
406
+ {
407
+ #ifdef __HIP_PLATFORM_AMD__
408
+ return __half2{_Float16_2{0xFC00, 0xFC00}};
409
+ #else
410
+ constexpr __half2_raw neg_inf = {0xFC00, 0xFC00};
411
+ return __half2(neg_inf);
412
+ #endif
413
+ }
414
+
415
+ template <>
416
+ DS_D_INLINE int32_t init<ROpType::Add>()
417
+ {
418
+ return 0;
419
+ }
420
+
421
+ template <>
422
+ DS_D_INLINE int32_t init<ROpType::Min>()
423
+ {
424
+ return 0x7FFFFFFF;
425
+ }
426
+
427
+ template <>
428
+ DS_D_INLINE int32_t init<ROpType::Max>()
429
+ {
430
+ return 0x80000000;
431
+ }
432
+
433
+ template <>
434
+ DS_D_INLINE uint32_t init<ROpType::Add>()
435
+ {
436
+ return 0;
437
+ }
438
+
439
+ template <>
440
+ DS_D_INLINE uint32_t init<ROpType::Min>()
441
+ {
442
+ return 0xFFFFFFFF;
443
+ }
444
+
445
+ template <>
446
+ DS_D_INLINE uint32_t init<ROpType::Max>()
447
+ {
448
+ return 0;
449
+ }
450
+
451
+ template <>
452
+ DS_D_INLINE int64_t init<ROpType::Add>()
453
+ {
454
+ return 0;
455
+ }
456
+
457
+ template <>
458
+ DS_D_INLINE int64_t init<ROpType::Min>()
459
+ {
460
+ return 0x7FFFFFFFFFFFFFFF;
461
+ }
462
+
463
+ template <>
464
+ DS_D_INLINE int64_t init<ROpType::Max>()
465
+ {
466
+ return 0x8000000000000000;
467
+ }
468
+
469
+ template <>
470
+ DS_D_INLINE uint64_t init<ROpType::Add>()
471
+ {
472
+ return 0;
473
+ }
474
+
475
+ template <>
476
+ DS_D_INLINE uint64_t init<ROpType::Min>()
477
+ {
478
+ return 0xFFFFFFFFFFFFFFFF;
479
+ }
480
+
481
+ template <>
482
+ DS_D_INLINE uint64_t init<ROpType::Max>()
483
+ {
484
+ return 0;
485
+ }
486
+
487
+ template <ROpType Op, typename T>
488
+ DS_D_INLINE void init(T* data)
489
+ {
490
+ data[0] = init<Op, T>();
491
+ }
492
+
493
+ template <ROpType Op1, ROpType Op2, typename T>
494
+ DS_D_INLINE void init(T* data)
495
+ {
496
+ data[0] = init<Op1, T>();
497
+ data[1] = init<Op2, T>();
498
+ }
499
+
500
+ template <ROpType Op1, ROpType Op2, ROpType Op3, typename T>
501
+ DS_D_INLINE void init(T* data)
502
+ {
503
+ data[0] = init<Op1, T>();
504
+ data[1] = init<Op2, T>();
505
+ data[2] = init<Op3, T>();
506
+ }
507
+
508
+ template <ROpType Op1, ROpType Op2, ROpType Op3, ROpType Op4, typename T>
509
+ DS_D_INLINE void init(T* data)
510
+ {
511
+ data[0] = init<Op1, T>();
512
+ data[1] = init<Op2, T>();
513
+ data[2] = init<Op3, T>();
514
+ data[3] = init<Op4, T>();
515
+ }
516
+
517
+ /*
518
+ Warp reduction primitives
519
+
520
+ `reduce_width` is an unsafe template parameter, in the sense that
521
+ when using `reduce_width` < hw_warp_size the warp is partitioned
522
+ into `hw_warp_size` / `reduce_width` groups of partial sums.
523
+
524
+ If someone can figure out how to use variadic templates in a reasonable way
525
+ here (fold is C++17 only and I don't think helps and recursion feels like
526
+ huge overkill that harms readability) that would be wonderful.
527
+ */
528
+
529
+ template <typename T, ROpType Op, int reduce_width = hw_warp_size>
530
+ DS_D_INLINE void _warp(cg::thread_block_tile<hw_warp_size>& warp, T* data)
531
+ {
532
+ #pragma unroll
533
+ for (int i = 1; i < reduce_width; i *= 2) {
534
+ data[0] = element<Op>(data[0], warp.shfl_xor(data[0], i));
535
+ }
536
+ }
537
+
538
+ template <typename T, ROpType Op1, ROpType Op2, int reduce_width = hw_warp_size>
539
+ DS_D_INLINE void _warp(cg::thread_block_tile<hw_warp_size>& warp, T* data)
540
+ {
541
+ #pragma unroll
542
+ for (int i = 1; i < reduce_width; i *= 2) {
543
+ data[0] = element<Op1>(data[0], warp.shfl_xor(data[0], i));
544
+ data[1] = element<Op2>(data[1], warp.shfl_xor(data[1], i));
545
+ }
546
+ }
547
+
548
+ template <typename T, ROpType Op1, ROpType Op2, ROpType Op3, int reduce_width = hw_warp_size>
549
+ DS_D_INLINE void _warp(cg::thread_block_tile<hw_warp_size>& warp, T* data)
550
+ {
551
+ #pragma unroll
552
+ for (int i = 1; i < reduce_width; i *= 2) {
553
+ data[0] = element<Op1>(data[0], warp.shfl_xor(data[0], i));
554
+ data[1] = element<Op2>(data[1], warp.shfl_xor(data[1], i));
555
+ data[2] = element<Op3>(data[2], warp.shfl_xor(data[2], i));
556
+ }
557
+ }
558
+
559
+ template <typename T,
560
+ ROpType Op1,
561
+ ROpType Op2,
562
+ ROpType Op3,
563
+ ROpType Op4,
564
+ int reduce_width = hw_warp_size>
565
+ DS_D_INLINE void _warp(cg::thread_block_tile<hw_warp_size>& warp, T* data)
566
+ {
567
+ #pragma unroll
568
+ for (int i = 1; i < reduce_width; i *= 2) {
569
+ data[0] = element<Op1>(data[0], warp.shfl_xor(data[0], i));
570
+ data[1] = element<Op2>(data[1], warp.shfl_xor(data[1], i));
571
+ data[2] = element<Op3>(data[2], warp.shfl_xor(data[2], i));
572
+ data[3] = element<Op4>(data[3], warp.shfl_xor(data[3], i));
573
+ }
574
+ }
575
+
576
+ /*
577
+ Implementation for primary block reduction that serves both `block` and
578
+ `partitioned_block`.
579
+
580
+ Total warps refers to the reduction width of the reduction, not
581
+ the number of warps in the block (which may exceed that
582
+ if the block is partitioned or if we do a conservative bound at
583
+ compile time).
584
+ */
585
+ template <typename T, int total_warps, ROpType... Ops>
586
+ DS_D_INLINE void _block(cg::thread_block& tb,
587
+ cg::thread_block_tile<hw_warp_size>& warp_arg,
588
+ T* data)
589
+ {
590
+ constexpr int elems = sizeof...(Ops);
591
+ constexpr int bytes = sizeof(T);
592
+ // Unused when `partition_size == 1` or total_warps == 1
593
+ __shared__ T reduce_buffer[max_warps * elems];
594
+
595
+ #ifdef __HIP_PLATFORM_AMD__
596
+ const int total_threads = blockDim.x * blockDim.y * blockDim.z;
597
+ const int running_warps = total_threads / hw_warp_size;
598
+ #else
599
+ const int running_warps = warp_arg.meta_group_size();
600
+ #endif
601
+
602
+ // Always perform warp-scope reduction
603
+ _warp<T, Ops...>(warp_arg, data);
604
+
605
+ // If max_warps == 1 let's skip the runtime check
606
+ if (total_warps != 1) {
607
+ if (warp_arg.thread_rank() == 0) {
608
+ #pragma unroll
609
+ for (int i = 0; i < elems; i++) {
610
+ mem_access::store_shared<bytes>(reduce_buffer + elems * _warp_rank() + i, data + i);
611
+ }
612
+ }
613
+
614
+ // Synchronization inside block-uniform conditional is safe
615
+ tb.sync();
616
+
617
+ if (_warp_rank() == 0) {
618
+ if (warp_arg.thread_rank() < running_warps) {
619
+ #pragma unroll
620
+ for (int i = 0; i < elems; i++) {
621
+ mem_access::load_shared<bytes>(
622
+ data + i, reduce_buffer + elems * warp_arg.thread_rank() + i);
623
+ }
624
+ } else {
625
+ init<Ops...>(data);
626
+ }
627
+
628
+ _warp<T, Ops..., total_warps>(warp_arg, data);
629
+
630
+ #pragma unroll
631
+ for (int i = 0; i < elems; i++) {
632
+ mem_access::store_shared<bytes>(reduce_buffer + elems * warp_arg.thread_rank() + i,
633
+ data + i);
634
+ }
635
+ }
636
+
637
+ // Synchronization inside block-uniform conditional is safe
638
+ tb.sync();
639
+
640
+ #pragma unroll
641
+ for (int i = 0; i < elems; i++) {
642
+ mem_access::load_shared<bytes>(data + i, reduce_buffer + _warp_rank() * elems + i);
643
+ }
644
+ }
645
+ }
646
+
647
+ /*
648
+ Main API implementations. For the most part, they just convert the individual
649
+ variables into arrays, which makes working with them easier with a single
650
+ implementation. In theory, we could use the `_block` implementation as another
651
+ option, but the nature of using a pointer is a little less safe and this allows
652
+ us to obfuscate the details of the partitioned implementation.
653
+ */
654
+ template <ROpType Op, int warp_bound>
655
+ DS_D_INLINE void block(cg::thread_block& tb, cg::thread_block_tile<hw_warp_size>& warp, float& val)
656
+ {
657
+ _block<float, warp_bound, Op>(tb, warp, &val);
658
+ }
659
+
660
+ template <ROpType Op1, ROpType Op2, int warp_bound>
661
+ DS_D_INLINE void block(cg::thread_block& tb,
662
+ cg::thread_block_tile<hw_warp_size>& warp,
663
+ float& val1,
664
+ float& val2)
665
+ {
666
+ float data[2] = {val1, val2};
667
+ _block<float, warp_bound, Op1, Op2>(tb, warp, data);
668
+ val1 = data[0];
669
+ val2 = data[1];
670
+ }
671
+
672
+ template <ROpType Op1, ROpType Op2, ROpType Op3, int warp_bound>
673
+ DS_D_INLINE void block(cg::thread_block& tb,
674
+ cg::thread_block_tile<hw_warp_size>& warp,
675
+ float& val1,
676
+ float& val2,
677
+ float& val3)
678
+ {
679
+ float data[3] = {val1, val2, val3};
680
+ _block<float, warp_bound, Op1, Op2, Op3>(tb, warp, data);
681
+ val1 = data[0];
682
+ val2 = data[1];
683
+ val3 = data[2];
684
+ }
685
+
686
+ template <ROpType Op1, ROpType Op2, ROpType Op3, ROpType Op4, int warp_bound>
687
+ DS_D_INLINE void block(cg::thread_block& tb,
688
+ cg::thread_block_tile<hw_warp_size>& warp,
689
+ float& val1,
690
+ float& val2,
691
+ float& val3,
692
+ float& val4)
693
+ {
694
+ float data[4] = {val1, val2, val3, val4};
695
+ _block<float, warp_bound, Op1, Op2, Op3, Op4>(tb, warp, data);
696
+ val1 = data[0];
697
+ val2 = data[1];
698
+ val3 = data[2];
699
+ val4 = data[3];
700
+ }
701
+
702
+ /*
703
+ Note: for the partitioned blocks, the implementation does not support non-power of 2 blocks in order
704
+ to shorten block scale reduction length.
705
+ */
706
+ template <ROpType Op, int num_threads>
707
+ DS_D_INLINE void partitioned_block(cg::thread_block& tb,
708
+ cg::thread_block_tile<hw_warp_size>& warp,
709
+ float& val)
710
+ {
711
+ if (num_threads <= hw_warp_size) {
712
+ _warp<float, Op, num_threads>(warp, &val);
713
+ } else {
714
+ constexpr int num_warps = num_threads / hw_warp_size;
715
+ _block<float, num_warps, Op>(tb, warp, &val);
716
+ }
717
+ }
718
+
719
+ template <ROpType Op1, ROpType Op2, int num_threads>
720
+ DS_D_INLINE void partitioned_block(cg::thread_block& tb,
721
+ cg::thread_block_tile<hw_warp_size>& warp,
722
+ float& val1,
723
+ float& val2)
724
+ {
725
+ float data[2] = {val1, val2};
726
+
727
+ if (num_threads <= hw_warp_size) {
728
+ _warp<float, Op1, Op2, num_threads>(warp, data);
729
+ } else {
730
+ constexpr int num_warps = num_threads / hw_warp_size;
731
+ _block<float, num_warps, Op1, Op2>(tb, warp, data);
732
+ }
733
+
734
+ val1 = data[0];
735
+ val2 = data[1];
736
+ }
737
+
738
+ template <ROpType Op1, ROpType Op2, ROpType Op3, int num_threads>
739
+ DS_D_INLINE void partitioned_block(cg::thread_block& tb,
740
+ cg::thread_block_tile<hw_warp_size>& warp,
741
+ float& val1,
742
+ float& val2,
743
+ float& val3)
744
+ {
745
+ float data[3] = {val1, val2, val3};
746
+
747
+ if (num_threads <= hw_warp_size) {
748
+ _warp<float, Op1, Op2, Op3, num_threads>(warp, data);
749
+ } else {
750
+ constexpr int num_warps = num_threads / hw_warp_size;
751
+ _block<float, num_warps, Op1, Op2, Op3>(tb, warp, data);
752
+ }
753
+
754
+ val1 = data[0];
755
+ val2 = data[1];
756
+ val3 = data[2];
757
+ }
758
+
759
+ template <ROpType Op1, ROpType Op2, ROpType Op3, ROpType Op4, int num_threads>
760
+ DS_D_INLINE void partitioned_block(cg::thread_block& tb,
761
+ cg::thread_block_tile<hw_warp_size>& warp,
762
+ float& val1,
763
+ float& val2,
764
+ float& val3,
765
+ float& val4)
766
+ {
767
+ float data[4] = {val1, val2, val3, val4};
768
+
769
+ if (num_threads <= hw_warp_size) {
770
+ _warp<float, Op1, Op2, Op3, Op4, num_threads>(warp, data);
771
+ } else {
772
+ constexpr int num_warps = num_threads / hw_warp_size;
773
+ _block<float, num_warps, Op1, Op2, Op3, Op4>(tb, warp, data);
774
+ }
775
+
776
+ val1 = data[0];
777
+ val2 = data[1];
778
+ val3 = data[2];
779
+ val4 = data[3];
780
+ }
781
+
782
+ /*
783
+ Arg-reduce is a specialization of the above. We only support this with a single reduction
784
+ parameter. This only works for max/min reductions.
785
+ */
786
+
787
+ __align__(8) struct IdxReduceResult {
788
+ /*
789
+ NOTE: ORDERING MATTERS HERE! The idx is the least significant set of bits
790
+ and the val is the most significant. Changing the order of this declaration
791
+ will break the code.
792
+ */
793
+ int idx;
794
+ float val;
795
+ };
796
+
797
+ template <ROpType Op, int warpBound>
798
+ DS_D_INLINE IdxReduceResult
799
+ idx_reduce(cg::thread_block& tb, cg::thread_block_tile<hw_warp_size>& warp, float val, int idx)
800
+ {
801
+ IdxReduceResult res = {idx, val};
802
+
803
+ // Clear out the nan. This shouldn't be an issue for our initial applications
804
+ if (isnan(val)) res.val = init<Op>();
805
+
806
+ // Can do float compares as integers. By packing the index into the lower bits
807
+ // we can just do a single int64 rather than a branch, compare, and select.
808
+ // One side benefit of this is that it is by nature a stable algorithm and
809
+ // will always bias ties to the higher index.
810
+ int64_t* res_as_int = reinterpret_cast<int64_t*>(&res);
811
+
812
+ // The way floating point compare works is normally to perform a sign comparison
813
+ // and if they match, then do a comparison of the rest of the bits as unsigned
814
+ // integers. Since we are bundling these, that means for negative values we need
815
+ // to reverse the sort order, which we can do with an XOR.
816
+ if (val < 0) { *res_as_int ^= 0x7fffffff00000000; }
817
+
818
+ _block<int64_t, warpBound, Op>(tb, warp, res_as_int);
819
+
820
+ // Sign bit is preserved, so we can check if we need to invert the mantissa back
821
+ if (res.val < 0) { *res_as_int ^= 0x7fffffff00000000; }
822
+
823
+ return res;
824
+ }
825
+
826
+ } // namespace reduce
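As a concrete illustration of the high-level API declared at the top of this header, the kernel below performs a fused per-block max and sum with a single call to `reduce::block`. It is a minimal sketch, not part of the header; it assumes a 1-D launch with one element per thread and at most `reduce::max_threads` threads per block.

```cpp
// Minimal usage sketch for reduce::block (hypothetical kernel, one value per thread).
#include "reduction_utils.h"

__global__ void block_max_and_sum(float* block_max, float* block_sum, const float* in, int n)
{
    cg::thread_block tb = cg::this_thread_block();
    cg::thread_block_tile<hw_warp_size> warp = cg::tiled_partition<hw_warp_size>(tb);

    // Out-of-range threads contribute the identity element for their reduction op.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    float max_val = (idx < n) ? in[idx] : reduce::init<reduce::ROpType::Max>();
    float sum_val = (idx < n) ? in[idx] : reduce::init<reduce::ROpType::Add>();

    // Both reductions run in one pass; afterwards every thread in the block
    // holds the block-wide results.
    reduce::block<reduce::ROpType::Max, reduce::ROpType::Add>(tb, warp, max_val, sum_val);

    if (tb.thread_rank() == 0) {
        block_max[blockIdx.x] = max_val;
        block_sum[blockIdx.x] = sum_val;
    }
}
```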
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/simd.h ADDED
@@ -0,0 +1,198 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #if (__x86_64__ || __i386__)
9
+ #include <cpuid.h>
10
+ #include <x86intrin.h>
11
+ #endif
12
+
13
+ #define TILE (128 * 1024 * 1024)
14
+ #if defined(__AVX512__) or defined(__AVX256__)
15
+
16
+ #define ROUND_DOWN(size, step) ((size) & ~((step)-1))
17
+
18
+ #if defined(__AVX512__)
19
+ #define SIMD_STORE(a, d) _mm512_storeu_ps(a, d)
20
+ #define SIMD_LOAD(x) _mm512_loadu_ps(x)
21
+ #define SIMD_SET(x) _mm512_set1_ps(x)
22
+ #define SIMD_ADD(x, y) _mm512_add_ps(x, y)
23
+ #define SIMD_MUL(x, y) _mm512_mul_ps(x, y)
24
+ #define SIMD_FMA(x, y, c) _mm512_fmadd_ps(x, y, c)
25
+ #define SIMD_SQRT(x) _mm512_sqrt_ps(x)
26
+ #define SIMD_DIV(x, y) _mm512_div_ps(x, y)
27
+ #define SIMD_AND(x, y) _mm512_and_ps(x, y)
28
+ #define SIMD_ANDNOT(x, y) _mm512_andnot_ps(x, y)
29
+ #define SIMD_OR(x, y) _mm512_or_ps(x, y)
30
+ #define SIMD_XOR(x, y) _mm512_xor_ps(x, y)
31
+ #define SIMD_WIDTH 16
32
+
33
+ #define SIMD_LOAD2(x, h) \
34
+ ((h) ? _mm512_cvtph_ps(_mm256_castps_si256(_mm256_loadu_ps(x))) : _mm512_loadu_ps(x))
35
+ #define SIMD_STORE2(x, d, h) \
36
+ ((h) ? _mm256_store_ps(x, _mm256_castsi256_ps(_mm512_cvtps_ph(d, _MM_FROUND_TO_NEAREST_INT))) \
37
+ : _mm512_storeu_ps(x, d))
38
+
39
+ #define INTV __m256i
40
+ #elif defined(__AVX256__)
41
+ #define SIMD_STORE(a, d) _mm256_storeu_ps(a, d)
42
+ #define SIMD_LOAD(x) _mm256_loadu_ps(x)
43
+ #define SIMD_SET(x) _mm256_set1_ps(x)
44
+ #define SIMD_ADD(x, y) _mm256_add_ps(x, y)
45
+ #define SIMD_MUL(x, y) _mm256_mul_ps(x, y)
46
+ #define SIMD_FMA(x, y, c) _mm256_fmadd_ps(x, y, c)
47
+ #define SIMD_SQRT(x) _mm256_sqrt_ps(x)
48
+ #define SIMD_DIV(x, y) _mm256_div_ps(x, y)
49
+ #define SIMD_AND(x, y) _mm256_and_ps(x, y)
50
+ #define SIMD_ANDNOT(x, y) _mm256_andnot_ps(x, y)
51
+ #define SIMD_OR(x, y) _mm256_or_ps(x, y)
52
+ #define SIMD_XOR(x, y) _mm256_xor_ps(x, y)
53
+ #define SIMD_WIDTH 8
54
+
55
+ #define SIMD_LOAD2(x, h) \
56
+ ((h) ? _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)(x))) : _mm256_loadu_ps(x))
57
+ #define SIMD_STORE2(x, d, h) \
58
+ ((h) ? _mm_store_ps(x, _mm_castsi128_ps(_mm256_cvtps_ph(d, _MM_FROUND_TO_NEAREST_INT))) \
59
+ : _mm256_storeu_ps(x, d))
60
+
61
+ #define INTV __m128i
62
+ #endif
63
+
64
+ union AVX_Data {
65
+ #if defined(__AVX512__)
66
+ __m512 data;
67
+ #elif defined(__AVX256__)
68
+ __m256 data;
69
+ #endif
70
+ // float data_f[16];
71
+ };
72
+
73
+ template <int span>
74
+ inline void simd_store(float* dst, AVX_Data* src, bool half_precision)
75
+ {
76
+ size_t width = (half_precision ? SIMD_WIDTH / 2 : SIMD_WIDTH);
77
+ #pragma unroll
78
+ for (size_t i = 0; i < span; ++i) { SIMD_STORE2(dst + width * i, src[i].data, half_precision); }
79
+ }
80
+ template <int span>
81
+ inline void simd_load(AVX_Data* dst, float* src, bool half_precision)
82
+ {
83
+ size_t width = (half_precision ? SIMD_WIDTH / 2 : SIMD_WIDTH);
84
+ #pragma unroll
85
+ for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_LOAD2(src + width * i, half_precision); }
86
+ }
87
+ template <int span>
88
+ inline void simd_fma(AVX_Data* dst, AVX_Data* src_m_l, AVX_Data src_m_r, AVX_Data* src_a)
89
+ {
90
+ #pragma unroll
91
+ for (size_t i = 0; i < span; ++i) {
92
+ dst[i].data = SIMD_FMA(src_m_l[i].data, src_m_r.data, src_a[i].data);
93
+ }
94
+ }
95
+ template <int span>
96
+ inline void simd_fma(AVX_Data* dst, AVX_Data* src_m_l, AVX_Data src_m_r, AVX_Data src_a)
97
+ {
98
+ #pragma unroll
99
+ for (size_t i = 0; i < span; ++i) {
100
+ dst[i].data = SIMD_FMA(src_m_l[i].data, src_m_r.data, src_a.data);
101
+ }
102
+ }
103
+ template <int span>
104
+ inline void simd_fma(AVX_Data* dst, AVX_Data* src_m_l, AVX_Data* src_m_r, AVX_Data* src_a)
105
+ {
106
+ #pragma unroll
107
+ for (size_t i = 0; i < span; ++i) {
108
+ dst[i].data = SIMD_FMA(src_m_l[i].data, src_m_r[i].data, src_a[i].data);
109
+ }
110
+ }
111
+ template <int span>
112
+ inline void simd_sqrt(AVX_Data* dst, AVX_Data* src)
113
+ {
114
+ #pragma unroll
115
+ for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_SQRT(src[i].data); }
116
+ }
117
+ template <int span>
118
+ inline void simd_add(AVX_Data* dst, AVX_Data* src_a_l, AVX_Data src_a_r)
119
+ {
120
+ #pragma unroll
121
+ for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_ADD(src_a_l[i].data, src_a_r.data); }
122
+ }
123
+ template <int span>
124
+ inline void simd_add(AVX_Data* dst, AVX_Data* src_a_l, AVX_Data* src_a_r)
125
+ {
126
+ #pragma unroll
127
+ for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_ADD(src_a_l[i].data, src_a_r[i].data); }
128
+ }
129
+ template <int span>
130
+ inline void simd_mul(AVX_Data* dst, AVX_Data* src_a_l, AVX_Data src_a_r)
131
+ {
132
+ #pragma unroll
133
+ for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_MUL(src_a_l[i].data, src_a_r.data); }
134
+ }
135
+ template <int span>
136
+ inline void simd_mul(AVX_Data* dst, AVX_Data* src_a_l, AVX_Data* src_a_r)
137
+ {
138
+ #pragma unroll
139
+ for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_MUL(src_a_l[i].data, src_a_r[i].data); }
140
+ }
141
+ template <int span>
142
+ inline void simd_div(AVX_Data* dst, AVX_Data* src_a_l, AVX_Data* src_a_r)
143
+ {
144
+ #pragma unroll
145
+ for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_DIV(src_a_l[i].data, src_a_r[i].data); }
146
+ }
147
+ template <int span>
148
+ inline void simd_and(AVX_Data* dst, AVX_Data* src_a_l, AVX_Data src_a_r)
149
+ {
150
+ #pragma unroll
151
+ for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_AND(src_a_l[i].data, src_a_r.data); }
152
+ }
153
+ template <int span>
154
+ inline void simd_and(AVX_Data* dst, AVX_Data* src_a_l, AVX_Data* src_a_r)
155
+ {
156
+ #pragma unroll
157
+ for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_AND(src_a_l[i].data, src_a_r[i].data); }
158
+ }
159
+ template <int span>
160
+ inline void simd_andnot(AVX_Data* dst, AVX_Data* src_a_l, AVX_Data src_a_r)
161
+ {
162
+ #pragma unroll
163
+ for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_ANDNOT(src_a_l[i].data, src_a_r.data); }
164
+ }
165
+ template <int span>
166
+ inline void simd_andnot(AVX_Data* dst, AVX_Data* src_a_l, AVX_Data* src_a_r)
167
+ {
168
+ #pragma unroll
169
+ for (size_t i = 0; i < span; ++i) {
170
+ dst[i].data = SIMD_ANDNOT(src_a_l[i].data, src_a_r[i].data);
171
+ }
172
+ }
173
+ template <int span>
174
+ inline void simd_or(AVX_Data* dst, AVX_Data* src_a_l, AVX_Data src_a_r)
175
+ {
176
+ #pragma unroll
177
+ for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_OR(src_a_l[i].data, src_a_r.data); }
178
+ }
179
+ template <int span>
180
+ inline void simd_or(AVX_Data* dst, AVX_Data* src_a_l, AVX_Data* src_a_r)
181
+ {
182
+ #pragma unroll
183
+ for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_OR(src_a_l[i].data, src_a_r[i].data); }
184
+ }
185
+ template <int span>
186
+ inline void simd_xor(AVX_Data* dst, AVX_Data* src_a_l, AVX_Data src_a_r)
187
+ {
188
+ #pragma unroll
189
+ for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_XOR(src_a_l[i].data, src_a_r.data); }
190
+ }
191
+ template <int span>
192
+ inline void simd_xor(AVX_Data* dst, AVX_Data* src_a_l, AVX_Data* src_a_r)
193
+ {
194
+ #pragma unroll
195
+ for (size_t i = 0; i < span; ++i) { dst[i].data = SIMD_XOR(src_a_l[i].data, src_a_r[i].data); }
196
+ }
197
+
198
+ #endif
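The macros and `AVX_Data` helpers above are what the CPU optimizer update loops are built from. The snippet below is a standalone, hedged illustration of the pattern rather than code from this commit; it assumes the translation unit is compiled with `-D__AVX256__ -mavx2 -mfma -mf16c` so the 256-bit macro set is selected, and that the buffers are plain fp32.

```cpp
// Illustrative sketch: out[i] = a[i] * scale + b[i] using the simd.h helpers.
#include "simd.h"

void scaled_add(float* out, float* a, float* b, float scale, size_t n)
{
    constexpr int span = 4;                 // process 4 SIMD registers per iteration
    const size_t step = span * SIMD_WIDTH;  // SIMD_WIDTH == 8 under __AVX256__

    AVX_Data scale_v;
    scale_v.data = SIMD_SET(scale);

    size_t i = 0;
    for (; i + step <= n; i += step) {
        AVX_Data a_v[span], b_v[span];
        simd_load<span>(a_v, a + i, false);      // false: full fp32, not packed fp16
        simd_load<span>(b_v, b + i, false);
        simd_fma<span>(b_v, a_v, scale_v, b_v);  // b_v = a_v * scale + b_v
        simd_store<span>(out + i, b_v, false);
    }
    for (; i < n; ++i) { out[i] = a[i] * scale + b[i]; }  // scalar tail
}
```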
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/softmax.h ADDED
@@ -0,0 +1,65 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include <cuda.h>
9
+ #include <cuda_fp16.h>
10
+ #include <stdio.h>
11
+ #include "custom_cuda_layers.h"
12
+
13
+ #include <fstream>
14
+
15
+ using namespace std;
16
+
17
+ template <typename T>
18
+ class Softmax {
19
+ public:
20
+ struct Config {
21
+ size_t batchSize;
22
+ size_t heads;
23
+ size_t seq_length;
24
+ size_t prob_depth;
25
+ float temperature;
26
+ bool mem_alloc;
27
+ Config(size_t batch, size_t h, size_t seq, int prob_size = 0, bool mem_alloc = false)
28
+ : batchSize(batch),
29
+ heads(h),
30
+ seq_length(seq),
31
+ prob_depth(prob_size),
32
+ temperature(1.0),
33
+ mem_alloc(mem_alloc)
34
+ {
35
+ }
36
+ };
37
+
38
+ Softmax(Config config) : config_(config) {}
39
+
40
+ ~Softmax() {}
41
+
42
+ void Forward(int bsz, T* vals, const T* attn_mask, cudaStream_t& stream)
43
+ {
44
+ launch_attn_softmax<T>(vals, attn_mask, bsz, config_.heads, config_.seq_length, stream);
45
+ }
46
+
47
+ void Backward(int bsz, T* out_grad, const T* soft_out, cudaStream_t stream)
48
+ {
49
+ launch_attn_softmax_backward_v2<T>(
50
+ out_grad, soft_out, bsz, config_.heads, config_.seq_length, stream);
51
+ }
52
+
53
+ inline size_t GetProbDepth() const { return config_.prob_depth; }
54
+
55
+ inline size_t GetBatchSize() const { return config_.batchSize; }
56
+
57
+ inline size_t GetNumHeads() const { return config_.heads; }
58
+
59
+ inline size_t GetSeqLength() const { return config_.seq_length; }
60
+
61
+ inline void SetSeqLength(size_t seq_len) { config_.seq_length = seq_len; }
62
+
63
+ private:
64
+ Config config_;
65
+ };
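A hedged usage sketch for the wrapper above: build a `Config`, then call `Forward` on the attention-score buffer. The buffer shapes noted in the comments are assumptions about what `launch_attn_softmax` (declared in custom_cuda_layers.h) expects, not something this header spells out.

```cpp
// Hypothetical caller: in-place masked softmax over attention scores.
#include "softmax.h"

void attention_softmax(float* scores,      // assumed [batch, heads, seq_len, seq_len]
                       const float* mask,  // assumed broadcastable attention mask
                       int batch, int heads, int seq_len,
                       cudaStream_t stream)
{
    Softmax<float>::Config cfg(batch, heads, seq_len);
    Softmax<float> softmax(cfg);
    softmax.Forward(batch, scores, mask, stream);  // overwrites `scores`
}
```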
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/strided_batch_gemm.h ADDED
@@ -0,0 +1,200 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include <cuda.h>
9
+ #include <cuda_fp16.h>
10
+ #include <stdio.h>
11
+ #include "context.h"
12
+
13
+ template <typename T>
14
+ class StridedBatchGemm {
15
+ public:
16
+ struct Config {
17
+ int batch_size;
18
+ int m;
19
+ int n;
20
+ int k;
21
+ float alpha;
22
+ float beta;
23
+ cublasOperation_t op_A;
24
+ cublasOperation_t op_B;
25
+ std::array<int, 3> gemm_algos;
26
+
27
+ Config(int batch,
28
+ int mm,
29
+ int nn,
30
+ int kk,
31
+ float param_alpha,
32
+ float param_beta,
33
+ cublasOperation_t opA,
34
+ cublasOperation_t opB,
35
+ const std::array<int, 3>& algos)
36
+ : batch_size(batch),
37
+ m(mm),
38
+ n(nn),
39
+ k(kk),
40
+ alpha(param_alpha),
41
+ beta(param_beta),
42
+ op_A(opA),
43
+ op_B(opB),
44
+ gemm_algos(algos)
45
+ {
46
+ }
47
+ void SetConfig(int mm, int nn, int kk)
48
+ {
49
+ m = mm;
50
+ n = nn;
51
+ k = kk;
52
+ }
53
+ };
54
+
55
+ StridedBatchGemm(const Config& config) : _config(config) {}
56
+
57
+ virtual ~StridedBatchGemm() {}
58
+
59
+ void Forward(int bsz, T* output, const T* _buffer_a, const T* _buffer_b, cublasHandle_t handle)
60
+ {
61
+ int stride_a = _config.m * _config.k;
62
+ int stride_b = _config.n * _config.k;
63
+ int stride_c = _config.m * _config.n;
64
+
65
+ cublas_strided_batched_gemm(handle,
66
+ _config.m,
67
+ _config.n,
68
+ _config.k,
69
+ &_config.alpha,
70
+ &_config.beta,
71
+ _buffer_a,
72
+ _buffer_b,
73
+ output,
74
+ _config.op_A,
75
+ _config.op_B,
76
+ stride_a,
77
+ stride_b,
78
+ stride_c,
79
+ bsz,
80
+ #ifdef __HIP_PLATFORM_AMD__
81
+ rocblas_gemm_algo(_config.gemm_algos[0]));
82
+ #else
83
+ cublasGemmAlgo_t(_config.gemm_algos[0]));
84
+ #endif
85
+ }
86
+
87
+ void ForwardPlusSave(T* output, const T* _buffer_a, const T* _buffer_b, cublasHandle_t handle)
88
+ {
89
+ int stride_a = _config.m * _config.k;
90
+ int stride_b = _config.n * _config.k;
91
+ int stride_c = _config.m * _config.n;
92
+
93
+ cublas_strided_batched_gemm(handle,
94
+ _config.m,
95
+ _config.n,
96
+ _config.k,
97
+ &_config.alpha,
98
+ &_config.beta,
99
+ _buffer_a,
100
+ _buffer_b,
101
+ output,
102
+ _config.op_A,
103
+ _config.op_B,
104
+ stride_a,
105
+ stride_b,
106
+ stride_c,
107
+ _config.batch_size,
108
+ #ifdef __HIP_PLATFORM_AMD__
109
+ rocblas_gemm_algo(_config.gemm_algos[0]));
110
+ #else
111
+ cublasGemmAlgo_t(_config.gemm_algos[0]));
112
+ #endif
113
+
114
+ k_buf = _buffer_a;
115
+ q_buf = _buffer_b;
116
+ }
117
+
118
+ void Backward(int bsz,
119
+ const T* d_output,
120
+ const T* _buffer_a,
121
+ const T* _buffer_b,
122
+ cublasHandle_t handle,
123
+ T* inpGradA = nullptr,
124
+ T* inpGradB = nullptr)
125
+ {
126
+ int mb = (_config.op_A == CUBLAS_OP_T ? _config.k : _config.m);
127
+ int kb = (_config.op_A == CUBLAS_OP_T ? _config.m : _config.k);
128
+
129
+ int stride_a = mb * _config.n;
130
+ int stride_b = _config.n * kb;
131
+ int stride_c = _config.m * _config.k;
132
+
133
+ // B needs to be transposed.
134
+ cublasOperation_t op_b = (_config.op_B == CUBLAS_OP_T ? CUBLAS_OP_N : CUBLAS_OP_T);
135
+
136
+ // Calculate d_A.
137
+ cublas_strided_batched_gemm(handle,
138
+ mb,
139
+ kb,
140
+ _config.n,
141
+ &_config.alpha,
142
+ &_config.beta,
143
+ (_config.op_A == CUBLAS_OP_T ? _buffer_b : d_output),
144
+ (_config.op_A == CUBLAS_OP_T ? d_output : _buffer_b),
145
+ inpGradA,
146
+ CUBLAS_OP_N,
147
+ op_b,
148
+ stride_a,
149
+ stride_b,
150
+ stride_c,
151
+ bsz,
152
+ #ifdef __HIP_PLATFORM_AMD__
153
+ rocblas_gemm_algo(_config.gemm_algos[1]));
154
+ #else
155
+ cublasGemmAlgo_t(_config.gemm_algos[1]));
156
+ #endif
157
+
158
+ // A needs to be transposed.
159
+ cublasOperation_t op_a = (_config.op_A == CUBLAS_OP_T ? CUBLAS_OP_N : CUBLAS_OP_T);
160
+
161
+ stride_a = _config.m * _config.k;
162
+ stride_b = _config.m * _config.n;
163
+ stride_c = _config.n * _config.k;
164
+
165
+ // Calculate d_B.
166
+ cublas_strided_batched_gemm(handle,
167
+ _config.k,
168
+ _config.n,
169
+ _config.m,
170
+ &_config.alpha,
171
+ &_config.beta,
172
+ _buffer_a,
173
+ d_output,
174
+ inpGradB,
175
+ op_a,
176
+ CUBLAS_OP_N,
177
+ stride_a,
178
+ stride_b,
179
+ stride_c,
180
+ bsz,
181
+ #ifdef __HIP_PLATFORM_AMD__
182
+ rocblas_gemm_algo(_config.gemm_algos[2]));
183
+ #else
184
+ cublasGemmAlgo_t(_config.gemm_algos[2]));
185
+ #endif
186
+ }
187
+
188
+ inline int GetN() const { return _config.k; }
189
+
190
+ inline const T* GetBufferA() const { return k_buf; }
191
+
192
+ inline const T* GetBufferB() const { return q_buf; }
193
+
194
+ inline void SetConfig(int m, int n, int k) { _config.SetConfig(m, n, k); }
195
+
196
+ private:
197
+ Config _config;
198
+ const T* q_buf;
199
+ const T* k_buf;
200
+ };
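For context, the wrapper above is driven roughly as follows. The configuration below (a Q·K attention-score GEMM with `op_A = CUBLAS_OP_T` and a 1/sqrt(head_dim) scaling) is an illustrative assumption about how the transformer kernels use it, not something defined in this header; `cublas_strided_batched_gemm` itself comes from cublas_wrappers.h.

```cpp
// Hypothetical usage sketch: batched attention-score GEMM via StridedBatchGemm.
#include <array>
#include <cmath>
#include "strided_batch_gemm.h"

void attention_scores(float* scores, const float* k, const float* q,
                      int batch_heads, int seq_len, int head_dim, cublasHandle_t handle)
{
    // Algorithm ids are carried as plain ints; -1 corresponds to CUBLAS_GEMM_DEFAULT.
    std::array<int, 3> algos = {-1, -1, -1};

    StridedBatchGemm<float>::Config cfg(batch_heads,
                                        seq_len,                            // m
                                        seq_len,                            // n
                                        head_dim,                           // k
                                        1.0f / std::sqrt((float)head_dim),  // alpha
                                        0.0f,                               // beta
                                        CUBLAS_OP_T,                        // op_A (transpose K)
                                        CUBLAS_OP_N,                        // op_B
                                        algos);
    StridedBatchGemm<float> gemm(cfg);
    gemm.Forward(batch_heads, scores, k, q, handle);
}
```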
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/includes/type_shim.h ADDED
@@ -0,0 +1,124 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ /* Taken from NVIDIA/apex commit 855808f3fc268e9715d613f3c2e56469d8c986d8 */
7
+ #include <ATen/ATen.h>
8
+
9
+ // Forward/backward compatibility hack around
10
+ // https://github.com/pytorch/pytorch/commit/3aeb78079bcd68282fe9117088e138b77318e288
11
+ // pending more future-proof guidance from upstream.
12
+ // struct TypeShim
13
+ // {
14
+ // const at::Type& payload;
15
+ // TypeShim(const at::Type& type) : payload(type) {}
16
+ // // Enable trivial conversion to a const at::Type& for pre-3aeb78
17
+ // operator const at::Type&(){ return payload; };
18
+ // // Enable dispatch switch statements to take *this directly for post-3aeb78
19
+ // //operator at::ScalarType(){ return payload.; };
20
+ // };
21
+
22
+ #define DISPATCH_FLOAT_AND_HALF(TYPE, LEVEL, NAME, ...) \
23
+ switch (TYPE) { \
24
+ case at::ScalarType::Float: { \
25
+ using scalar_t_##LEVEL = float; \
26
+ __VA_ARGS__; \
27
+ break; \
28
+ } \
29
+ case at::ScalarType::Half: { \
30
+ using scalar_t_##LEVEL = at::Half; \
31
+ __VA_ARGS__; \
32
+ break; \
33
+ } \
34
+ case at::ScalarType::BFloat16: { \
35
+ using scalar_t_##LEVEL = at::BFloat16; \
36
+ __VA_ARGS__; \
37
+ break; \
38
+ } \
39
+ default: AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
40
+ }
41
+
42
+ #define DISPATCH_DOUBLE_FLOAT_AND_HALF(TYPE, LEVEL, NAME, ...) \
43
+ switch (TYPE) { \
44
+ case at::ScalarType::Double: { \
45
+ using scalar_t_##LEVEL = double; \
46
+ __VA_ARGS__; \
47
+ break; \
48
+ } \
49
+ case at::ScalarType::Float: { \
50
+ using scalar_t_##LEVEL = float; \
51
+ __VA_ARGS__; \
52
+ break; \
53
+ } \
54
+ case at::ScalarType::Half: { \
55
+ using scalar_t_##LEVEL = at::Half; \
56
+ __VA_ARGS__; \
57
+ break; \
58
+ } \
59
+ case at::ScalarType::BFloat16: { \
60
+ using scalar_t_##LEVEL = at::BFloat16; \
61
+ __VA_ARGS__; \
62
+ break; \
63
+ } \
64
+ default: AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
65
+ }
66
+
67
+ #define DISPATCH_DOUBLE_AND_FLOAT(TYPE, LEVEL, NAME, ...) \
68
+ switch (TYPE) { \
69
+ case at::ScalarType::Double: { \
70
+ using scalar_t_##LEVEL = double; \
71
+ __VA_ARGS__; \
72
+ break; \
73
+ } \
74
+ case at::ScalarType::Float: { \
75
+ using scalar_t_##LEVEL = float; \
76
+ __VA_ARGS__; \
77
+ break; \
78
+ } \
79
+ default: AT_ERROR(#NAME, " not implemented for '", toString(TYPE), "'"); \
80
+ }
81
+
82
+ template <typename T>
83
+ __device__ __forceinline__ T
84
+ reduce_block_into_lanes(T* x,
85
+ T val,
86
+ int lanes = 1,
87
+ bool share_result = false) // lanes is intended to be <= 32.
88
+ {
89
+ int tid = threadIdx.x + threadIdx.y * blockDim.x;
90
+ int blockSize = blockDim.x * blockDim.y; // blockSize is intended to be a multiple of 32.
91
+
92
+ if (blockSize >= 64) {
93
+ x[tid] = val;
94
+ __syncthreads();
95
+ }
96
+
97
+ #pragma unroll
98
+ for (int i = (blockSize >> 1); i >= 64; i >>= 1) {
99
+ if (tid < i) x[tid] = x[tid] + x[tid + i];
100
+ __syncthreads();
101
+ }
102
+
103
+ T final;
104
+
105
+ if (tid < 32) {
106
+ if (blockSize >= 64)
107
+ final = x[tid] + x[tid + 32];
108
+ else
109
+ final = val;
110
+ // __SYNCWARP();
111
+
112
+ #pragma unroll
113
+ for (int i = 16; i >= lanes; i >>= 1)
114
+ final = final + __shfl_down_sync(0xffffffff, final, i);
115
+ }
116
+
117
+ if (share_result) {
118
+ if (tid < lanes) x[tid] = final; // EpilogueOp
119
+ // Make sure the smem result is visible to all warps.
120
+ __syncthreads();
121
+ }
122
+
123
+ return final;
124
+ }
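A short sketch of how the dispatch macros above are meant to be used: the switch expands its body once per supported dtype with `scalar_t_<LEVEL>` bound to the matching C++ type, so a single templated kernel covers float, half, and bfloat16. The kernel and launch shape here are illustrative, not part of this header, and the file would be compiled as CUDA.

```cpp
// Hypothetical .cu usage of DISPATCH_FLOAT_AND_HALF (LEVEL = 0 binds scalar_t_0).
#include "type_shim.h"

template <typename T>
__global__ void scale_kernel(T* out, const T* in, float scale, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = static_cast<T>(static_cast<float>(in[i]) * scale);
}

void scale_tensor(at::Tensor& out, const at::Tensor& in, float scale)
{
    const int n = static_cast<int>(in.numel());
    DISPATCH_FLOAT_AND_HALF(in.scalar_type(), 0, scale_tensor,
                            scale_kernel<scalar_t_0><<<(n + 255) / 256, 256>>>(
                                out.data_ptr<scalar_t_0>(), in.data_ptr<scalar_t_0>(), scale, n));
}
```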
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/lamb/fused_lamb_cuda.cpp ADDED
@@ -0,0 +1,113 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include <torch/extension.h>
7
+
8
+ // CUDA forward declaration
9
+ void fused_lamb_cuda(at::Tensor& p,
10
+ at::Tensor& p_copy,
11
+ at::Tensor& m,
12
+ at::Tensor& v,
13
+ at::Tensor& g,
14
+ float lr,
15
+ float beta1,
16
+ float beta2,
17
+ float max_coeff,
18
+ float min_coeff,
19
+ float eps,
20
+ float grad_scale,
21
+ int step,
22
+ int mode,
23
+ int bias_correction,
24
+ float decay,
25
+ at::Tensor& w_l2_i,
26
+ at::Tensor& u_l2_i,
27
+ at::Tensor& lamb_coeff_val);
28
+
29
+ #define CHECK_CUDA(x) AT_ASSERTM(x.is_cuda(), #x " must be a CUDA tensor")
30
+ #define CHECK_CONTIGUOUS(x) AT_ASSERTM(x.is_contiguous(), #x " must be contiguous")
31
+ #define CHECK_INPUT(x) \
32
+ CHECK_CUDA(x); \
33
+ CHECK_CONTIGUOUS(x)
34
+
35
+ // C++ interface
36
+ at::Tensor lamb(at::Tensor& p,
37
+ at::Tensor& p_copy,
38
+ at::Tensor& m,
39
+ at::Tensor& v,
40
+ at::Tensor& g,
41
+ float lr,
42
+ float beta1,
43
+ float beta2,
44
+ float max_coeff,
45
+ float min_coeff,
46
+ float eps,
47
+ float grad_scale,
48
+ int step,
49
+ int mode,
50
+ int bias_correction,
51
+ float decay)
52
+ {
53
+ CHECK_INPUT(p);
54
+ if (p_copy.numel() > 0) CHECK_INPUT(p_copy);
55
+ CHECK_INPUT(m);
56
+ CHECK_INPUT(v);
57
+ CHECK_INPUT(g);
58
+ int64_t num_elem = p.numel();
59
+ AT_ASSERTM(m.numel() == num_elem, "number of elements in m and p tensors should be equal");
60
+ AT_ASSERTM(v.numel() == num_elem, "number of elements in v and p tensors should be equal");
61
+ AT_ASSERTM(g.numel() == num_elem, "number of elements in g and p tensors should be equal");
62
+ AT_ASSERTM(
63
+ p_copy.numel() == num_elem || p_copy.numel() == 0,
64
+ "number of elements in p_copy and p tensors should be equal, or p_copy should be empty");
65
+
66
+ // intermediate for weight L2 reduction
67
+ // make sure that the threads per block is at least 512 during the kernel launch otherwise the
68
+ // behaviour is unexpected
69
+ at::Tensor w_l2_i = at::empty(
70
+ {512},
71
+ p.options().dtype(p.type().scalarType() == at::ScalarType::Half ? at::ScalarType::Float
72
+ : p.type().scalarType()));
73
+
74
+ // intermediate for update L2 reduction
75
+ // make sure that the threads per block is at least 512 during the kernel launch otherwise the
76
+ // behaviour is unexpected
77
+ at::Tensor u_l2_i = at::empty(
78
+ {512},
79
+ p.options().dtype(p.type().scalarType() == at::ScalarType::Half ? at::ScalarType::Float
80
+ : p.type().scalarType()));
81
+
82
+ at::Tensor lamb_coeff_val = at::empty(
83
+ {1},
84
+ p.options().dtype(p.type().scalarType() == at::ScalarType::Half ? at::ScalarType::Float
85
+ : p.type().scalarType()));
86
+
87
+ fused_lamb_cuda(p,
88
+ p_copy,
89
+ m,
90
+ v,
91
+ g,
92
+ lr,
93
+ beta1,
94
+ beta2,
95
+ max_coeff,
96
+ min_coeff,
97
+ eps,
98
+ grad_scale,
99
+ step,
100
+ mode,
101
+ bias_correction,
102
+ decay,
103
+ w_l2_i,
104
+ u_l2_i,
105
+ lamb_coeff_val);
106
+
107
+ return lamb_coeff_val;
108
+ }
109
+
110
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
111
+ {
112
+ m.def("lamb", &lamb, "Adam optimized CUDA implementation with LAMB.");
113
+ }
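For reference, the binding above can also be exercised directly from C++ within the same translation unit (normally it is reached from Python through the compiled extension). The hyperparameters below are illustrative defaults, not values prescribed by this file; passing an empty `p_copy` skips the fp16 master-copy path, which the size check above explicitly allows.

```cpp
// Hypothetical driver for the lamb() binding defined above (illustrative hyperparameters).
at::Tensor lamb_step(at::Tensor& p, at::Tensor& m, at::Tensor& v, at::Tensor& g, int step)
{
    at::Tensor p_copy = at::empty({0}, p.options());  // empty: no fp16 copy of the params
    return lamb(p, p_copy, m, v, g,
                /*lr=*/1e-3f,
                /*beta1=*/0.9f,
                /*beta2=*/0.999f,
                /*max_coeff=*/10.0f,
                /*min_coeff=*/0.01f,
                /*eps=*/1e-8f,
                /*grad_scale=*/1.0f,
                step,
                /*mode=*/0,             // eps under the square root
                /*bias_correction=*/1,
                /*decay=*/0.01f);       // returns the LAMB trust-ratio coefficient
}
```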
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/lamb/fused_lamb_cuda_kernel.cu ADDED
@@ -0,0 +1,478 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include <cuda.h>
7
+ #include <cuda_runtime.h>
8
+ #include <stdio.h>
9
+ #include <cmath>
10
+ #include "ATen/ATen.h"
11
+ #include "ATen/TensorUtils.h"
12
+ #include "ATen/cuda/CUDAContext.h"
13
+ #include "ATen/cuda/detail/IndexUtils.cuh"
14
+ // #include "ATen/Type.h"
15
+ #include "ATen/AccumulateType.h"
16
+
17
+ #include <iostream>
18
+
19
+ // #include <helper_functions.h>
20
+ #if defined(__HIP_PLATFORM_AMD__) && HIP_VERSION > 305
21
+ #include <hip/hip_cooperative_groups.h>
22
+ #else
23
+ #include <cooperative_groups.h>
24
+ #endif
25
+ #include <cuda_runtime_api.h>
26
+ #include <stdio.h>
27
+
28
+ namespace cg = cooperative_groups;
29
+
30
+ // Utility class used to avoid linker errors with extern
31
+ // unsized shared memory arrays with templated type
32
+ namespace {
33
+ // This is the un-specialized struct. Note that we prevent instantiation of this
34
+ // struct by putting an undefined symbol in the function body so it won't compile.
35
+ template <typename T>
36
+ struct SharedMemory {
37
+ // Ensure that we won't compile any un-specialized types
38
+ __device__ inline operator T*()
39
+ {
40
+ #ifndef _WIN32
41
+ extern __device__ void error(void);
42
+ error();
43
+ #endif
44
+ return NULL;
45
+ }
46
+ };
47
+
48
+ template <>
49
+ struct SharedMemory<float> {
50
+ __device__ inline operator float*()
51
+ {
52
+ extern __shared__ float s_float[];
53
+ return s_float;
54
+ }
55
+ };
56
+
57
+ template <>
58
+ struct SharedMemory<double> {
59
+ __device__ inline operator double*()
60
+ {
61
+ extern __shared__ double s_double[];
62
+ return s_double;
63
+ }
64
+ };
65
+ } // namespace
66
+
67
+ #include "type_shim.h"
68
+
69
+ typedef enum {
70
+ ADAM_MODE_0 = 0, // eps under square root
71
+ ADAM_MODE_1 = 1 // eps outside square root
72
+ } adamMode_t;
73
+
74
+ // s_a and s_b are in shared memory
75
+ // g_a and g_b are in shared memory
76
+ template <typename T, int blockSize>
77
+ __device__ void reduce_block_in_shared_memory(T* s_a, T* s_b, T* g_a, T* g_b)
78
+ {
79
+ // Handle to thread block group
80
+ cg::thread_block cta = cg::this_thread_block();
81
+
82
+ // perform block reduction in shared memory,
83
+ unsigned int tid = cta.thread_rank();
84
+
85
+ T a_sum = s_a[tid];
86
+ T b_sum = s_b[tid];
87
+
88
+ cg::sync(cta);
89
+
90
+ // do reduction in shared mem
91
+ if ((blockSize >= 512) && (tid < 256)) {
92
+ s_a[tid] = a_sum = a_sum + s_a[tid + 256];
93
+ s_b[tid] = b_sum = b_sum + s_b[tid + 256];
94
+ }
95
+
96
+ cg::sync(cta);
97
+
98
+ if ((blockSize >= 256) && (tid < 128)) {
99
+ s_a[tid] = a_sum = a_sum + s_a[tid + 128];
100
+ s_b[tid] = b_sum = b_sum + s_b[tid + 128];
101
+ }
102
+
103
+ cg::sync(cta);
104
+
105
+ if ((blockSize >= 128) && (tid < 64)) {
106
+ s_a[tid] = a_sum = a_sum + s_a[tid + 64];
107
+ s_b[tid] = b_sum = b_sum + s_b[tid + 64];
108
+ }
109
+
110
+ cg::sync(cta);
111
+
112
+ #if (__CUDA_ARCH__ >= 300) || (defined(__HIP_PLATFORM_AMD__) && HIP_VERSION >= 502)
113
+ if (tid < 32) {
114
+ cg::coalesced_group active = cg::coalesced_threads();
115
+
116
+ // Fetch final intermediate sum from 2nd warp
117
+ if (blockSize >= 64) {
118
+ a_sum = a_sum + s_a[tid + 32];
119
+ b_sum = b_sum + s_b[tid + 32];
120
+ }
121
+
122
+ // Reduce final warp using shuffle
123
+ for (int offset = warpSize / 2; offset > 0; offset /= 2) {
124
+ a_sum += active.shfl_down(a_sum, offset);
125
+ b_sum += active.shfl_down(b_sum, offset);
126
+ }
127
+ }
128
+ #else
129
+ if ((blockSize >= 64) && (tid < 32)) {
130
+ s_a[tid] = a_sum = a_sum + s_a[tid + 32];
131
+ s_b[tid] = b_sum = b_sum + s_b[tid + 32];
132
+ }
133
+
134
+ cg::sync(cta);
135
+
136
+ if ((blockSize >= 32) && (tid < 16)) {
137
+ s_a[tid] = a_sum = a_sum + s_a[tid + 16];
138
+ s_b[tid] = b_sum = b_sum + s_b[tid + 16];
139
+ }
140
+
141
+ cg::sync(cta);
142
+
143
+ if ((blockSize >= 16) && (tid < 8)) {
144
+ s_a[tid] = a_sum = a_sum + s_a[tid + 8];
145
+ s_b[tid] = b_sum = b_sum + s_b[tid + 8];
146
+ }
147
+
148
+ cg::sync(cta);
149
+
150
+ if ((blockSize >= 8) && (tid < 4)) {
151
+ s_a[tid] = a_sum = a_sum + s_a[tid + 4];
152
+ s_b[tid] = b_sum = b_sum + s_b[tid + 4];
153
+ }
154
+
155
+ cg::sync(cta);
156
+
157
+ if ((blockSize >= 4) && (tid < 2)) {
158
+ s_a[tid] = a_sum = a_sum + s_a[tid + 2];
159
+ s_b[tid] = b_sum = b_sum + s_b[tid + 2];
160
+ }
161
+
162
+ cg::sync(cta);
163
+
164
+ if ((blockSize >= 2) && (tid < 1)) {
165
+ s_a[tid] = a_sum = a_sum + s_a[tid + 1];
166
+ s_b[tid] = b_sum = b_sum + s_b[tid + 1];
167
+ }
168
+
169
+ cg::sync(cta);
170
+
171
+ #endif
172
+
173
+ // write result for this block to global mem
174
+ if (tid == 0) {
175
+ g_a[blockIdx.x] = (T)a_sum;
176
+ g_b[blockIdx.x] = (T)b_sum;
177
+ }
178
+ }
179
+
180
+ template <typename T, int blockSize>
181
+ __device__ void reduce_two_vectors_in_register(T a, T b, T* g_a, T* g_b)
182
+ {
183
+ const int threadIdInBlock = cg::this_thread_block().thread_rank();
184
+
185
+ T* s_a = SharedMemory<T>();
186
+ T* s_b = SharedMemory<T>() + cg::this_thread_block().size();
187
+
188
+ s_a[threadIdInBlock] = a;
189
+ s_b[threadIdInBlock] = b;
190
+
191
+ reduce_block_in_shared_memory<T, blockSize>(s_a, s_b, g_a, g_b);
192
+ }
193
+
194
+ template <typename T, typename GRAD_T, int blockSize>
195
+ __global__ void lamb_cuda_kernel_part1(
196
+ T* __restrict__ p,
197
+ GRAD_T* __restrict__ p_copy, // For mixed precision training, pass NULL if not needed
198
+ T* __restrict__ m,
199
+ T* __restrict__ v,
200
+ const GRAD_T* __restrict__ g,
201
+ const float b1,
202
+ const float b2,
203
+ const float eps,
204
+ const float grad_scale,
205
+ const float step_size,
206
+ const size_t tsize,
207
+ adamMode_t mode,
208
+ const float decay,
209
+ T* __restrict__ w_l2_i,
210
+ T* __restrict__ u_l2_i)
211
+ {
212
+ // Assuming 2D grids and 2D blocks
213
+ const int blockId = gridDim.x * blockIdx.y + blockIdx.x;
214
+ const int threadsPerBlock = blockDim.x * blockDim.y;
215
+ const int threadIdInBlock = cg::this_thread_block().thread_rank();
216
+ const int i = (blockId * threadsPerBlock + threadIdInBlock);
217
+ const int totThreads = gridDim.x * gridDim.y * threadsPerBlock;
218
+
219
+ T reg_w = 0;
220
+ T reg_u = 0;
221
+
222
+ for (int j = i; j < tsize; j += totThreads) {
223
+ T scaled_grad = g[j] / grad_scale;
224
+ T pj = p[j];
225
+ m[j] = b1 * m[j] + (1 - b1) * scaled_grad;
226
+ v[j] = b2 * v[j] + (1 - b2) * scaled_grad * scaled_grad;
227
+ float denom;
228
+ if (mode == ADAM_MODE_0)
229
+ denom = sqrtf(v[j] + eps);
230
+ else // Mode 1
231
+ denom = sqrtf(v[j]) + eps;
232
+ T update = (m[j] / denom) + (decay * p[j]);
233
+
234
+ reg_u += update * update;
235
+ reg_w += pj * pj;
236
+ }
237
+
238
+ reduce_two_vectors_in_register<T, blockSize>(reg_w, reg_u, w_l2_i, u_l2_i);
239
+ }
240
+
241
+ template <typename T, typename GRAD_T, int blockSize>
242
+ __global__ void lamb_cuda_kernel_part2(const size_t tsize, T* __restrict__ g_a, T* __restrict__ g_b)
243
+ {
244
+ T* s_a = SharedMemory<T>();
245
+ T* s_b = SharedMemory<T>() + cg::this_thread_block().size();
246
+
247
+ const int threadIdInBlock = cg::this_thread_block().thread_rank();
248
+
249
+ s_a[threadIdInBlock] = g_a[threadIdInBlock];
250
+ s_b[threadIdInBlock] = g_b[threadIdInBlock];
251
+
252
+ if (threadIdInBlock >= tsize) {
253
+ s_a[threadIdInBlock] = 0.0;
254
+ s_b[threadIdInBlock] = 0.0;
255
+ }
256
+
257
+ reduce_block_in_shared_memory<T, blockSize>(s_a, s_b, g_a, g_b);
258
+ }
259
+
260
+ template <typename T, typename GRAD_T>
261
+ __global__ void lamb_cuda_kernel_part3(
262
+ T* __restrict__ p,
263
+ GRAD_T* __restrict__ p_copy, // For mixed precision training, pass NULL if not needed
264
+ T* __restrict__ m,
265
+ T* __restrict__ v,
266
+ const GRAD_T* __restrict__ g,
267
+ const float b1,
268
+ const float b2,
269
+ const float max_coeff,
270
+ const float min_coeff,
271
+ const float eps,
272
+ const float grad_scale,
273
+ const float step_size,
274
+ const size_t tsize,
275
+ adamMode_t mode,
276
+ const float decay,
277
+ T* __restrict__ w_l2_i,
278
+ T* __restrict__ u_l2_i,
279
+ T* __restrict__ lamb_coeff_val)
280
+ {
281
+ // Assuming 2D grids and 2D blocks
282
+ const int blockId = gridDim.x * blockIdx.y + blockIdx.x;
283
+ const int threadsPerBlock = blockDim.x * blockDim.y;
284
+ const int threadIdInBlock = cg::this_thread_block().thread_rank();
285
+ const int i = (blockId * threadsPerBlock + threadIdInBlock);
286
+ const int totThreads = gridDim.x * gridDim.y * threadsPerBlock;
287
+
288
+ T reg_w = sqrtf(w_l2_i[0]);
289
+ T reg_u = sqrtf(u_l2_i[0]);
290
+
291
+ float lamb_coeff = 1.0;
292
+
293
+ if (reg_w != 0 && reg_u != 0) {
294
+ lamb_coeff = reg_w / reg_u;
295
+ if (lamb_coeff > max_coeff) { lamb_coeff = max_coeff; }
296
+ if (lamb_coeff < min_coeff) { lamb_coeff = min_coeff; }
297
+ }
298
+
299
+ if (blockId == 0 && threadIdInBlock == 0) {
300
+ lamb_coeff_val[0] = lamb_coeff;
301
+ // printf("Cuda Lamb Coeff is %.6f \n",lamb_coeff);
302
+ }
303
+
304
+ for (int j = i; j < tsize; j += totThreads) {
305
+ T pj = (float)p[j];
306
+ T mj = m[j];
307
+ T vj = v[j];
308
+ float denom;
309
+ if (mode == ADAM_MODE_0)
310
+ denom = sqrtf(vj + eps);
311
+ else // Mode 1
312
+ denom = sqrtf(vj) + eps;
313
+ T update = (mj / denom) + (decay * pj);
314
+
315
+ pj = pj - (step_size * lamb_coeff * update);
316
+ p[j] = pj;
317
+ if (p_copy != NULL) p_copy[j] = (GRAD_T)pj;
318
+ }
319
+ }
320
+
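The trust ratio applied in `lamb_cuda_kernel_part3` above is simply the clamped quotient of the two L2 norms accumulated by the earlier phases, with a fallback to 1.0 when either norm is zero. A minimal host-side sketch of that clamp, useful when unit-testing expected coefficients; the function name `lamb_trust_ratio` and the sample values are illustrative, not part of this extension:

```cpp
// Host-side sketch of the trust-ratio (lamb_coeff) clamp applied in part3.
// w_norm and u_norm stand for the L2 norms of the parameters and of the update.
#include <algorithm>
#include <cstdio>

float lamb_trust_ratio(float w_norm, float u_norm, float min_coeff, float max_coeff)
{
    if (w_norm == 0.f || u_norm == 0.f) return 1.f;  // fall back to a plain Adam-style step
    return std::min(std::max(w_norm / u_norm, min_coeff), max_coeff);
}

int main()
{
    std::printf("%.3f\n", lamb_trust_ratio(2.5f, 0.5f, 0.01f, 10.f));  // 5.000
    std::printf("%.3f\n", lamb_trust_ratio(0.0f, 0.5f, 0.01f, 10.f));  // 1.000
    return 0;
}
```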
321
+ void fused_lamb_cuda(at::Tensor& p,
322
+ at::Tensor& p_copy,
323
+ at::Tensor& m,
324
+ at::Tensor& v,
325
+ at::Tensor& g,
326
+ float lr,
327
+ float beta1,
328
+ float beta2,
329
+ float max_coeff,
330
+ float min_coeff,
331
+ float eps,
332
+ float grad_scale,
333
+ int step,
334
+ int mode,
335
+ int bias_correction,
336
+ float decay,
337
+ at::Tensor& w_l2_i,
338
+ at::Tensor& u_l2_i,
339
+ at::Tensor& lamb_coeff)
340
+ {
341
+ // using namespace at;
342
+
343
+ // Get tensor size
344
+ int tsize = p.numel();
345
+ // Determine #threads and #blocks
346
+ const int threadsPerBlock = 512;
347
+ int num_blocks = (tsize + threadsPerBlock - 1) / threadsPerBlock;
348
+ if (num_blocks > 512) num_blocks = 512;
349
+
350
+ int smemsize = 0;
351
+
352
+ if (p.type().scalarType() == at::ScalarType::Double)
353
+ smemsize = 2 * threadsPerBlock * sizeof(double);
354
+ else
355
+ smemsize = 2 * threadsPerBlock * sizeof(float);
356
+
357
+ const dim3 blocks(num_blocks);
358
+ const dim3 threads(threadsPerBlock);
359
+
360
+ AT_ASSERTM(at::cuda::detail::canUse32BitIndexMath(p),
361
+ "parameter tensor is too large to be indexed with int32");
362
+ // Constants
363
+ float step_size = 0;
364
+ if (bias_correction == 1) {
365
+ const float bias_correction1 = 1 - std::pow(beta1, step);
366
+ const float bias_correction2 = 1 - std::pow(beta2, step);
367
+ step_size = lr * std::sqrt(bias_correction2) / bias_correction1;
368
+ } else {
369
+ step_size = lr;
370
+ }
371
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream();
372
+
373
+ if (g.type().scalarType() == at::ScalarType::Half) {
374
+ // all other values should be fp32 for half gradients
375
+ AT_ASSERTM(p.type().scalarType() == at::ScalarType::Float,
376
+ "expected parameter to be of float type");
377
+ // dispatch is done on the gradient type
378
+ using namespace at; // prevents "toString is undefined" errors
379
+ AT_DISPATCH_FLOATING_TYPES_AND_HALF(
380
+ g.scalar_type(), "lamb_cuda_kernel", ([&] {
381
+ using accscalar_t = at::acc_type<scalar_t, true>;
382
+
383
+ lamb_cuda_kernel_part1<accscalar_t, scalar_t, threadsPerBlock>
384
+ <<<blocks, threadsPerBlock, smemsize, stream>>>(
385
+ p.data<accscalar_t>(),
386
+ p_copy.numel() ? p_copy.data<scalar_t>() : NULL,
387
+ m.data<accscalar_t>(),
388
+ v.data<accscalar_t>(),
389
+ g.data<scalar_t>(),
390
+ beta1,
391
+ beta2,
392
+ eps,
393
+ grad_scale,
394
+ step_size,
395
+ tsize,
396
+ (adamMode_t)mode,
397
+ decay,
398
+ w_l2_i.data<accscalar_t>(),
399
+ u_l2_i.data<accscalar_t>());
400
+
401
+ lamb_cuda_kernel_part2<accscalar_t, scalar_t, threadsPerBlock>
402
+ <<<1, threadsPerBlock, smemsize, stream>>>(
403
+ num_blocks, w_l2_i.data<accscalar_t>(), u_l2_i.data<accscalar_t>());
404
+
405
+ lamb_cuda_kernel_part3<accscalar_t, scalar_t>
406
+ <<<blocks, threadsPerBlock, smemsize, stream>>>(
407
+ p.data<accscalar_t>(),
408
+ p_copy.numel() ? p_copy.data<scalar_t>() : NULL,
409
+ m.data<accscalar_t>(),
410
+ v.data<accscalar_t>(),
411
+ g.data<scalar_t>(),
412
+ beta1,
413
+ beta2,
414
+ max_coeff,
415
+ min_coeff,
416
+ eps,
417
+ grad_scale,
418
+ step_size,
419
+ tsize,
420
+ (adamMode_t)mode,
421
+ decay,
422
+ w_l2_i.data<accscalar_t>(),
423
+ u_l2_i.data<accscalar_t>(),
424
+ lamb_coeff.data<accscalar_t>());
425
+ }));
426
+ } else {
427
+ using namespace at;
428
+ AT_DISPATCH_FLOATING_TYPES(
429
+ g.scalar_type(), "lamb_cuda_kernel", ([&] {
430
+ lamb_cuda_kernel_part1<scalar_t, scalar_t, threadsPerBlock>
431
+ <<<blocks, threadsPerBlock, smemsize, stream>>>(
432
+ p.data<scalar_t>(),
433
+ NULL, // don't output p_copy for fp32; it would be a wasted write
434
+ m.data<scalar_t>(),
435
+ v.data<scalar_t>(),
436
+ g.data<scalar_t>(),
437
+ beta1,
438
+ beta2,
439
+ eps,
440
+ grad_scale,
441
+ step_size,
442
+ tsize,
443
+ (adamMode_t)mode,
444
+ decay,
445
+ w_l2_i.data<scalar_t>(),
446
+ u_l2_i.data<scalar_t>());
447
+
448
+ lamb_cuda_kernel_part2<scalar_t, scalar_t, threadsPerBlock>
449
+ <<<1, threadsPerBlock, smemsize, stream>>>(
450
+ num_blocks, w_l2_i.data<scalar_t>(), u_l2_i.data<scalar_t>());
451
+
452
+ lamb_cuda_kernel_part3<scalar_t, scalar_t>
453
+ <<<blocks, threadsPerBlock, smemsize, stream>>>(
454
+ p.data<scalar_t>(),
455
+ NULL, // don't output p_copy for fp32; it would be a wasted write
456
+ m.data<scalar_t>(),
457
+ v.data<scalar_t>(),
458
+ g.data<scalar_t>(),
459
+ beta1,
460
+ beta2,
461
+ max_coeff,
462
+ min_coeff,
463
+ eps,
464
+ grad_scale,
465
+ step_size,
466
+ tsize,
467
+ (adamMode_t)mode,
468
+ decay,
469
+ w_l2_i.data<scalar_t>(),
470
+ u_l2_i.data<scalar_t>(),
471
+ lamb_coeff.data<scalar_t>());
472
+ }));
473
+ }
474
+ C10_CUDA_CHECK(cudaGetLastError());
475
+ }
476
+
477
+ // template __device__ void reduce_two_vectors_in_register<float,512>(float a, float b, float* g_a,
478
+ // float* g_b, cg::grid_group &cgg);
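For reference, the bias-corrected step size computed on the host in `fused_lamb_cuda` reduces to the familiar Adam correction factors. Below is a standalone C++ sketch of just that arithmetic; the helper name and the sample hyperparameters are made up for illustration, only the math mirrors the launcher:

```cpp
#include <cmath>
#include <cstdio>

// Hypothetical helper mirroring the host-side step-size math in fused_lamb_cuda.
float lamb_step_size(float lr, float beta1, float beta2, int step, bool bias_correction)
{
    if (!bias_correction) return lr;
    const float bias_correction1 = 1.0f - std::pow(beta1, step);
    const float bias_correction2 = 1.0f - std::pow(beta2, step);
    return lr * std::sqrt(bias_correction2) / bias_correction1;
}

int main()
{
    const int steps[] = {1, 10, 100, 1000};
    for (int step : steps) {
        // The corrected step size approaches lr once both bias corrections saturate at 1.
        std::printf("step %4d -> step_size %.6e\n",
                    step, lamb_step_size(1e-3f, 0.9f, 0.999f, step, true));
    }
    return 0;
}
```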
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/quantization/dequantize.cu ADDED
@@ -0,0 +1,72 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "dequantization_utils.h"
7
+ #include "memory_access_utils.h"
8
+
9
+ namespace cg = cooperative_groups;
10
+
11
+ template <typename T, int numBits, dequantize::Type qType, int unroll, int threads>
12
+ __global__ void dequantize_kernel(T* __restrict__ dequant_data,
13
+ const int8_t* __restrict__ q_data,
14
+ const float* __restrict__ q_params,
15
+ int elems_per_group,
16
+ int total_elems)
17
+ {
18
+ dequantize::to_global<T, numBits, qType, unroll, threads>(
19
+ dequant_data, q_data, q_params, elems_per_group, total_elems);
20
+ }
21
+
22
+ #define LAUNCH_DEQUANT_KERNEL(num_bits, q_type) \
23
+ dequantize_kernel<T, num_bits, q_type, unroll, threads><<<grid, block, 0, stream>>>( \
24
+ dequant_data, q_data, q_params, elems_per_group, total_elems);
25
+
26
+ template <typename T>
27
+ void launch_dequantize_kernel(T* dequant_data,
28
+ const int8_t* q_data,
29
+ const float* q_params,
30
+ quantize::Type q_type,
31
+ int num_bits,
32
+ int elems_per_group,
33
+ int total_elems,
34
+ cudaStream_t stream)
35
+ {
36
+ constexpr int unroll = 8;
37
+ constexpr int threads = 512;
38
+ constexpr int elems_per_block = unroll * threads * dequantize::granularity / (sizeof(T));
39
+
40
+ const dim3 block(threads);
41
+ const dim3 grid((total_elems + elems_per_block - 1) / elems_per_block);
42
+
43
+ // TODO(cmikeh2): It may make sense to tune unroll; there is a perf benefit for large
44
+ // problem sizes with this large unroll value.
45
+ if (num_bits == 8 && q_type == quantize::Type::Symmetric) {
46
+ LAUNCH_DEQUANT_KERNEL(8, quantize::Type::Symmetric);
47
+ } else if (num_bits == 8 && q_type == quantize::Type::Asymmetric) {
48
+ LAUNCH_DEQUANT_KERNEL(8, quantize::Type::Asymmetric);
49
+ } else if (num_bits == 4 && q_type == quantize::Type::Symmetric) {
50
+ LAUNCH_DEQUANT_KERNEL(4, quantize::Type::Symmetric);
51
+ } else if (num_bits == 4 && q_type == quantize::Type::Asymmetric) {
52
+ LAUNCH_DEQUANT_KERNEL(4, quantize::Type::Asymmetric);
53
+ }
54
+ }
55
+
56
+ template void launch_dequantize_kernel(__half* dequant_data,
57
+ const int8_t* q_data,
58
+ const float* q_params,
59
+ quantize::Type q_type,
60
+ int num_bits,
61
+ int elems_per_group,
62
+ int total_elems,
63
+ cudaStream_t stream);
64
+
65
+ template void launch_dequantize_kernel(float* dequant_data,
66
+ const int8_t* q_data,
67
+ const float* q_params,
68
+ quantize::Type q_type,
69
+ int num_bits,
70
+ int elems_per_group,
71
+ int total_elems,
72
+ cudaStream_t stream);
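The launcher above sizes its grid from a fixed unroll of 8, 512 threads per block, and the per-thread load granularity used by the memory-access utilities. A small host-side sketch of that arithmetic, assuming `dequantize::granularity` is 16 bytes, which lets the block count be sanity-checked without compiling the kernel:

```cpp
// Host-side sketch of the grid-size arithmetic used by launch_dequantize_kernel.
// Assumes a 16-byte vectorized load granularity, matching the memory access utilities.
#include <cstdio>

int dequant_grid_size(int total_elems, int elem_bytes)
{
    constexpr int unroll = 8;
    constexpr int threads = 512;
    constexpr int granularity = 16;  // bytes moved per thread per load
    const int elems_per_block = unroll * threads * granularity / elem_bytes;
    return (total_elems + elems_per_block - 1) / elems_per_block;
}

int main()
{
    // __half payloads (2 bytes) vs float payloads (4 bytes) for one million elements.
    std::printf("half:  %d blocks\n", dequant_grid_size(1 << 20, 2));
    std::printf("float: %d blocks\n", dequant_grid_size(1 << 20, 4));
    return 0;
}
```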
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/quantization/fake_quantizer.cu ADDED
@@ -0,0 +1,1028 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include <math.h>
7
+ #include "custom_cuda_layers.h"
8
+ #include "memory_access_utils.h"
9
+
10
+ namespace cg = cooperative_groups;
11
+
12
+ __global__ void fake_quantize_kernel(__half* vals, int group_size, int num_bits)
13
+ {
14
+ #if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_AMD__)
15
+
16
+ cg::thread_block b = cg::this_thread_block(); // tb
17
+ cg::thread_block_tile<32> g =
18
+ cg::tiled_partition<32>(b); // warp; a tile of 32 is suboptimal on AMD, whose wavefront size is 64.
19
+
20
+ int gid = threadIdx.x >> 5;
21
+ int lane = threadIdx.x & 0x1f;
22
+ int warp_num = blockDim.x >> 5;
23
+ int id = threadIdx.x;
24
+
25
+ constexpr int granularity = 16;
26
+ constexpr int vals_per_access = granularity / sizeof(__half);
27
+
28
+ __half data[vals_per_access];
29
+
30
+ int group_id = blockIdx.x;
31
+
32
+ int thread_index = id * vals_per_access;
33
+ int reg_count = 0;
34
+ int offset = group_id * group_size;
35
+ float max = -10000.0;
36
+ for (int thread_index = id * vals_per_access; thread_index < group_size;
37
+ thread_index += blockDim.x * vals_per_access) {
38
+ mem_access::load_global<granularity>(data, vals + offset + thread_index);
39
+
40
+ #pragma unroll
41
+ for (int i = 0; i < vals_per_access; i++) {
42
+ if (abs((float)data[i]) > max) max = abs((float)data[i]);
43
+ }
44
+ }
45
+
46
+ #pragma unroll
47
+ for (int i = 1; i < WARP_SIZE; i <<= 1) {
48
+ auto temp = g.shfl_xor(max, i);
49
+ if (max < temp) max = temp;
50
+ }
51
+ __shared__ float partialMax[WARP_SIZE];
52
+
53
+ if (lane == 0) partialMax[gid] = max;
54
+
55
+ b.sync();
56
+
57
+ if (lane < warp_num) max = partialMax[lane];
58
+
59
+ #pragma unroll
60
+ for (int i = 1; i < WARP_SIZE; i <<= 1) {
61
+ auto temp = g.shfl_down(max, i);
62
+ if (max < temp) max = temp;
63
+ }
64
+
65
+ max = g.shfl(max, 0);
66
+
67
+ float q_scale = (float)(1 << num_bits) / (2 * max + 1e-5);
68
+ float q_scale_inv = 1 / q_scale;
69
+ int q_range_max = (1 << (num_bits - 1)) - 1;
70
+ int q_range_min = -(1 << (num_bits - 1));
71
+
72
+ for (int thread_index = id * vals_per_access; thread_index < group_size;
73
+ thread_index += blockDim.x * vals_per_access) {
74
+ mem_access::load_global<granularity>(data, vals + offset + thread_index);
75
+ #pragma unroll
76
+ for (int j = 0; j < vals_per_access; j++) {
77
+ float q_data;
78
+ q_data = __half2float(data[j]);
79
+ q_data = __float2int_rn(q_data * q_scale);
80
+ q_data = q_data > (q_range_max) ? (q_range_max)
81
+ : (q_data < (q_range_min) ? (q_range_min) : q_data);
82
+ data[j] = __float2half_rn(q_data * q_scale_inv);
83
+ }
84
+ mem_access::store_global<granularity>(vals + offset + thread_index, data);
85
+ }
86
+
87
+ #endif
88
+ }
89
+
90
+ __global__ void fake_quantize_kernel(float* vals, int group_size, int num_bits)
91
+ {
92
+ cg::thread_block b = cg::this_thread_block();
93
+ cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
94
+
95
+ int gid = threadIdx.x >> 5;
96
+ int lane = threadIdx.x & 0x1f;
97
+ int warp_num = blockDim.x >> 5;
98
+ int id = threadIdx.x;
99
+
100
+ constexpr int granularity = 16;
101
+ constexpr int vals_per_access = granularity / sizeof(float);
102
+
103
+ float data[vals_per_access];
104
+
105
+ int bid = blockIdx.x;
106
+
107
+ int thread_index = id * vals_per_access;
108
+
109
+ int reg_count = 0;
110
+
111
+ int offset = bid * group_size;
112
+
113
+ float max = -10000.0;
114
+
115
+ for (int thread_index = id * vals_per_access; thread_index < group_size;
116
+ thread_index += blockDim.x * vals_per_access) {
117
+ mem_access::load_global<granularity>(data, vals + offset + thread_index);
118
+
119
+ #pragma unroll
120
+ for (int i = 0; i < vals_per_access; i++) {
121
+ if (abs(data[i]) > max) max = abs(data[i]);
122
+ }
123
+ }
124
+
125
+ #pragma unroll
126
+ for (int i = 1; i < WARP_SIZE; i <<= 1) {
127
+ auto temp = g.shfl_xor(max, i);
128
+ if (max < temp) max = temp;
129
+ }
130
+ __shared__ float partialMax[WARP_SIZE];
131
+
132
+ if (lane == 0) partialMax[gid] = max;
133
+
134
+ b.sync();
135
+
136
+ if (lane < warp_num) max = partialMax[lane];
137
+
138
+ b.sync();
139
+
140
+ #pragma unroll
141
+ for (int i = 1; i < warp_num; i <<= 1) {
142
+ auto temp = g.shfl_down(max, i);
143
+ if (max < temp) max = temp;
144
+ }
145
+
146
+ max = g.shfl(max, 0);
147
+
148
+ float q_scale = (1 << num_bits) / (2 * max + 1e-5);
149
+ float q_scale_inv = 1 / q_scale;
150
+
151
+ int q_range_max = (1 << (num_bits - 1)) - 1;
152
+ int q_range_min = -(1 << (num_bits - 1));
153
+
154
+ for (int thread_index = id * vals_per_access; thread_index < group_size;
155
+ thread_index += blockDim.x * vals_per_access) {
156
+ mem_access::load_global<granularity>(data, vals + offset + thread_index);
157
+ #pragma unroll
158
+ for (int j = 0; j < vals_per_access; j++) {
159
+ float q_data;
160
+ q_data = __float2int_rn(data[j] * q_scale);
161
+ q_data = q_data > (q_range_max) ? (q_range_max)
162
+ : (q_data < (q_range_min) ? (q_range_min) : q_data);
163
+ data[j] = roundf(q_data * q_scale_inv);
164
+ }
165
+ mem_access::store_global<granularity>(vals + offset + thread_index, data);
166
+ }
167
+ }
168
+
169
+ template <typename T>
170
+ void launch_fake_quantize_kernel(T* vals,
171
+ int total_count,
172
+ int group_num,
173
+ int num_bits,
174
+ cudaStream_t stream)
175
+ {
176
+ dim3 grid_dim(group_num);
177
+ dim3 block_dim(1024);
178
+
179
+ fake_quantize_kernel<<<grid_dim, block_dim, 0, stream>>>(
180
+ vals, total_count / group_num, num_bits);
181
+ }
182
+
183
+ template void launch_fake_quantize_kernel(float* vals,
184
+ int total_count,
185
+ int group_num,
186
+ int num_bits,
187
+ cudaStream_t stream);
188
+ template void launch_fake_quantize_kernel(__half* vals,
189
+ int total_count,
190
+ int group_num,
191
+ int num_bits,
192
+ cudaStream_t stream);
193
+
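A CPU reference of the per-element symmetric round trip performed by `fake_quantize_kernel` above can help when validating the CUDA path: take the group's absolute maximum, scale into the signed `num_bits` range, round and clamp, then scale back. This is a hedged sketch, not a drop-in replacement; it ignores the vectorized loads and block-wide reductions:

```cpp
// CPU reference of the symmetric fake-quantization math in fake_quantize_kernel.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

void fake_quantize_symmetric(std::vector<float>& vals, int num_bits)
{
    float max_abs = 0.f;
    for (float v : vals) max_abs = std::max(max_abs, std::fabs(v));

    const float q_scale = static_cast<float>(1 << num_bits) / (2.f * max_abs + 1e-5f);
    const float q_scale_inv = 1.f / q_scale;
    const float q_max = static_cast<float>((1 << (num_bits - 1)) - 1);
    const float q_min = static_cast<float>(-(1 << (num_bits - 1)));

    for (float& v : vals) {
        float q = std::nearbyint(v * q_scale);    // round to the nearest integer level
        q = std::min(std::max(q, q_min), q_max);  // clamp to the signed range
        v = q * q_scale_inv;                      // map back to the original scale
    }
}

int main()
{
    std::vector<float> vals = {0.01f, -0.5f, 0.37f, 1.2f, -1.19f};
    fake_quantize_symmetric(vals, 8);
    for (float v : vals) std::printf("%.6f\n", v);
    return 0;
}
```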
194
+ __global__ void sr_fake_quantize_kernel(__half* vals,
195
+ int token_size,
196
+ int token_num,
197
+ int num_bits,
198
+ std::pair<uint64_t, uint64_t> seed)
199
+ {
200
+ #if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_AMD__)
201
+
202
+ cg::thread_block b = cg::this_thread_block();
203
+ cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
204
+
205
+ int gid = threadIdx.x >> 5;
206
+ int lane = threadIdx.x & 0x1f;
207
+ int warp_num = blockDim.x >> 5;
208
+
209
+ int idx = blockIdx.x * blockDim.x + threadIdx.x;
210
+
211
+ float2* vals_cast = reinterpret_cast<float2*>(vals);
212
+
213
+ __half2 data_low[128];
214
+ __half2 data_high[128];
215
+
216
+ int bid = blockIdx.x;
217
+
218
+ curandStatePhilox4_32_10_t state;
219
+ curand_init(seed.first, idx, seed.second, &state);
220
+ unsigned int tid = threadIdx.x;
221
+ int reg_count = 0;
222
+ int offset = bid * token_size;
223
+ int group_index = bid * token_size + tid;
224
+
225
+ int total_count = token_size * token_num;
226
+ if (group_index < total_count) {
227
+ // float min = 10000.0;
228
+ float max = -10000.0;
229
+ while (tid < token_size) {
230
+ float2 data = vals_cast[offset + tid];
231
+ __half2* data_h = reinterpret_cast<__half2*>(&data);
232
+ data_low[reg_count] = data_h[0];
233
+ data_high[reg_count] = data_h[1];
234
+
235
+ float2 data_f[2];
236
+ data_f[0] = __half22float2(data_h[0]);
237
+ data_f[1] = __half22float2(data_h[1]);
238
+
239
+ if (abs((float)data_f[0].x) > max) max = abs((float)data_f[0].x);
240
+ if (abs((float)data_f[0].y) > max) max = abs((float)data_f[0].y);
241
+ if (abs((float)data_f[1].x) > max) max = abs((float)data_f[1].x);
242
+ if (abs((float)data_f[1].y) > max) max = abs((float)data_f[1].y);
243
+
244
+ tid += blockDim.x;
245
+ reg_count++;
246
+ }
247
+
248
+ #pragma unroll
249
+ for (int i = 1; i < WARP_SIZE; i <<= 1) {
250
+ auto temp = g.shfl_xor(max, i);
251
+ if (max < temp) max = temp;
252
+ }
253
+
254
+ __shared__ float partialMax[WARP_SIZE];
255
+
256
+ if (lane == 0) partialMax[gid] = max;
257
+
258
+ b.sync();
259
+
260
+ if (lane < warp_num) max = partialMax[lane];
261
+
262
+ #pragma unroll
263
+ for (int i = 1; i < warp_num; i <<= 1) {
264
+ auto temp = g.shfl_down(max, i);
265
+ if (max < temp) max = temp;
266
+ }
267
+
268
+ max = g.shfl(max, 0);
269
+
270
+ float q_scale_val = (float)(1 << num_bits) / (max * 2 + 1e-5);
271
+ float high_q = (float)((1 << (num_bits - 1)) - 1);
272
+ float low_q = (float)(-((1 << (num_bits - 1))));
273
+
274
+ for (int i = 0; i < reg_count; i++) {
275
+ int token_index = i * blockDim.x + threadIdx.x;
276
+ if (token_index < token_size) {
277
+ float2 data_f[2];
278
+ data_f[0] = __half22float2(data_low[i]);
279
+ data_f[1] = __half22float2(data_high[i]);
280
+
281
+ float2 q_data_int[2];
282
+ q_data_int[0].x = (float)((int)(data_f[0].x * q_scale_val));
283
+ q_data_int[0].y = (float)((int)(data_f[0].y * q_scale_val));
284
+ q_data_int[1].x = (float)((int)(data_f[1].x * q_scale_val));
285
+ q_data_int[1].y = (float)((int)(data_f[1].y * q_scale_val));
286
+
287
+ // Stochastic rounding
288
+ float4 rand = curand_uniform4(&state);
289
+
290
+ float q_error[4];
291
+ q_error[0] = abs(data_f[0].x - (q_data_int[0].x / q_scale_val)) * q_scale_val;
292
+ q_error[1] = abs(data_f[0].y - (q_data_int[0].y / q_scale_val)) * q_scale_val;
293
+ q_error[2] = abs(data_f[1].x - (q_data_int[1].x / q_scale_val)) * q_scale_val;
294
+ q_error[3] = abs(data_f[1].y - (q_data_int[1].y / q_scale_val)) * q_scale_val;
295
+
296
+ q_data_int[0].x =
297
+ (rand.x < q_error[0] && q_data_int[0].x > low_q && q_data_int[0].x < high_q)
298
+ ? (q_data_int[0].x + (data_f[0].x > 0 ? 1 : -1))
299
+ : q_data_int[0].x;
300
+ q_data_int[0].y =
301
+ (rand.y < q_error[1] && q_data_int[0].y > low_q && q_data_int[0].y < high_q)
302
+ ? (q_data_int[0].y + (data_f[0].y > 0 ? 1 : -1))
303
+ : q_data_int[0].y;
304
+ q_data_int[1].x =
305
+ (rand.w < q_error[2] && q_data_int[1].x > low_q && q_data_int[1].x < high_q)
306
+ ? (q_data_int[1].x + (data_f[1].x > 0 ? 1 : -1))
307
+ : q_data_int[1].x;
308
+ q_data_int[1].y =
309
+ (rand.z < q_error[3] && q_data_int[1].y > low_q && q_data_int[1].y < high_q)
310
+ ? (q_data_int[1].y + (data_f[1].y > 0 ? 1 : -1))
311
+ : q_data_int[1].y;
312
+
313
+ data_f[0].x = q_data_int[0].x / q_scale_val;
314
+ data_f[0].y = q_data_int[0].y / q_scale_val;
315
+ data_f[1].x = q_data_int[1].x / q_scale_val;
316
+ data_f[1].y = q_data_int[1].y / q_scale_val;
317
+
318
+ float2 result;
319
+ __half2* result_h = reinterpret_cast<__half2*>(&result);
320
+ result_h[0] = __float22half2_rn(data_f[0]);
321
+ result_h[1] = __float22half2_rn(data_f[1]);
322
+
323
+ vals_cast[offset + token_index] = result;
324
+ }
325
+ }
326
+ }
327
+ #endif
328
+ }
329
+
330
+ __global__ void sr_fake_quantize_kernel(float* vals,
331
+ int token_size,
332
+ int token_num,
333
+ int num_bits,
334
+ std::pair<uint64_t, uint64_t> seed)
335
+ {
336
+ cg::thread_block b = cg::this_thread_block();
337
+ cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
338
+
339
+ int gid = threadIdx.x >> 5;
340
+ int lane = threadIdx.x & 0x1f;
341
+ int warp_num = blockDim.x >> 5;
342
+ int id = threadIdx.x;
343
+
344
+ int idx = blockIdx.x * blockDim.x + id;
345
+
346
+ float4* vals_cast = reinterpret_cast<float4*>(vals);
347
+
348
+ float4 data[128];
349
+
350
+ int bid = blockIdx.x;
351
+ int tid = threadIdx.x;
352
+ curandStatePhilox4_32_10_t state;
353
+ curand_init(seed.first, idx, seed.second, &state);
354
+
355
+ int group_index = bid * token_size + threadIdx.x;
356
+ int reg_count = 0;
357
+ int total_count = token_size * token_num;
358
+ if (group_index < total_count) {
359
+ // float min = 10000.0;
360
+ float max = -10000.0;
361
+
362
+ while (tid < token_size) {
363
+ data[reg_count] = vals_cast[group_index];
364
+
365
+ if (abs(data[reg_count].x) > max) max = abs(data[reg_count].x);
366
+ if (abs(data[reg_count].y) > max) max = abs(data[reg_count].y);
367
+ if (abs(data[reg_count].z) > max) max = abs(data[reg_count].z);
368
+ if (abs(data[reg_count].w) > max) max = abs(data[reg_count].w);
369
+
370
+ group_index += blockDim.x;
371
+ tid += blockDim.x;
372
+ reg_count++;
373
+ }
374
+
375
+ #pragma unroll
376
+ for (int i = 1; i < WARP_SIZE; i <<= 1) {
377
+ auto temp = g.shfl_xor(max, i);
378
+ if (max < temp) max = temp;
379
+ }
380
+ __shared__ float partialMax[WARP_SIZE];
381
+
382
+ if (lane == 0) partialMax[gid] = max;
383
+
384
+ b.sync();
385
+
386
+ if (lane < warp_num) max = partialMax[lane];
387
+
388
+ #pragma unroll
389
+ for (int i = 1; i < warp_num; i <<= 1) {
390
+ auto temp = g.shfl_down(max, i);
391
+ if (max < temp) max = temp;
392
+ }
393
+
394
+ max = g.shfl(max, 0);
395
+
396
+ float q_scale_val = (float)(1 << num_bits) / (max * 2 + 1e-5);
397
+ float high_q = (float)((1 << (num_bits - 1)) - 1);
398
+ float low_q = (float)(-((1 << (num_bits - 1))));
399
+
400
+ int offset = (bid)*token_size;
401
+ for (int i = 0; i < reg_count; i++) {
402
+ group_index = i * blockDim.x + threadIdx.x;
403
+ if (group_index < token_size) {
404
+ float4 q_data = data[i];
405
+
406
+ float4 q_data_int;
407
+ q_data_int.x = (float)((int)(q_data.x * q_scale_val));
408
+ q_data_int.y = (float)((int)(q_data.y * q_scale_val));
409
+ q_data_int.w = (float)((int)(q_data.w * q_scale_val));
410
+ q_data_int.z = (float)((int)(q_data.z * q_scale_val));
411
+
412
+ // Stochastic rounding
413
+ float4 rand = curand_uniform4(&state);
414
+
415
+ float q_error[4];
416
+ q_error[0] = abs(q_data.x - (q_data_int.x / q_scale_val)) * q_scale_val;
417
+ q_error[1] = abs(q_data.y - (q_data_int.y / q_scale_val)) * q_scale_val;
418
+ q_error[2] = abs(q_data.w - (q_data_int.w / q_scale_val)) * q_scale_val;
419
+ q_error[3] = abs(q_data.z - (q_data_int.z / q_scale_val)) * q_scale_val;
420
+
421
+ q_data_int.x =
422
+ (rand.x < q_error[0] && q_data_int.x > low_q && q_data_int.x < high_q)
423
+ ? (q_data_int.x + (q_data.x > 0 ? 1 : -1))
424
+ : q_data_int.x;
425
+ q_data_int.y =
426
+ (rand.y < q_error[1] && q_data_int.y > low_q && q_data_int.y < high_q)
427
+ ? (q_data_int.y + (q_data.y > 0 ? 1 : -1))
428
+ : q_data_int.y;
429
+ q_data_int.w =
430
+ (rand.w < q_error[2] && q_data_int.w > low_q && q_data_int.w < high_q)
431
+ ? (q_data_int.w + (q_data.w > 0 ? 1 : -1))
432
+ : q_data_int.w;
433
+ q_data_int.z =
434
+ (rand.z < q_error[3] && q_data_int.z > low_q && q_data_int.z < high_q)
435
+ ? (q_data_int.z + (q_data.z > 0 ? 1 : -1))
436
+ : q_data_int.z;
437
+
438
+ q_data_int.x /= q_scale_val;
439
+ q_data_int.y /= q_scale_val;
440
+ q_data_int.w /= q_scale_val;
441
+ q_data_int.z /= q_scale_val;
442
+
443
+ vals_cast[group_index + offset] = q_data_int;
444
+ }
445
+ }
446
+ }
447
+ }
448
+
449
+ template <typename T>
450
+ void launch_sr_fake_quantize_kernel(T* vals,
451
+ int total_count,
452
+ int group_num,
453
+ int num_bits,
454
+ cudaStream_t stream)
455
+ {
456
+ dim3 block_dim(1024);
457
+ dim3 grid_dim(group_num);
458
+
459
+ uint64_t inc = total_count / grid_dim.x / block_dim.x;
460
+ std::pair<uint64_t, uint64_t> seed = TrainingContext::Instance().IncrementOffset(inc);
461
+
462
+ sr_fake_quantize_kernel<<<grid_dim, block_dim, 0, stream>>>(
463
+ vals, (total_count / group_num) / 4, group_num, num_bits, seed);
464
+ }
465
+ template void launch_sr_fake_quantize_kernel(float* vals,
466
+ int total_count,
467
+ int group_num,
468
+ int num_bits,
469
+ cudaStream_t stream);
470
+ template void launch_sr_fake_quantize_kernel(__half* vals,
471
+ int total_count,
472
+ int group_num,
473
+ int num_bits,
474
+ cudaStream_t stream);
475
+
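The stochastic-rounding kernels above truncate toward zero and then bump the quantized level away from zero with probability equal to the discarded fraction, which keeps the rounding unbiased in expectation. A CPU sketch of that rule using `<random>` in place of curand; names and constants are illustrative:

```cpp
// CPU sketch of the stochastic rounding rule used by sr_fake_quantize_kernel:
// truncate toward zero, then round away from zero with probability equal to the
// truncated fraction.
#include <cmath>
#include <cstdio>
#include <random>

float sr_quantize(float x, float q_scale, float low_q, float high_q, std::mt19937& rng)
{
    std::uniform_real_distribution<float> uniform(0.f, 1.f);
    float q = static_cast<float>(static_cast<int>(x * q_scale));  // truncate toward zero
    const float err = std::fabs(x - q / q_scale) * q_scale;       // discarded fraction in [0, 1)
    if (uniform(rng) < err && q > low_q && q < high_q) {
        q += (x > 0.f) ? 1.f : -1.f;                              // round away from zero
    }
    return q / q_scale;                                           // back to the original scale
}

int main()
{
    std::mt19937 rng(0);
    // With q_scale = 10, 0.27 rounds up to 0.3 with probability ~0.7, so the
    // expected value of the rounded result stays at 0.27.
    int rounded_up = 0;
    for (int i = 0; i < 10000; ++i) {
        if (sr_quantize(0.27f, 10.f, -128.f, 127.f, rng) > 0.25f) ++rounded_up;
    }
    std::printf("rounded up %d / 10000 times\n", rounded_up);
    return 0;
}
```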
476
+ __global__ void fake_quantize_kernel_asym(__half* vals, int group_size, int num_bits)
477
+ {
478
+ #if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_AMD__)
479
+
480
+ cg::thread_block b = cg::this_thread_block();
481
+ cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
482
+
483
+ int gid = threadIdx.x >> 5;
484
+ int lane = threadIdx.x & 0x1f;
485
+ int warp_num = blockDim.x >> 5;
486
+ int id = threadIdx.x;
487
+
488
+ float2* vals_cast = reinterpret_cast<float2*>(vals);
489
+
490
+ float2 data[MAX_REG];
491
+
492
+ int group_id = blockIdx.x;
493
+
494
+ {
495
+ int group_index = id;
496
+ int reg_count = 0;
497
+ int offset = group_id * group_size;
498
+ float max = -10000.0;
499
+ float min = 10000.0;
500
+
501
+ while (group_index < group_size && reg_count < MAX_REG) {
502
+ data[reg_count] = vals_cast[offset + group_index];
503
+ __half* data_h = reinterpret_cast<__half*>(&data[reg_count]);
504
+
505
+ if (((float)data_h[0]) > max) max = (float)data_h[0];
506
+ if (((float)data_h[1]) > max) max = (float)data_h[1];
507
+ if (((float)data_h[2]) > max) max = (float)data_h[2];
508
+ if (((float)data_h[3]) > max) max = (float)data_h[3];
509
+
510
+ if (((float)data_h[0]) < min) min = (float)data_h[0];
511
+ if (((float)data_h[1]) < min) min = (float)data_h[1];
512
+ if (((float)data_h[2]) < min) min = (float)data_h[2];
513
+ if (((float)data_h[3]) < min) min = (float)data_h[3];
514
+
515
+ group_index += blockDim.x;
516
+ reg_count++;
517
+ }
518
+
519
+ #pragma unroll
520
+ for (int i = 1; i < WARP_SIZE; i <<= 1) {
521
+ auto temp = g.shfl_xor(max, i);
522
+ if (max < temp) max = temp;
523
+ }
524
+
525
+ #pragma unroll
526
+ for (int i = 1; i < WARP_SIZE; i <<= 1) {
527
+ auto temp = g.shfl_xor(min, i);
528
+ if (min > temp) min = temp;
529
+ }
530
+
531
+ __shared__ float partialMax[WARP_SIZE];
532
+ __shared__ float partialMin[WARP_SIZE];
533
+
534
+ if (lane == 0) partialMax[gid] = max;
535
+ if (lane == 0) partialMin[gid] = min;
536
+
537
+ b.sync();
538
+
539
+ if (lane < warp_num) max = partialMax[lane];
540
+ if (lane < warp_num) min = partialMin[lane];
541
+
542
+ #pragma unroll
543
+ for (int i = 1; i < warp_num; i <<= 1) {
544
+ auto temp = g.shfl_down(max, i);
545
+ if (max < temp) max = temp;
546
+ }
547
+ #pragma unroll
548
+ for (int i = 1; i < warp_num; i <<= 1) {
549
+ auto temp = g.shfl_down(min, i);
550
+ if (min > temp) min = temp;
551
+ }
552
+
553
+ max = g.shfl(max, 0);
554
+ min = g.shfl(min, 0);
555
+
556
+ float q_scale = ((max - min) + 1e-5) / (float)(1 << num_bits);
557
+ float q_scale_inv = 1 / q_scale;
558
+
559
+ for (int i = 0; i < reg_count; i++) {
560
+ group_index = i * blockDim.x + id;
561
+ if (group_index < group_size) {
562
+ __half2* data_h = reinterpret_cast<__half2*>(&data[i]);
563
+ float2 q_data[2];
564
+ q_data[0] = __half22float2(data_h[0]);
565
+ q_data[1] = __half22float2(data_h[1]);
566
+
567
+ float2 q_data_int[2];
568
+
569
+ q_data_int[0].x = roundf((q_data[0].x - min) * q_scale_inv);
570
+ q_data_int[0].y = roundf((q_data[0].y - min) * q_scale_inv);
571
+ q_data_int[1].x = roundf((q_data[1].x - min) * q_scale_inv);
572
+ q_data_int[1].y = roundf((q_data[1].y - min) * q_scale_inv);
573
+
574
+ q_data_int[0].x = q_data_int[0].x * q_scale + min;
575
+ q_data_int[0].y = q_data_int[0].y * q_scale + min;
576
+ q_data_int[1].x = q_data_int[1].x * q_scale + min;
577
+ q_data_int[1].y = q_data_int[1].y * q_scale + min;
578
+
579
+ data_h[0] = __float22half2_rn(q_data_int[0]);
580
+ data_h[1] = __float22half2_rn(q_data_int[1]);
581
+
582
+ vals_cast[offset + group_index] = data[i];
583
+ }
584
+ }
585
+ }
586
+ #endif
587
+ }
588
+
589
+ __global__ void fake_quantize_kernel_asym(float* vals, int group_size, int num_bits)
590
+ {
591
+ cg::thread_block b = cg::this_thread_block();
592
+ cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
593
+
594
+ int gid = threadIdx.x >> 5;
595
+ int lane = threadIdx.x & 0x1f;
596
+ int warp_num = blockDim.x >> 5;
597
+ int id = threadIdx.x;
598
+
599
+ float4* vals_cast = reinterpret_cast<float4*>(vals);
600
+
601
+ float4 data[MAX_REG];
602
+
603
+ int bid = blockIdx.x;
604
+
605
+ int group_index = bid * group_size + id;
606
+ int reg_count = 0;
607
+
608
+ float max = -10000.0;
609
+ float min = 10000.0;
610
+
611
+ while (id < group_size && reg_count < MAX_REG) {
612
+ float4 data_reg = vals_cast[group_index];
613
+ data[reg_count] = data_reg;
614
+
615
+ if (data_reg.x > max) max = data_reg.x;
616
+ if (data_reg.y > max) max = data_reg.y;
617
+ if (data_reg.w > max) max = data_reg.w;
618
+ if (data_reg.z > max) max = data_reg.z;
619
+
620
+ if (data_reg.x < min) min = data_reg.x;
621
+ if (data_reg.y < min) min = data_reg.y;
622
+ if (data_reg.w < min) min = data_reg.w;
623
+ if (data_reg.z < min) min = data_reg.z;
624
+
625
+ group_index += blockDim.x;
626
+ id += blockDim.x;
627
+ reg_count++;
628
+ }
629
+ id = threadIdx.x;
630
+
631
+ #pragma unroll
632
+ for (int i = 1; i < WARP_SIZE; i <<= 1) {
633
+ auto temp = g.shfl_xor(max, i);
634
+ if (max < temp) max = temp;
635
+ }
636
+
637
+ #pragma unroll
638
+ for (int i = 1; i < WARP_SIZE; i <<= 1) {
639
+ auto temp = g.shfl_xor(min, i);
640
+ if (min > temp) min = temp;
641
+ }
642
+
643
+ __shared__ float partialMax[WARP_SIZE];
644
+ __shared__ float partialMin[WARP_SIZE];
645
+
646
+ if (lane == 0) partialMax[gid] = max;
647
+ if (lane == 0) partialMin[gid] = min;
648
+
649
+ b.sync();
650
+
651
+ if (lane < warp_num) max = partialMax[lane];
652
+ if (lane < warp_num) min = partialMin[lane];
653
+
654
+ #pragma unroll
655
+ for (int i = 1; i < warp_num; i <<= 1) {
656
+ auto temp = g.shfl_down(max, i);
657
+ if (max < temp) max = temp;
658
+ }
659
+ #pragma unroll
660
+ for (int i = 1; i < warp_num; i <<= 1) {
661
+ auto temp = g.shfl_down(min, i);
662
+ if (min > temp) min = temp;
663
+ }
664
+
665
+ max = g.shfl(max, 0);
666
+ min = g.shfl(min, 0);
667
+
668
+ float q_scale = ((max - min) + 1e-5) / (float)(1 << num_bits);
669
+ float q_scale_inv = 1 / q_scale;
670
+ for (int i = 0; i < reg_count; i++) {
671
+ group_index = i * blockDim.x + id;
672
+ if (group_index < group_size) {
673
+ float4 q_data;
674
+ q_data = data[i];
675
+
676
+ float4 q_data_int;
677
+ q_data_int.x = roundf((q_data.x - min) * q_scale_inv);
678
+ q_data_int.y = roundf((q_data.y - min) * q_scale_inv);
679
+ q_data_int.w = roundf((q_data.w - min) * q_scale_inv);
680
+ q_data_int.z = roundf((q_data.z - min) * q_scale_inv);
681
+
682
+ q_data.x = q_data_int.x * q_scale + min;
683
+ q_data.y = q_data_int.y * q_scale + min;
684
+ q_data.w = q_data_int.w * q_scale + min;
685
+ q_data.z = q_data_int.z * q_scale + min;
686
+
687
+ vals_cast[group_index + bid * group_size] = q_data;
688
+ }
689
+ }
690
+ }
691
+
692
+ template <typename T>
693
+ void launch_fake_quantize_kernel_asym(T* vals,
694
+ int total_count,
695
+ int group_num,
696
+ int num_bits,
697
+ cudaStream_t stream)
698
+ {
699
+ dim3 grid_dim(group_num);
700
+ dim3 block_dim(1024);
701
+
702
+ fake_quantize_kernel_asym<<<grid_dim, block_dim, 0, stream>>>(
703
+ vals, (total_count / group_num) / 4, num_bits);
704
+ }
705
+
706
+ template void launch_fake_quantize_kernel_asym(float* vals,
707
+ int total_count,
708
+ int group_num,
709
+ int num_bits,
710
+ cudaStream_t stream);
711
+ template void launch_fake_quantize_kernel_asym(__half* vals,
712
+ int total_count,
713
+ int group_num,
714
+ int num_bits,
715
+ cudaStream_t stream);
716
+
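For the asymmetric kernels, the round trip is affine rather than symmetric: the group's `[min, max]` interval is mapped onto `2^bits` unsigned levels and the offset is added back after rounding. A brief CPU sketch of that mapping, again only as an illustration of the math in `fake_quantize_kernel_asym`:

```cpp
// CPU sketch of the affine (asymmetric) round trip in fake_quantize_kernel_asym.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

void fake_quantize_asym(std::vector<float>& vals, int num_bits)
{
    float lo = vals[0], hi = vals[0];
    for (float v : vals) { lo = std::min(lo, v); hi = std::max(hi, v); }

    const float q_scale = ((hi - lo) + 1e-5f) / static_cast<float>(1 << num_bits);
    const float q_scale_inv = 1.f / q_scale;

    for (float& v : vals) {
        const float q = std::nearbyint((v - lo) * q_scale_inv);  // unsigned level index
        v = q * q_scale + lo;                                    // back to the original range
    }
}

int main()
{
    std::vector<float> vals = {-0.2f, 0.0f, 0.33f, 0.9f};
    fake_quantize_asym(vals, 4);
    for (float v : vals) std::printf("%.4f\n", v);
    return 0;
}
```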
717
+ __global__ void sr_fake_quantize_kernel_asym(__half* vals,
718
+ int token_size,
719
+ int token_num,
720
+ int num_bits,
721
+ std::pair<uint64_t, uint64_t> seed)
722
+ {
723
+ #if __CUDA_ARCH__ >= 700 || defined(__HIP_PLATFORM_AMD__)
724
+
725
+ cg::thread_block b = cg::this_thread_block();
726
+ cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
727
+
728
+ int gid = threadIdx.x >> 5;
729
+ int lane = threadIdx.x & 0x1f;
730
+ int warp_num = blockDim.x >> 5;
731
+
732
+ int idx = blockIdx.x * blockDim.x + threadIdx.x;
733
+
734
+ float2* vals_cast = reinterpret_cast<float2*>(vals);
735
+
736
+ __half2 data_low[128];
737
+ __half2 data_high[128];
738
+
739
+ int bid = blockIdx.x;
740
+
741
+ curandStatePhilox4_32_10_t state;
742
+ curand_init(seed.first, idx, seed.second, &state);
743
+ unsigned int tid = threadIdx.x;
744
+ int reg_count = 0;
745
+ int offset = bid * token_size;
746
+ int group_index = bid * token_size + tid;
747
+
748
+ int total_count = token_size * token_num;
749
+ if (group_index < total_count) {
750
+ float min = 10000.0;
751
+ float max = -10000.0;
752
+ while (tid < token_size) {
753
+ float2 data = vals_cast[offset + tid];
754
+ __half2* data_h = reinterpret_cast<__half2*>(&data);
755
+ data_low[reg_count] = data_h[0];
756
+ data_high[reg_count] = data_h[1];
757
+
758
+ float2 data_f[2];
759
+ data_f[0] = __half22float2(data_h[0]);
760
+ data_f[1] = __half22float2(data_h[1]);
761
+
762
+ if (((float)data_f[0].x) > max) max = (float)data_f[0].x;
763
+ if (((float)data_f[0].y) > max) max = (float)data_f[0].y;
764
+ if (((float)data_f[1].x) > max) max = (float)data_f[1].x;
765
+ if (((float)data_f[1].y) > max) max = (float)data_f[1].y;
766
+
767
+ if (((float)data_f[0].x) < min) min = (float)data_f[0].x;
768
+ if (((float)data_f[0].y) < min) min = (float)data_f[0].y;
769
+ if (((float)data_f[1].x) < min) min = (float)data_f[1].x;
770
+ if (((float)data_f[1].y) < min) min = (float)data_f[1].y;
771
+
772
+ tid += blockDim.x;
773
+ reg_count++;
774
+ }
775
+
776
+ #pragma unroll
777
+ for (int i = 1; i < WARP_SIZE; i <<= 1) {
778
+ auto temp = g.shfl_xor(max, i);
779
+ if (max < temp) max = temp;
780
+ }
781
+
782
+ #pragma unroll
783
+ for (int i = 1; i < WARP_SIZE; i <<= 1) {
784
+ auto temp = g.shfl_xor(min, i);
785
+ if (min > temp) min = temp;
786
+ }
787
+
788
+ __shared__ float partialMax[WARP_SIZE];
789
+ __shared__ float partialMin[WARP_SIZE];
790
+
791
+ if (lane == 0) partialMax[gid] = max;
792
+ if (lane == 0) partialMin[gid] = min;
793
+
794
+ b.sync();
795
+
796
+ if (lane < warp_num) max = partialMax[lane];
797
+ if (lane < warp_num) min = partialMin[lane];
798
+
799
+ #pragma unroll
800
+ for (int i = 1; i < warp_num; i <<= 1) {
801
+ auto temp = g.shfl_down(max, i);
802
+ if (max < temp) max = temp;
803
+ }
804
+ #pragma unroll
805
+ for (int i = 1; i < warp_num; i <<= 1) {
806
+ auto temp = g.shfl_down(min, i);
807
+ if (min > temp) min = temp;
808
+ }
809
+
810
+ max = g.shfl(max, 0);
811
+ min = g.shfl(min, 0);
812
+
813
+ float q_scale_val = ((max - min) + 1e-5) / (float)(1 << num_bits);
814
+ float q_scale_val_inv = 1 / q_scale_val;
815
+ float high_q = (float)((1 << num_bits) - 1);
816
+
817
+ for (int i = 0; i < reg_count; i++) {
818
+ int token_index = i * blockDim.x + threadIdx.x;
819
+ if (token_index < token_size) {
820
+ float2 data_f[2];
821
+ data_f[0] = __half22float2(data_low[i]);
822
+ data_f[1] = __half22float2(data_high[i]);
823
+
824
+ float2 q_data_int[2];
825
+ q_data_int[0].x = (float)((unsigned int)((data_f[0].x - min) * q_scale_val_inv));
826
+ q_data_int[0].y = (float)((unsigned int)((data_f[0].y - min) * q_scale_val_inv));
827
+ q_data_int[1].x = (float)((unsigned int)((data_f[1].x - min) * q_scale_val_inv));
828
+ q_data_int[1].y = (float)((unsigned int)((data_f[1].y - min) * q_scale_val_inv));
829
+
830
+ // Stochastic rounding
831
+ float4 rand = curand_uniform4(&state);
832
+
833
+ float q_error[4];
834
+ q_error[0] =
835
+ abs(data_f[0].x - ((q_data_int[0].x * q_scale_val) + min)) * q_scale_val_inv;
836
+ q_error[1] =
837
+ abs(data_f[0].y - ((q_data_int[0].y * q_scale_val) + min)) * q_scale_val_inv;
838
+ q_error[2] =
839
+ abs(data_f[1].x - ((q_data_int[1].x * q_scale_val) + min)) * q_scale_val_inv;
840
+ q_error[3] =
841
+ abs(data_f[1].y - ((q_data_int[1].y * q_scale_val) + min)) * q_scale_val_inv;
842
+
843
+ q_data_int[0].x = (rand.x < q_error[0] && q_data_int[0].x < high_q)
844
+ ? (q_data_int[0].x + 1)
845
+ : q_data_int[0].x;
846
+ q_data_int[0].y = (rand.y < q_error[1] && q_data_int[0].y < high_q)
847
+ ? (q_data_int[0].y + 1)
848
+ : q_data_int[0].y;
849
+ q_data_int[1].x = (rand.w < q_error[2] && q_data_int[1].x < high_q)
850
+ ? (q_data_int[1].x + 1)
851
+ : q_data_int[1].x;
852
+ q_data_int[1].y = (rand.z < q_error[3] && q_data_int[1].y < high_q)
853
+ ? (q_data_int[1].y + 1)
854
+ : q_data_int[1].y;
855
+
856
+ data_f[0].x = q_data_int[0].x * q_scale_val + min;
857
+ data_f[0].y = q_data_int[0].y * q_scale_val + min;
858
+ data_f[1].x = q_data_int[1].x * q_scale_val + min;
859
+ data_f[1].y = q_data_int[1].y * q_scale_val + min;
860
+
861
+ float2 result;
862
+ __half2* result_h = reinterpret_cast<__half2*>(&result);
863
+ result_h[0] = __float22half2_rn(data_f[0]);
864
+ result_h[1] = __float22half2_rn(data_f[1]);
865
+
866
+ vals_cast[offset + token_index] = result;
867
+ }
868
+ }
869
+ }
870
+ #endif
871
+ }
872
+
873
+ __global__ void sr_fake_quantize_kernel_asym(float* vals,
874
+ int token_size,
875
+ int token_num,
876
+ int num_bits,
877
+ std::pair<uint64_t, uint64_t> seed)
878
+ {
879
+ cg::thread_block b = cg::this_thread_block();
880
+ cg::thread_block_tile<32> g = cg::tiled_partition<32>(b);
881
+
882
+ int gid = threadIdx.x >> 5;
883
+ int lane = threadIdx.x & 0x1f;
884
+ int warp_num = blockDim.x >> 5;
885
+ int id = threadIdx.x;
886
+
887
+ int idx = blockIdx.x * blockDim.x + id;
888
+
889
+ float4* vals_cast = reinterpret_cast<float4*>(vals);
890
+
891
+ float4 data[128];
892
+
893
+ int bid = blockIdx.x;
894
+ int tid = threadIdx.x;
895
+ curandStatePhilox4_32_10_t state;
896
+ curand_init(seed.first, idx, seed.second, &state);
897
+
898
+ int group_index = bid * token_size + threadIdx.x;
899
+ int reg_count = 0;
900
+ int total_count = token_size * token_num;
901
+ if (group_index < total_count) {
902
+ float min = 10000.0;
903
+ float max = -10000.0;
904
+
905
+ while (tid < token_size) {
906
+ float4 data_reg = vals_cast[group_index];
907
+ data[reg_count] = data_reg;
908
+ if (data_reg.x > max) max = data_reg.x;
909
+ if (data_reg.y > max) max = data_reg.y;
910
+ if (data_reg.w > max) max = data_reg.w;
911
+ if (data_reg.z > max) max = data_reg.z;
912
+
913
+ if (data_reg.x < min) min = data_reg.x;
914
+ if (data_reg.y < min) min = data_reg.y;
915
+ if (data_reg.w < min) min = data_reg.w;
916
+ if (data_reg.z < min) min = data_reg.z;
917
+
918
+ group_index += blockDim.x;
919
+ tid += blockDim.x;
920
+ reg_count++;
921
+ }
922
+
923
+ #pragma unroll
924
+ for (int i = 1; i < WARP_SIZE; i <<= 1) {
925
+ auto temp = g.shfl_xor(max, i);
926
+ if (max < temp) max = temp;
927
+ }
928
+
929
+ #pragma unroll
930
+ for (int i = 1; i < WARP_SIZE; i <<= 1) {
931
+ auto temp = g.shfl_xor(min, i);
932
+ if (min > temp) min = temp;
933
+ }
934
+
935
+ __shared__ float partialMax[WARP_SIZE];
936
+ __shared__ float partialMin[WARP_SIZE];
937
+
938
+ if (lane == 0) partialMax[gid] = max;
939
+ if (lane == 0) partialMin[gid] = min;
940
+
941
+ b.sync();
942
+
943
+ if (lane < warp_num) max = partialMax[lane];
944
+ if (lane < warp_num) min = partialMin[lane];
945
+
946
+ #pragma unroll
947
+ for (int i = 1; i < warp_num; i <<= 1) {
948
+ auto temp = g.shfl_down(max, i);
949
+ if (max < temp) max = temp;
950
+ }
951
+ #pragma unroll
952
+ for (int i = 1; i < warp_num; i <<= 1) {
953
+ auto temp = g.shfl_down(min, i);
954
+ if (min > temp) min = temp;
955
+ }
956
+
957
+ max = g.shfl(max, 0);
958
+ min = g.shfl(min, 0);
959
+
960
+ float q_scale_val = ((max - min) + 1e-5) / (float)(1 << num_bits);
961
+ float high_q = (float)((1 << num_bits) - 1);
962
+
963
+ int offset = (bid)*token_size;
964
+ for (int i = 0; i < reg_count; i++) {
965
+ group_index = i * blockDim.x + threadIdx.x;
966
+ if (group_index < token_size) {
967
+ float4 q_data = data[i];
968
+
969
+ float4 q_data_int;
970
+ q_data_int.x = (float)((int)((q_data.x - min) / q_scale_val));
971
+ q_data_int.y = (float)((int)((q_data.y - min) / q_scale_val));
972
+ q_data_int.w = (float)((int)((q_data.w - min) / q_scale_val));
973
+ q_data_int.z = (float)((int)((q_data.z - min) / q_scale_val));
974
+
975
+ // Stochastic rounding
976
+ float4 rand = curand_uniform4(&state);
977
+
978
+ float q_error[4];
979
+ q_error[0] = abs(q_data.x - ((q_data_int.x * q_scale_val) + min)) / q_scale_val;
980
+ q_error[1] = abs(q_data.y - ((q_data_int.y * q_scale_val) + min)) / q_scale_val;
981
+ q_error[2] = abs(q_data.w - ((q_data_int.w * q_scale_val) + min)) / q_scale_val;
982
+ q_error[3] = abs(q_data.z - ((q_data_int.z * q_scale_val) + min)) / q_scale_val;
983
+
984
+ q_data_int.x = (rand.x < q_error[0] && q_data_int.x < high_q) ? (q_data_int.x + 1)
985
+ : q_data_int.x;
986
+ q_data_int.y = (rand.y < q_error[1] && q_data_int.y < high_q) ? (q_data_int.y + 1)
987
+ : q_data_int.y;
988
+ q_data_int.w = (rand.w < q_error[2] && q_data_int.w < high_q) ? (q_data_int.w + 1)
989
+ : q_data_int.w;
990
+ q_data_int.z = (rand.z < q_error[3] && q_data_int.z < high_q) ? (q_data_int.z + 1)
991
+ : q_data_int.z;
992
+
993
+ q_data_int.x = q_data_int.x * q_scale_val + min;
994
+ q_data_int.y = q_data_int.y * q_scale_val + min;
995
+ q_data_int.w = q_data_int.w * q_scale_val + min;
996
+ q_data_int.z = q_data_int.z * q_scale_val + min;
997
+
998
+ vals_cast[group_index + offset] = q_data_int;
999
+ }
1000
+ }
1001
+ }
1002
+ }
1003
+ template <typename T>
1004
+ void launch_sr_fake_quantize_kernel_asym(T* vals,
1005
+ int total_count,
1006
+ int group_num,
1007
+ int num_bits,
1008
+ cudaStream_t stream)
1009
+ {
1010
+ dim3 block_dim(1024);
1011
+ dim3 grid_dim(group_num);
1012
+
1013
+ uint64_t inc = total_count / grid_dim.x / block_dim.x;
1014
+ std::pair<uint64_t, uint64_t> seed = TrainingContext::Instance().IncrementOffset(inc);
1015
+
1016
+ sr_fake_quantize_kernel_asym<<<grid_dim, block_dim, 0, stream>>>(
1017
+ vals, (total_count / group_num) / 4, group_num, num_bits, seed);
1018
+ }
1019
+ template void launch_sr_fake_quantize_kernel_asym(float* vals,
1020
+ int total_count,
1021
+ int group_num,
1022
+ int num_bits,
1023
+ cudaStream_t stream);
1024
+ template void launch_sr_fake_quantize_kernel_asym(__half* vals,
1025
+ int total_count,
1026
+ int group_num,
1027
+ int num_bits,
1028
+ cudaStream_t stream);
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/quantization/pt_binding.cpp ADDED
@@ -0,0 +1,298 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include <ATen/cuda/CUDAContext.h>
7
+ #include <torch/extension.h>
8
+ #include <cassert>
9
+ #include <vector>
10
+ #include "quantization.h"
11
+
12
+ template <typename T>
13
+ at::Tensor ds_quantize(at::Tensor& vals, int groups, int bits)
14
+ {
15
+ auto t_size = vals.sizes();
16
+ int size = 1;
17
+ for (auto dim : t_size) size *= dim;
18
+
19
+ if ((((size / groups) - 1) / 4096 + 1) <= 256) {
20
+ launch_fake_quantize_kernel(
21
+ (T*)vals.data_ptr(), size, groups, bits, at::cuda::getCurrentCUDAStream());
22
+ }
23
+ return vals;
24
+ }
25
+
26
+ template <typename T>
27
+ at::Tensor ds_sr_quantize(at::Tensor& vals, int groups, int bits)
28
+ {
29
+ auto t_size = vals.sizes();
30
+ int size = 1;
31
+ for (auto dim : t_size) size *= dim;
32
+
33
+ if (((size / groups) / 4 / 1024) <= 256) {
34
+ launch_sr_fake_quantize_kernel(
35
+ (T*)vals.data_ptr(), size, groups, bits, at::cuda::getCurrentCUDAStream());
36
+ }
37
+ return vals;
38
+ }
39
+
40
+ template <typename T>
41
+ at::Tensor ds_quantize_asym(at::Tensor& vals, int groups, int bits)
42
+ {
43
+ auto t_size = vals.sizes();
44
+ int size = 1;
45
+ for (auto dim : t_size) size *= dim;
46
+
47
+ if ((((size / groups) - 1) / 4096 + 1) <= 256) {
48
+ launch_fake_quantize_kernel_asym(
49
+ (T*)vals.data_ptr(), size, groups, bits, at::cuda::getCurrentCUDAStream());
50
+ }
51
+ return vals;
52
+ }
53
+
54
+ template <typename T>
55
+ at::Tensor ds_sr_quantize_asym(at::Tensor& vals, int groups, int bits)
56
+ {
57
+ auto t_size = vals.sizes();
58
+ int size = 1;
59
+ for (auto dim : t_size) size *= dim;
60
+
61
+ if (((size / groups) / 4 / 1024) <= 256) {
62
+ launch_sr_fake_quantize_kernel_asym(
63
+ (T*)vals.data_ptr(), size, groups, bits, at::cuda::getCurrentCUDAStream());
64
+ }
65
+ return vals;
66
+ }
67
+
68
+ std::vector<at::Tensor> quantize_kernel(at::Tensor& input_vals,
69
+ int groups,
70
+ int numBits,
71
+ quantize::Type quantType)
72
+ {
73
+ auto dtype = at::kFloat;
74
+ auto params_options = at::TensorOptions()
75
+ .dtype(dtype)
76
+ .layout(at::kStrided)
77
+ .device(at::kCUDA)
78
+ .requires_grad(false);
79
+ const int param_elems = (quantize::requires_offset(quantType)) ? 2 : 1;
80
+ auto params = torch::empty({groups, param_elems}, params_options);
81
+
82
+ auto output_options = at::TensorOptions()
83
+ .dtype(at::kChar)
84
+ .layout(at::kStrided)
85
+ .device(at::kCUDA)
86
+ .requires_grad(false);
87
+
88
+ auto output_sizes = input_vals.sizes().vec();
89
+ output_sizes[output_sizes.size() - 1] /= numBits == 8 ? 1 : 2;
90
+ auto output = torch::empty(output_sizes, output_options);
91
+
92
+ const int elems_per_group = at::numel(input_vals) / groups;
93
+
94
+ launch_quant((int8_t*)output.data_ptr(),
95
+ (float*)params.data_ptr(),
96
+ (__half*)input_vals.data_ptr(),
97
+ groups,
98
+ elems_per_group,
99
+ numBits,
100
+ quantType,
101
+ at::cuda::getCurrentCUDAStream());
102
+
103
+ return {output, params};
104
+ }
105
+
106
+ template <typename T>
107
+ at::Tensor dequantize(at::Tensor& quantized_data,
108
+ at::Tensor& params,
109
+ int groups,
110
+ int num_bits,
111
+ quantize::Type quant_type)
112
+ {
113
+ auto dtype = (std::is_same<T, float>::value) ? torch::kFloat32 : torch::kFloat16;
114
+ auto output_options = at::TensorOptions()
115
+ .dtype(dtype)
116
+ .layout(at::kStrided)
117
+ .device(at::kCUDA)
118
+ .requires_grad(false);
119
+
120
+ auto output_sizes = quantized_data.sizes().vec();
121
+ output_sizes[output_sizes.size() - 1] *= num_bits == 8 ? 1 : 2;
122
+ auto output = torch::empty(output_sizes, output_options);
123
+
124
+ const int total_elems = at::numel(output);
125
+ const int elems_per_group = total_elems / groups;
126
+
127
+ launch_dequantize_kernel((T*)output.data_ptr(),
128
+ (const int8_t*)quantized_data.data_ptr(),
129
+ (const float*)params.data_ptr(),
130
+ quant_type,
131
+ num_bits,
132
+ elems_per_group,
133
+ total_elems,
134
+ at::cuda::getCurrentCUDAStream());
135
+
136
+ return output;
137
+ }
138
+
139
+ at::Tensor dequantize_int4_to_half_experimental(at::Tensor& data_in,
140
+ at::Tensor& scale_buffer,
141
+ at::Tensor& min_val_buffer,
142
+ int num_group,
143
+ int group_size)
144
+ {
145
+ auto output_options = at::TensorOptions().dtype(at::kHalf).device(at::kCUDA);
146
+ auto output = torch::empty({num_group, group_size}, output_options);
147
+
148
+ launch_dequantize_int4_to_half_experimental((uint8_t*)data_in.data_ptr(),
149
+ (half*)output.data_ptr(),
150
+ (half*)scale_buffer.data_ptr(),
151
+ (half*)min_val_buffer.data_ptr(),
152
+ num_group,
153
+ group_size,
154
+ at::cuda::getCurrentCUDAStream());
155
+
156
+ return output;
157
+ }
158
+
159
+ at::Tensor dequantize_int8_to_half_experimental(at::Tensor& data_in,
160
+ at::Tensor& scale_buffer,
161
+ at::Tensor& min_val_buffer,
162
+ int num_group,
163
+ int group_size)
164
+ {
165
+ auto output_options = at::TensorOptions().dtype(at::kHalf).device(at::kCUDA);
166
+ auto output = torch::empty({num_group, group_size}, output_options);
167
+
168
+ launch_dequantize_int8_to_half_experimental((uint8_t*)data_in.data_ptr(),
169
+ (half*)output.data_ptr(),
170
+ (half*)scale_buffer.data_ptr(),
171
+ (half*)min_val_buffer.data_ptr(),
172
+ num_group,
173
+ group_size,
174
+ at::cuda::getCurrentCUDAStream());
175
+
176
+ return output;
177
+ }
178
+
179
+ std::vector<at::Tensor> ds_swizzle_quant(at::Tensor& input_vals,
180
+ int groups,
181
+ int num_bits,
182
+ quantize::Type quant_type,
183
+ int pipeline_size,
184
+ int nodes,
185
+ int devices_per_node)
186
+ {
187
+ auto scales_options = at::TensorOptions()
188
+ .dtype(at::kFloat)
189
+ .layout(at::kStrided)
190
+ .device(at::kCUDA)
191
+ .requires_grad(false);
192
+ const int scales_elems = (quantize::requires_offset(quant_type)) ? 2 : 1;
193
+ auto scales = torch::empty({groups, scales_elems}, scales_options);
194
+
195
+ auto output_options = at::TensorOptions()
196
+ .dtype(at::kChar)
197
+ .layout(at::kStrided)
198
+ .device(at::kCUDA)
199
+ .requires_grad(false);
200
+
201
+ const int quantization_scalar = 8 / num_bits;
202
+ const int compressed_vals = at::numel(input_vals) / quantization_scalar;
203
+
204
+ auto output = torch::empty({compressed_vals}, output_options);
205
+ const int elems_per_group = at::numel(input_vals) / groups;
206
+
207
+ launch_swizzled_quant((int8_t*)output.data_ptr(),
208
+ (float*)scales.data_ptr(),
209
+ (__half*)input_vals.data_ptr(),
210
+ num_bits,
211
+ quant_type,
212
+ groups,
213
+ elems_per_group,
214
+ pipeline_size,
215
+ nodes,
216
+ devices_per_node,
217
+ at::cuda::getCurrentCUDAStream());
218
+
219
+ return {output, scales};
220
+ }
221
+
222
+ std::vector<at::Tensor> quantized_reduction(at::Tensor& input_vals,
223
+ at::Tensor& input_scales,
224
+ int in_groups,
225
+ int out_groups,
226
+ int num_bits,
227
+ quantize::Type quant_type,
228
+ int devices_per_node)
229
+ {
230
+ auto scales_options = at::TensorOptions()
231
+ .dtype(at::kFloat)
232
+ .layout(at::kStrided)
233
+ .device(at::kCUDA)
234
+ .requires_grad(false);
235
+ const int scales_elems = (quantize::requires_offset(quant_type)) ? 2 : 1;
236
+ auto scales = torch::empty({out_groups, scales_elems}, scales_options);
237
+
238
+ auto output_options = at::TensorOptions()
239
+ .dtype(at::kChar)
240
+ .layout(at::kStrided)
241
+ .device(at::kCUDA)
242
+ .requires_grad(false);
243
+
244
+ std::vector<long int> sz(input_vals.sizes().begin(), input_vals.sizes().end());
245
+ sz[sz.size() - 1] = sz.back() / devices_per_node; // number of GPUs per node
246
+ const int elems_per_in_tensor = at::numel(input_vals) / devices_per_node;
247
+ auto output = torch::empty(sz, output_options);
248
+
249
+ const int elems_per_in_group = elems_per_in_tensor / (in_groups / devices_per_node);
250
+ const int elems_per_out_group = elems_per_in_tensor / out_groups;
251
+
252
+ launch_dequant_reduce((int8_t*)output.data_ptr(),
253
+ (float*)scales.data_ptr(),
254
+ (const int8_t*)input_vals.data_ptr(),
255
+ (const float*)input_scales.data_ptr(),
256
+ devices_per_node,
257
+ num_bits,
258
+ quant_type,
259
+ out_groups,
260
+ elems_per_out_group,
261
+ elems_per_in_tensor,
262
+ in_groups / devices_per_node,
263
+ elems_per_in_group,
264
+ at::cuda::getCurrentCUDAStream());
265
+ return {output, scales};
266
+ }
267
+
268
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
269
+ {
270
+ m.def("ds_quantize_fp32", &ds_quantize<float>, "DeepSpeed Quantize with fp32 (CUDA)");
271
+ m.def("ds_quantize_fp16", &ds_quantize<__half>, "DeepSpeed Quantize with fp16 (CUDA)");
272
+ m.def("ds_sr_quantize_fp32", &ds_sr_quantize<float>, "DeepSpeed Quantize with fp32 (CUDA)");
273
+ m.def("ds_sr_quantize_fp16", &ds_sr_quantize<__half>, "DeepSpeed Quantize with fp16 (CUDA)");
274
+ m.def("ds_quantize_asym_fp32", &ds_quantize_asym<float>, "DeepSpeed Quantize with fp32 (CUDA)");
275
+ m.def(
276
+ "ds_quantize_asym_fp16", &ds_quantize_asym<__half>, "DeepSpeed Quantize with fp16 (CUDA)");
277
+ m.def("ds_sr_quantize_asym_fp32",
278
+ &ds_sr_quantize_asym<float>,
279
+ "DeepSpeed Quantize with fp32 (CUDA)");
280
+ m.def("ds_sr_quantize_asym_fp16",
281
+ &ds_sr_quantize_asym<__half>,
282
+ "DeepSpeed Quantize with fp16 (CUDA)");
283
+ pybind11::enum_<quantize::Type>(m, "QuantizationType")
284
+ .value("Symmetric", quantize::Type::Symmetric)
285
+ .value("Asymmetric", quantize::Type::Asymmetric)
286
+ .export_values();
287
+ m.def("quantize", &quantize_kernel);
288
+ m.def("dequantize", &dequantize<__half>);
289
+ m.def("dequantize_fp32", &dequantize<float>);
290
+ m.def("dequantize_int4_to_half_experimental",
291
+ &dequantize_int4_to_half_experimental,
292
+ "Dequantize int4 to half (experimental)");
293
+ m.def("dequantize_int8_to_half_experimental",
294
+ &dequantize_int8_to_half_experimental,
295
+ "Dequantize int8 to half (experimental)");
296
+ m.def("swizzle_quant", &ds_swizzle_quant);
297
+ m.def("quantized_reduction", &quantized_reduction);
298
+ }
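The bindings above size their outputs purely from element counts: 4-bit quantization packs two values per `int8`, and asymmetric modes carry two parameters (scale and offset) per group. A tiny sketch of that sizing logic, with hypothetical names, for estimating buffer sizes ahead of time:

```cpp
// Sketch of the output sizing used by quantize_kernel: 8-bit values take one
// int8 per element, 4-bit values pack two per int8, and asymmetric quantization
// stores two float parameters per group instead of one.
#include <cstdio>

struct QuantShapes {
    long output_bytes;
    long param_floats;
};

QuantShapes quant_shapes(long num_elems, int groups, int num_bits, bool asymmetric)
{
    QuantShapes s;
    s.output_bytes = num_elems / (num_bits == 8 ? 1 : 2);
    s.param_floats = static_cast<long>(groups) * (asymmetric ? 2 : 1);
    return s;
}

int main()
{
    const QuantShapes s = quant_shapes(4096 * 4096, 256, 4, /*asymmetric=*/true);
    std::printf("quantized bytes: %ld, param floats: %ld\n", s.output_bytes, s.param_floats);
    return 0;
}
```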
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/quantization/quant_reduce.cu ADDED
@@ -0,0 +1,263 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include <cstdio>
7
+ #include "dequantization_utils.h"
8
+ #include "ds_kernel_utils.h"
9
+ #include "memory_access_utils.h"
10
+ #include "quantization_utils.h"
11
+ #include "reduction_utils.h"
12
+
13
+ using rop = reduce::ROpType;
14
+
15
+ /*
16
+ TODO(cmikeh2): Add implementation that better handles larger nodes. It would likely make sense
17
+ to leverage some parallel reductions here to improve performance.
18
+ */
19
+
20
+ template <int numBits, int numTensors, int totalChunks, quantize::Type quantType>
21
+ __global__ void __launch_bounds__(1024) dequant_reduce(int8_t* reduced_data,
22
+ float* reduced_scales,
23
+ const int8_t* input_data,
24
+ const float* input_scales,
25
+ int elems_per_out_group,
26
+ int elems_per_in_tensor,
27
+ int groups_per_in_tensor,
28
+ int elems_per_in_group,
29
+ int num_tensors)
30
+ {
31
+ cg::thread_block tb = cg::this_thread_block();
32
+ cg::thread_block_tile<hw_warp_size> warp = cg::tiled_partition<hw_warp_size>(tb);
33
+
34
+ // NOTE(cmikeh2): This probably could be hardcoded to a larger number,
35
+ // but that means even stronger restrictions on the number of elements per group
36
+ // A performance analysis here might be beneficial
37
+ constexpr int mem_granularity = (numBits == 8) ? 8 : 4;
38
+ constexpr int elems_per_load = mem_granularity / sizeof(int8_t); // div by 1
39
+ constexpr int storage_values = 16 / sizeof(__half2);
40
+
41
+ const int block_offset = tb.group_index().x * elems_per_out_group;
42
+ const int elem_offset = tb.thread_index().x * elems_per_load;
43
+ const int base_offset = block_offset + elem_offset;
44
+ const int stride = tb.group_dim().x * elems_per_load;
45
+
46
+ __half2 local_buffer[totalChunks * storage_values];
47
+
48
+ quantize::GroupStats<quantType> stats;
49
+
50
+ #pragma unroll
51
+ for (int i = 0; i < totalChunks; i++) {
52
+ __half2* iteration_buffer = local_buffer + i * storage_values;
53
+
54
+ #pragma unroll
55
+ for (int j = 0; j < storage_values; j++) {
56
+ iteration_buffer[j] = reduce::init<rop::Add, __half2>();
57
+ }
58
+
59
+ const int iter_offset = i * stride + base_offset;
60
+ const int iter_scale_idx = iter_offset / elems_per_in_group;
61
+ bool do_loads = i * stride + elem_offset < elems_per_out_group;
62
+
63
+ if (numTensors > 0) {
64
+ #pragma unroll
65
+ for (int j = 0; j < numTensors; j++) {
66
+ if (do_loads) {
67
+ int8_t load_buffer[elems_per_load];
68
+
69
+ mem_access::load_global<mem_granularity>(
70
+ load_buffer, input_data + j * elems_per_in_tensor + iter_offset);
71
+
72
+ quantize::Params<quantType, numBits> params(
73
+ input_scales + j * groups_per_in_tensor, iter_scale_idx);
74
+
75
+ __half2 dequant_buffer[storage_values];
76
+ dequantize::chunk<numBits, quantType>(dequant_buffer, load_buffer, params);
77
+
78
+ #pragma unroll
79
+ for (int k = 0; k < storage_values; k++) {
80
+ iteration_buffer[k] =
81
+ reduce::element<rop::Add>(iteration_buffer[k], dequant_buffer[k]);
82
+ }
83
+ }
84
+ }
85
+ } else {
86
+ #pragma unroll 4
87
+ for (int j = 0; j < num_tensors; j++) {
88
+ if (do_loads) {
89
+ int8_t load_buffer[elems_per_load];
90
+
91
+ mem_access::load_global<mem_granularity>(
92
+ load_buffer, input_data + j * elems_per_in_tensor + iter_offset);
93
+
94
+ quantize::Params<quantType, numBits> params(
95
+ input_scales + j * groups_per_in_tensor, iter_scale_idx);
96
+
97
+ __half2 dequant_buffer[storage_values];
98
+ dequantize::chunk<numBits, quantType>(dequant_buffer, load_buffer, params);
99
+
100
+ #pragma unroll
101
+ for (int k = 0; k < storage_values; k++) {
102
+ iteration_buffer[k] =
103
+ reduce::element<rop::Add>(iteration_buffer[k], dequant_buffer[k]);
104
+ }
105
+ }
106
+ }
107
+ }
108
+
109
+ #pragma unroll
110
+ for (int j = 0; j < storage_values; j++) { stats.update(iteration_buffer[j]); }
111
+ }
112
+
113
+ auto params = stats.template get_params<numBits, 1024>(tb, warp);
114
+
115
+ if (tb.thread_index().x == 0) { params.store(reduced_scales, tb.group_index().x); }
116
+
117
+ #pragma unroll
118
+ for (int i = 0; i < totalChunks; i++) {
119
+ const int iter_offset = i * stride + base_offset;
120
+ if (i * stride + elem_offset < elems_per_out_group) {
121
+ int8_t local_output[elems_per_load];
122
+ quantize::_chunk<numBits, quantType>(
123
+ local_output, local_buffer + i * storage_values, params);
124
+ mem_access::store_global<mem_granularity>(reduced_data + iter_offset, local_output);
125
+ }
126
+ }
127
+ }
128
+
129
+ template <int Power>
130
+ int32_t pow2_round(int32_t raw_value)
131
+ {
132
+ return (((raw_value - 1) >> Power) + 1) << Power;
133
+ }
134
+
135
+ #define LAUNCH_DEQUANT_REDUCE(num_chunks) \
136
+ dequant_reduce<numBits, numTensors, num_chunks, quantType> \
137
+ <<<grid, block, 0, stream>>>(reduced_data, \
138
+ reduced_scales, \
139
+ input_data, \
140
+ input_scales, \
141
+ elems_per_out_group, \
142
+ elems_per_in_tensor, \
143
+ groups_per_in_tensor, \
144
+ elems_per_in_group, \
145
+ num_tensors);
146
+
147
+ template <int numBits, int numTensors, quantize::Type quantType>
148
+ void launch_dequant_reduce_impl(int8_t* reduced_data,
149
+ float* reduced_scales,
150
+ const int8_t* input_data,
151
+ const float* input_scales,
152
+ int out_groups,
153
+ int elems_per_out_group,
154
+ int elems_per_in_tensor,
155
+ int groups_per_in_tensor,
156
+ int elems_per_in_group,
157
+ int num_tensors,
158
+ cudaStream_t stream)
159
+ {
160
+ // That this equals numBits is a coincidence; the value is derived from 8 halves per 16 bytes with 2-way packing for int4
161
+ constexpr int elems_per_thread = numBits;
162
+ const int one_step_threads =
163
+ next_pow2((elems_per_out_group + elems_per_thread - 1) / (elems_per_thread));
164
+ // TODO(cmikeh2): Tune this
165
+ const int threads = (one_step_threads < 1024) ? one_step_threads : 1024;
166
+
167
+ dim3 block(threads);
168
+ dim3 grid(out_groups);
169
+
170
+ const int elems_per_step = threads * elems_per_thread;
171
+ const int unroll_raw = (elems_per_out_group + elems_per_step - 1) / elems_per_step;
172
+
173
+ const int unroll = (unroll_raw >= 4) ? pow2_round<1>(unroll_raw) : unroll_raw;
174
+
175
+ if (unroll == 1) {
176
+ // 0-4096 elems
177
+ LAUNCH_DEQUANT_REDUCE(1);
178
+ } else if (unroll == 2) {
179
+ // 4097-8192 etc...
180
+ LAUNCH_DEQUANT_REDUCE(2);
181
+ } else if (unroll == 3) {
182
+ LAUNCH_DEQUANT_REDUCE(3);
183
+ } else if (unroll == 4) {
184
+ LAUNCH_DEQUANT_REDUCE(4);
185
+ } else if (unroll == 6) {
186
+ LAUNCH_DEQUANT_REDUCE(6);
187
+ } else if (unroll == 8) {
188
+ LAUNCH_DEQUANT_REDUCE(8);
189
+ } else if (unroll == 10) {
190
+ LAUNCH_DEQUANT_REDUCE(10);
191
+ } else if (unroll == 12) {
192
+ // 48k limit
193
+ LAUNCH_DEQUANT_REDUCE(12);
194
+ } else {
195
+ assert(false);
196
+ }
197
+ }
198
+
199
+ #define LAUNCH_DEQUANT_REDUCE_IMPL(NUM_BITS, NUM_GPUS, QUANT_TYPE) \
200
+ launch_dequant_reduce_impl<NUM_BITS, NUM_GPUS, QUANT_TYPE>(reduced_data, \
201
+ reduced_scales, \
202
+ input_data, \
203
+ input_scales, \
204
+ out_groups, \
205
+ elems_per_out_group, \
206
+ elems_per_in_tensor, \
207
+ groups_per_in_tensor, \
208
+ elems_per_in_group, \
209
+ num_gpus, \
210
+ stream);
211
+
212
+ void launch_dequant_reduce(int8_t* reduced_data,
213
+ float* reduced_scales,
214
+ const int8_t* input_data,
215
+ const float* input_scales,
216
+ int num_gpus,
217
+ int num_bits,
218
+ quantize::Type quant_type,
219
+ int out_groups,
220
+ int elems_per_out_group,
221
+ int elems_per_in_tensor,
222
+ int groups_per_in_tensor,
223
+ int elems_per_in_group,
224
+ cudaStream_t stream)
225
+ {
226
+ if (quant_type == quantize::Type::Symmetric) {
227
+ if (num_bits == 4) {
228
+ if (num_gpus == 8) {
229
+ LAUNCH_DEQUANT_REDUCE_IMPL(4, 8, quantize::Type::Symmetric);
230
+ } else if (num_gpus == 16) {
231
+ LAUNCH_DEQUANT_REDUCE_IMPL(4, 16, quantize::Type::Symmetric);
232
+ } else {
233
+ LAUNCH_DEQUANT_REDUCE_IMPL(4, -1, quantize::Type::Symmetric);
234
+ }
235
+ } else if (num_bits == 8) {
236
+ if (num_gpus == 8) {
237
+ LAUNCH_DEQUANT_REDUCE_IMPL(8, 8, quantize::Type::Symmetric);
238
+ } else if (num_gpus == 16) {
239
+ LAUNCH_DEQUANT_REDUCE_IMPL(8, 16, quantize::Type::Symmetric);
240
+ } else {
241
+ LAUNCH_DEQUANT_REDUCE_IMPL(8, -1, quantize::Type::Symmetric);
242
+ }
243
+ }
244
+ } else if (quant_type == quantize::Type::Asymmetric) {
245
+ if (num_bits == 4) {
246
+ if (num_gpus == 8) {
247
+ LAUNCH_DEQUANT_REDUCE_IMPL(4, 8, quantize::Type::Asymmetric);
248
+ } else if (num_gpus == 16) {
249
+ LAUNCH_DEQUANT_REDUCE_IMPL(4, 16, quantize::Type::Asymmetric);
250
+ } else {
251
+ LAUNCH_DEQUANT_REDUCE_IMPL(4, -1, quantize::Type::Asymmetric);
252
+ }
253
+ } else if (num_bits == 8) {
254
+ if (num_gpus == 8) {
255
+ LAUNCH_DEQUANT_REDUCE_IMPL(8, 8, quantize::Type::Asymmetric);
256
+ } else if (num_gpus == 16) {
257
+ LAUNCH_DEQUANT_REDUCE_IMPL(8, 16, quantize::Type::Asymmetric);
258
+ } else {
259
+ LAUNCH_DEQUANT_REDUCE_IMPL(8, -1, quantize::Type::Asymmetric);
260
+ }
261
+ }
262
+ }
263
+ }
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/quantization/quantize.cu ADDED
@@ -0,0 +1,151 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "ds_kernel_utils.h"
7
+ #include "memory_access_utils.h"
8
+ #include "quantization.h"
9
+ #include "quantization_utils.h"
10
+ #include "reduction_utils.h"
11
+
12
+ namespace cg = cooperative_groups;
13
+
14
+ /*
15
+ Pure quantization kernel with no fusion.
16
+ */
17
+ template <int q_bits,
18
+ quantize::Type quant_type,
19
+ int UNROLL,
20
+ int internal_unroll,
21
+ int threads_per_group,
22
+ int max_threads>
23
+ __global__ void cached_quantization(int8_t* __restrict__ output_data,
24
+ float* __restrict__ params,
25
+ const __half* __restrict__ input_data,
26
+ int groups,
27
+ int elems_per_group)
28
+ {
29
+ cg::thread_block tb = cg::this_thread_block();
30
+ cg::thread_block_tile<hw_warp_size> warp = cg::tiled_partition<hw_warp_size>(tb);
31
+
32
+ // Indexing offsets
33
+ const int block_offset =
34
+ (tb.group_index().x * (max_threads / threads_per_group) * elems_per_group) +
35
+ (tb.thread_index().y * elems_per_group);
36
+ const int elem_offset = tb.thread_index().x * quantize::h_per_load;
37
+ const int base_offset = block_offset + elem_offset;
38
+ const int stride = tb.size() * quantize::h_per_load;
39
+
40
+ const __half* input_base = input_data + base_offset;
41
+
42
+ __half2 local_buffer[UNROLL * internal_unroll * quantize::h2_per_load];
43
+
44
+ #pragma unroll
45
+ for (int i = 0; i < UNROLL; i++) {
46
+ // Convenience helper, should resolve to register indices and not realize.
47
+ __half2* iteration_buffer = local_buffer + i * internal_unroll * quantize::h2_per_load;
48
+ #pragma unroll
49
+ for (int j = 0; j < internal_unroll; j++) {
50
+ const int iteration = i * internal_unroll + j;
51
+ mem_access::load_global<quantize::granularity>(
52
+ iteration_buffer + j * quantize::h2_per_load,
53
+ input_base + iteration * stride,
54
+ elem_offset + iteration * stride < elems_per_group);
55
+ }
56
+ }
57
+
58
+ quantize::
59
+ local_array<quant_type, q_bits, UNROLL * internal_unroll, threads_per_group, max_threads>(
60
+ local_buffer, params, output_data, elems_per_group, groups);
61
+ }
62
+
63
+ /********* Launcher methods ***********/
64
+ #define LAUNCH_CACHED_QUANT_CALL(q_bits, quant_type) \
65
+ cached_quantization<q_bits, \
66
+ quant_type, \
67
+ unroll_factor, \
68
+ internal_unroll_l, \
69
+ threads_per_group, \
70
+ max_threads> \
71
+ <<<grid, block, 0, stream>>>(output_data, params, input_data, groups, elems_per_group);
72
+
73
+ #define LAUNCH_CACHED_QUANT( \
74
+ q_bits, quant_type, unroll_factor_in, internal_unroll_in, threads_per_group_in) \
75
+ const int unroll_factor = unroll_factor_in; \
76
+ const int internal_unroll_l = internal_unroll_in; \
77
+ const int threads_per_group = threads_per_group_in; \
78
+ if (q_bits == 4) { \
79
+ if (quant_type == quantize::Type::Asymmetric) { \
80
+ LAUNCH_CACHED_QUANT_CALL(4, quantize::Type::Asymmetric) \
81
+ } else { \
82
+ LAUNCH_CACHED_QUANT_CALL(4, quantize::Type::Symmetric) \
83
+ } \
84
+ } else { \
85
+ if (quant_type == quantize::Type::Asymmetric) { \
86
+ LAUNCH_CACHED_QUANT_CALL(8, quantize::Type::Asymmetric) \
87
+ } else { \
88
+ LAUNCH_CACHED_QUANT_CALL(8, quantize::Type::Symmetric) \
89
+ } \
90
+ }
91
+
92
+ void launch_quant(int8_t* output_data,
93
+ float* params,
94
+ const __half* input_data,
95
+ const int groups,
96
+ const int elems_per_group,
97
+ const int num_bits,
98
+ const quantize::Type quant_type,
99
+ cudaStream_t stream)
100
+ {
101
+ constexpr int max_threads = 256;
102
+
103
+ constexpr int internal_unroll = 2;
104
+
105
+ const bool is_subblock_schedule = (elems_per_group <= 128) ? true : false;
106
+ const int h_per_step = is_subblock_schedule ? quantize::h_per_load
107
+ : quantize::h_per_load * internal_unroll;
108
+
109
+ // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of
110
+ // warp-sized blocks rather than stepping up to 64/96 threads
111
+ const int one_step_threads = next_pow2((elems_per_group + h_per_step - 1) / h_per_step);
112
+ const int threads_per_group = (one_step_threads < max_threads) ? one_step_threads : max_threads;
113
+
114
+ const int groups_per_block =
115
+ is_subblock_schedule ? (max_threads + threads_per_group - 1) / threads_per_group : 1;
116
+ const int groups_launch = (groups_per_block + groups - 1) / groups_per_block;
117
+
118
+ dim3 block(threads_per_group, groups_per_block);
119
+ dim3 grid(groups_launch);
120
+
121
+ const int elems_per_step = threads_per_group * h_per_step;
122
+ const int external_unroll = (elems_per_group + elems_per_step - 1) / elems_per_step;
123
+
124
+ if (is_subblock_schedule) {
125
+ // <=128
126
+ if (threads_per_group == 1) {
127
+ LAUNCH_CACHED_QUANT(num_bits, quant_type, 1, 1, 1);
128
+ } else if (threads_per_group == 2) {
129
+ LAUNCH_CACHED_QUANT(num_bits, quant_type, 1, 1, 2);
130
+ } else if (threads_per_group == 4) {
131
+ LAUNCH_CACHED_QUANT(num_bits, quant_type, 1, 1, 4);
132
+ } else if (threads_per_group == 8) {
133
+ LAUNCH_CACHED_QUANT(num_bits, quant_type, 1, 1, 8);
134
+ } else if (threads_per_group == 16) {
135
+ LAUNCH_CACHED_QUANT(num_bits, quant_type, 1, 1, 16);
136
+ }
137
+ } else if (external_unroll == 1) {
138
+ // 129 - 4096 elems
139
+ // (this can launch with 1-7 warps as well)
140
+ LAUNCH_CACHED_QUANT(num_bits, quant_type, 1, internal_unroll, max_threads);
141
+ } else if (external_unroll == 2) {
142
+ // 4097 - 8192 elems
143
+ LAUNCH_CACHED_QUANT(num_bits, quant_type, 2, internal_unroll, max_threads);
144
+ } else if (external_unroll == 3) {
145
+ // 8193 - 12288 elems
146
+ LAUNCH_CACHED_QUANT(num_bits, quant_type, 3, internal_unroll, max_threads);
147
+ } else if (external_unroll == 4) {
148
+ // 12289 - 16384 elems
149
+ LAUNCH_CACHED_QUANT(num_bits, quant_type, 4, internal_unroll, max_threads);
150
+ }
151
+ }
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/quantization/quantize_intX.cu ADDED
@@ -0,0 +1,281 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include <assert.h>
7
+ #include <cuda_fp16.h>
8
+ #include <cuda_runtime.h>
9
+ #include "memory_access_utils.h"
10
+
11
+ template <typename T, int N>
12
+ struct alignas(sizeof(T) * N) AlignedArray {
13
+ using Element = T;
14
+ static const int kElements = N;
15
+
16
+ __device__ __host__ AlignedArray() {}
17
+
18
+ __device__ __host__ AlignedArray(const T& rhs)
19
+ {
20
+ #pragma unroll
21
+ for (int idx = 0; idx < kElements; ++idx) { this->at(idx) = rhs; }
22
+ }
23
+
24
+ __device__ __host__ T& operator[](int offset)
25
+ {
26
+ return reinterpret_cast<T&>(this->buffer[offset]);
27
+ }
28
+
29
+ __device__ __host__ const T& operator[](int offset) const
30
+ {
31
+ return reinterpret_cast<const T&>(this->buffer[offset]);
32
+ }
33
+
34
+ __device__ __host__ T& at(int offset) { return reinterpret_cast<T&>(this->buffer[offset]); }
35
+
36
+ __device__ __host__ const T& at(int offset) const
37
+ {
38
+ return reinterpret_cast<const T&>(this->buffer[offset]);
39
+ }
40
+
41
+ __device__ __host__ AlignedArray<T, N> operator+(const AlignedArray<T, N>& rhs) const
42
+ {
43
+ AlignedArray<T, N> ret;
44
+
45
+ #pragma unroll
46
+ for (int idx = 0; idx < kElements; ++idx) { ret[idx] = this->at(idx) + rhs.at(idx); }
47
+
48
+ return ret;
49
+ }
50
+
51
+ __device__ __forceinline__ void clear()
52
+ {
53
+ #pragma unroll
54
+ for (int idx = 0; idx < kElements; ++idx) { this->at(idx) = Element(0); }
55
+ }
56
+
57
+ Element buffer[N];
58
+ };
59
+
60
+ template <typename T>
61
+ struct reduce_max {
62
+ __device__ __forceinline__ T operator()(const T& lhs, const T& rhs)
63
+ {
64
+ return lhs > rhs ? lhs : rhs;
65
+ }
66
+ };
67
+
68
+ template <typename T>
69
+ struct reduce_min {
70
+ __device__ __forceinline__ T operator()(const T& lhs, const T& rhs)
71
+ {
72
+ return lhs < rhs ? lhs : rhs;
73
+ }
74
+ };
75
+
76
+ template <typename T, int N>
77
+ struct subtract {
78
+ __device__ __forceinline__ AlignedArray<T, N> operator()(const AlignedArray<T, N>& lhs,
79
+ const T& rhs)
80
+ {
81
+ AlignedArray<T, N> ret;
82
+
83
+ #pragma unroll
84
+ for (int idx = 0; idx < N; ++idx) { ret[idx] = lhs[idx] - rhs; }
85
+
86
+ return ret;
87
+ }
88
+ };
89
+
90
+ template <typename T, int N>
91
+ struct plus {
92
+ __device__ __forceinline__ AlignedArray<T, N> operator()(const AlignedArray<T, N>& lhs,
93
+ const T& rhs)
94
+ {
95
+ AlignedArray<T, N> ret;
96
+
97
+ #pragma unroll
98
+ for (int idx = 0; idx < N; ++idx) { ret[idx] = lhs[idx] + rhs; }
99
+
100
+ return ret;
101
+ }
102
+ };
103
+
104
+ template <typename T, int N>
105
+ struct multiply {
106
+ __device__ __forceinline__ AlignedArray<T, N> operator()(const AlignedArray<T, N>& lhs,
107
+ const T& rhs)
108
+ {
109
+ AlignedArray<T, N> ret;
110
+
111
+ #pragma unroll
112
+ for (int idx = 0; idx < N; ++idx) { ret[idx] = lhs[idx] * rhs; }
113
+
114
+ return ret;
115
+ }
116
+ };
117
+
118
+ template <typename T, int N>
119
+ struct clamp {
120
+ __device__ __forceinline__ AlignedArray<T, N> operator()(const AlignedArray<T, N>& lhs,
121
+ const T& min_val,
122
+ const T& max_val)
123
+ {
124
+ AlignedArray<T, N> ret;
125
+
126
+ #pragma unroll
127
+ for (int idx = 0; idx < N; ++idx) {
128
+ ret[idx] = reduce_max<T>()(reduce_min<T>()(lhs[idx], max_val), min_val);
129
+ }
130
+
131
+ return ret;
132
+ }
133
+ };
134
+
135
+ template <typename T, int N>
136
+ struct round_int;
137
+
138
+ template <int N>
139
+ struct round_int<half, N> {
140
+ __device__ __forceinline__ AlignedArray<half, N> operator()(const AlignedArray<half, N>& lhs)
141
+ {
142
+ AlignedArray<half, N> ret;
143
+
144
+ #pragma unroll
145
+ for (int idx = 0; idx < N; ++idx) { ret[idx] = hrint(lhs[idx]); }
146
+
147
+ return ret;
148
+ }
149
+ };
150
+
151
+ template <typename T, int N>
152
+ struct divide {
153
+ __device__ __forceinline__ AlignedArray<T, N> operator()(const AlignedArray<T, N>& lhs,
154
+ const T& rhs)
155
+ {
156
+ AlignedArray<T, N> ret;
157
+
158
+ #pragma unroll
159
+ for (int idx = 0; idx < N; ++idx) { ret[idx] = lhs[idx] / rhs; }
160
+
161
+ return ret;
162
+ }
163
+ };
164
+
165
+ template <typename T, int N, typename Reducer>
166
+ __device__ __forceinline__ T to_scalar(const AlignedArray<T, N>& data)
167
+ {
168
+ Reducer re;
169
+ T res = data[0];
170
+
171
+ #pragma unroll
172
+ for (int idx = 1; idx < N; ++idx) { res = re(res, data[idx]); }
173
+
174
+ return res;
175
+ }
176
+
177
+ template <int N>
178
+ __device__ __forceinline__ AlignedArray<half, N * 2> int4_to_half(
179
+ const AlignedArray<uint8_t, N>& data)
180
+ {
181
+ AlignedArray<half, N * 2> ret;
182
+
183
+ #pragma unroll
184
+ for (int idx = 0; idx < N * 2; idx += 2) {
185
+ ret[idx] = half(int(data[idx / 2] >> 4));
186
+ ret[idx + 1] = half(int(data[idx / 2] & 0xf));
187
+ }
188
+
189
+ return ret;
190
+ }
191
+
192
+ __global__ void dequantize_int4_to_half(uint8_t* data_in,
193
+ half* data_out,
194
+ half* scale_buffer,
195
+ half* min_val_buffer,
196
+ int num_group,
197
+ int group_size)
198
+ {
199
+ using AccessType = AlignedArray<uint8_t, 4>;
200
+ using AccessTypeOut = AlignedArray<half, 8>;
201
+
202
+ for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < num_group * group_size / 8;
203
+ idx += blockDim.x * gridDim.x) {
204
+ int id_group = idx / (group_size / 8);
205
+ AccessType value = reinterpret_cast<AccessType*>(data_in)[idx];
206
+ half scale = scale_buffer[id_group];
207
+ half min_value = min_val_buffer[id_group];
208
+
209
+ AccessTypeOut output = int4_to_half(value);
210
+ output = divide<half, 8>()(output, scale);
211
+ output = plus<half, 8>()(output, min_value);
212
+
213
+ reinterpret_cast<AccessTypeOut*>(data_out)[idx] = output;
214
+ }
215
+ }
216
+
217
+ void launch_dequantize_int4_to_half_experimental(uint8_t* data_in,
218
+ half* data_out,
219
+ half* scale_buffer,
220
+ half* min_val_buffer,
221
+ int num_group,
222
+ int group_size,
223
+ cudaStream_t stream)
224
+ {
225
+ int num_warp = num_group / 4;
226
+ int num_block = num_warp / 8; // 256 threads / block
227
+
228
+ dequantize_int4_to_half<<<num_block, 256, 0, stream>>>(
229
+ data_in, data_out, scale_buffer, min_val_buffer, num_group, group_size);
230
+ }
231
+
232
+ template <int N>
233
+ __device__ __forceinline__ AlignedArray<half, N> int8_to_half(const AlignedArray<uint8_t, N>& data)
234
+ {
235
+ AlignedArray<half, N> ret;
236
+
237
+ #pragma unroll
238
+ for (int idx = 0; idx < N; idx += 1) { ret[idx] = half(int(data[idx])); }
239
+
240
+ return ret;
241
+ }
242
+
243
+ __global__ void dequantize_int8_to_half(uint8_t* data_in,
244
+ half* data_out,
245
+ half* scale_buffer,
246
+ half* min_val_buffer,
247
+ int num_group,
248
+ int group_size)
249
+ {
250
+ using AccessType = AlignedArray<uint8_t, 8>;
251
+ using AccessTypeOut = AlignedArray<half, 8>;
252
+
253
+ for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < num_group * group_size / 8;
254
+ idx += blockDim.x * gridDim.x) {
255
+ int id_group = idx / (group_size / 8);
256
+ AccessType value = reinterpret_cast<AccessType*>(data_in)[idx];
257
+ half scale = scale_buffer[id_group];
258
+ half min_value = min_val_buffer[id_group];
259
+
260
+ AccessTypeOut output = int8_to_half(value);
261
+ output = divide<half, 8>()(output, scale);
262
+ output = plus<half, 8>()(output, min_value);
263
+
264
+ reinterpret_cast<AccessTypeOut*>(data_out)[idx] = output;
265
+ }
266
+ }
267
+
268
+ void launch_dequantize_int8_to_half_experimental(uint8_t* data_in,
269
+ half* data_out,
270
+ half* scale_buffer,
271
+ half* min_val_buffer,
272
+ int num_group,
273
+ int group_size,
274
+ cudaStream_t stream)
275
+ {
276
+ int num_warp = num_group / 4;
277
+ int num_block = num_warp / 8; // 256 threads / block
278
+
279
+ dequantize_int8_to_half<<<num_block, 256, 0, stream>>>(
280
+ data_in, data_out, scale_buffer, min_val_buffer, num_group, group_size);
281
+ }
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/quantization/swizzled_quantize.cu ADDED
@@ -0,0 +1,196 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "memory_access_utils.h"
7
+ #include "quantization_utils.h"
8
+ #include "reduction_utils.h"
9
+
10
+ using rop = reduce::ROpType;
11
+
12
+ namespace swiz_quant {
13
+ constexpr int max_threads = 512;
14
+ constexpr int min_threads = 32;
15
+
16
+ constexpr int step_granularity = 2;
17
+ constexpr int h_per_step = step_granularity * quantize::h_per_load;
18
+ } // namespace swiz_quant
19
+
20
+ template <int numBits, int totalChunks, int threads, quantize::Type quantType>
21
+ __global__ void swizzled_quant_kernel(int8_t* quantized_data,
22
+ float* quantized_scales,
23
+ const __half* uncompressed_data,
24
+ int elems_per_group,
25
+ int nodes,
26
+ int devices_per_node)
27
+ {
28
+ cg::thread_block tb = cg::this_thread_block();
29
+ cg::thread_block_tile<hw_warp_size> warp = cg::tiled_partition<hw_warp_size>(tb);
30
+
31
+ // Indexing offsets; same as the standard quantization kernel for the input side
32
+ const int block_rank = blockIdx.x + blockIdx.y * gridDim.x + blockIdx.z * gridDim.x * gridDim.y;
33
+ const int block_offset = block_rank * elems_per_group;
34
+ const int elem_offset = tb.thread_index().x * quantize::h_per_load;
35
+ const int base_offset = block_offset + elem_offset;
36
+ const int stride = tb.size() * quantize::h_per_load;
37
+ const __half* input_base = uncompressed_data + base_offset;
38
+
39
+ // Local buffer
40
+ __half2 local_buffer[totalChunks * quantize::h2_per_load];
41
+
42
+ quantize::GroupStats<quantType> stats;
43
+ #pragma unroll
44
+ for (int i = 0; i < totalChunks; i++) {
45
+ __half2* iteration_buffer = local_buffer + i * quantize::h2_per_load;
46
+
47
+ mem_access::load_global<quantize::granularity>(
48
+ iteration_buffer, input_base + i * stride, elem_offset + i * stride < elems_per_group);
49
+
50
+ #pragma unroll
51
+ for (int j = 0; j < quantize::h2_per_load; j++) { stats.update(iteration_buffer[j]); }
52
+ }
53
+
54
+ auto params = stats.template get_params<numBits, threads>(tb, warp);
55
+
56
+ const int partition_id = blockIdx.z;
57
+ const int partition_offset = partition_id / devices_per_node;
58
+ const int partition_base = (partition_id % devices_per_node) * nodes;
59
+ const int pipelining_offset = blockIdx.y * (devices_per_node * nodes);
60
+ const int output_partition = (pipelining_offset + partition_base + partition_offset);
61
+
62
+ constexpr int out_scalar_effect = 8 / numBits;
63
+ const int out_block_rank = output_partition * gridDim.x + blockIdx.x;
64
+ const int out_block_offset = out_block_rank * elems_per_group / out_scalar_effect;
65
+ const int out_base_offset = out_block_offset + elem_offset / out_scalar_effect;
66
+ int8_t* out_base = quantized_data + out_base_offset;
67
+
68
+ const int out_stride = stride / out_scalar_effect;
69
+ constexpr int num_int8_out = quantize::h_per_load / out_scalar_effect;
70
+
71
+ if (tb.thread_index().x == 0) { params.store(quantized_scales, out_block_rank); }
72
+
73
+ #pragma unroll
74
+ for (int i = 0; i < totalChunks; i++) {
75
+ if (i * stride + elem_offset < elems_per_group) {
76
+ int8_t local_output[quantize::h_per_load / out_scalar_effect];
77
+ quantize::_chunk<numBits, quantType>(
78
+ local_output, local_buffer + i * quantize::h2_per_load, params);
79
+ mem_access::store_global<num_int8_out>(out_base + i * out_stride, local_output);
80
+ }
81
+ }
82
+ }
83
+
84
+ #define LAUNCH_SWIZZLE_QUANT(total_chunks, threads) \
85
+ swizzled_quant_kernel<numBits, total_chunks, threads, qType><<<grid, block, 0, stream>>>( \
86
+ q_data, q_scales, input_data, elems_per_group, nodes, devices_per_node);
87
+
88
+ /*
89
+ Swizzled quantization reorganizes the quantized groups in order to better facilitate
90
+ communication. As an example of the partitioning scheme we have the following example
91
+ of 2 node, 4 device swizzling:
92
+
93
+ --- --- --- --- --- --- --- ---
94
+ | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
95
+ --- --- --- --- --- --- --- ---
96
+ becomes
97
+ --- --- --- --- --- --- --- ---
98
+ | 0 | 4 | 1 | 5 | 2 | 6 | 3 | 7 |
99
+ --- --- --- --- --- --- --- ---
100
+
101
+ Multiple quantization groups may be mapped into a single partition. In order to better support
102
+ later pipelining, we may also perform an additional slicing. In two-way slicing, for instance,
103
+ the first halves of each partition are concatenated.
104
+ */
105
+
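/*
Editorial sketch (not part of the upstream source): the output-partition index computed
inside swizzled_quant_kernel above, pulled out as a standalone helper so the 2-node,
4-device example can be checked by hand. The function name is illustrative only.
*/
inline int swizzled_partition(int partition_id, int nodes, int devices_per_node)
{
    const int partition_offset = partition_id / devices_per_node;          // node index
    const int partition_base = (partition_id % devices_per_node) * nodes;  // device slot * nodes
    return partition_base + partition_offset;
}
// For nodes = 2 and devices_per_node = 4, partitions 0..7 land at output slots
// 0, 2, 4, 6, 1, 3, 5, 7, so reading the output back in order yields 0, 4, 1, 5, 2, 6, 3, 7
// exactly as drawn above.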
106
+ template <int numBits, quantize::Type qType>
107
+ void launch_swizzled_quant_impl(int8_t* q_data,
108
+ float* q_scales,
109
+ const __half* input_data,
110
+ int groups,
111
+ int elems_per_group,
112
+ int pipelining,
113
+ int nodes,
114
+ int devices_per_node,
115
+ cudaStream_t stream)
116
+ {
117
+ const int one_step_threads =
118
+ next_pow2((elems_per_group + swiz_quant::h_per_step - 1) / swiz_quant::h_per_step);
119
+ const int max_threads = (one_step_threads < swiz_quant::max_threads) ? one_step_threads
120
+ : swiz_quant::max_threads;
121
+ const int threads = (max_threads < swiz_quant::min_threads) ? swiz_quant::min_threads
122
+ : max_threads;
123
+
124
+ dim3 block(threads);
125
+ const int groups_per_partition = groups / (nodes * devices_per_node);
126
+ assert(groups_per_partition % pipelining == 0);
127
+ const int contiguous_groups = groups_per_partition / pipelining;
128
+ const int partitions = nodes * devices_per_node;
129
+ dim3 grid(contiguous_groups, pipelining, partitions);
130
+
131
+ const int elems_per_step = threads * swiz_quant::h_per_step;
132
+ const int external_unroll = ((elems_per_group + elems_per_step - 1) / elems_per_step);
133
+ const int total_unroll = external_unroll * swiz_quant::step_granularity;
134
+
135
+ assert(total_unroll % 2 == 0);
136
+
137
+ if (threads == 32) {
138
+ LAUNCH_SWIZZLE_QUANT(2, 32);
139
+ } else if (threads == 64) {
140
+ LAUNCH_SWIZZLE_QUANT(2, 64);
141
+ } else if (threads == 128) {
142
+ LAUNCH_SWIZZLE_QUANT(2, 128);
143
+ } else if (threads == 256) {
144
+ LAUNCH_SWIZZLE_QUANT(2, 256);
145
+ } else if (threads == 512) {
146
+ if (total_unroll == 2) {
147
+ LAUNCH_SWIZZLE_QUANT(2, 512);
148
+ } else if (total_unroll == 4) {
149
+ LAUNCH_SWIZZLE_QUANT(4, 512);
150
+ } else if (total_unroll == 6) {
151
+ LAUNCH_SWIZZLE_QUANT(6, 512);
152
+ } else if (total_unroll == 8) {
153
+ LAUNCH_SWIZZLE_QUANT(8, 512);
154
+ } else if (total_unroll == 10) {
155
+ LAUNCH_SWIZZLE_QUANT(10, 512);
156
+ }
157
+ }
158
+ }
159
+
160
+ #define DISPATCH_SWIZZLE_QUANT(num_bits, qtype) \
161
+ launch_swizzled_quant_impl<num_bits, qtype>(q_data, \
162
+ q_scales, \
163
+ input_data, \
164
+ groups, \
165
+ elems_per_group, \
166
+ pipelining, \
167
+ nodes, \
168
+ devices_per_node, \
169
+ stream);
170
+
171
+ void launch_swizzled_quant(int8_t* q_data,
172
+ float* q_scales,
173
+ const __half* input_data,
174
+ int num_bits,
175
+ quantize::Type q_type,
176
+ int groups,
177
+ int elems_per_group,
178
+ int pipelining,
179
+ int nodes,
180
+ int devices_per_node,
181
+ cudaStream_t stream)
182
+ {
183
+ if (num_bits == 4) {
184
+ if (q_type == quantize::Type::Asymmetric) {
185
+ DISPATCH_SWIZZLE_QUANT(4, quantize::Type::Asymmetric);
186
+ } else if (q_type == quantize::Type::Symmetric) {
187
+ DISPATCH_SWIZZLE_QUANT(4, quantize::Type::Symmetric);
188
+ }
189
+ } else if (num_bits == 8) {
190
+ if (q_type == quantize::Type::Asymmetric) {
191
+ DISPATCH_SWIZZLE_QUANT(8, quantize::Type::Asymmetric);
192
+ } else if (q_type == quantize::Type::Symmetric) {
193
+ DISPATCH_SWIZZLE_QUANT(8, quantize::Type::Symmetric);
194
+ }
195
+ }
196
+ }
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/gather_scatter.cu ADDED
@@ -0,0 +1,186 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "custom_cuda_layers.h"
7
+ #include "memory_access_utils.h"
8
+
9
+ namespace cg = cooperative_groups;
10
+
11
+ namespace td_data {
12
+ constexpr int granularity = 16;
13
+ }
14
+
15
+ template <typename T>
16
+ __global__ void gather_tokens_impl(T* retained_tokens,
17
+ const T* activations,
18
+ int32_t* gather_indices,
19
+ int32_t sampled_tokens,
20
+ int32_t channels,
21
+ int32_t read_batch_stride,
22
+ int32_t read_seq_stride,
23
+ int32_t write_batch_stride,
24
+ int32_t write_seq_stride)
25
+ {
26
+ constexpr int mem_vals_t = td_data::granularity / sizeof(T);
27
+
28
+ cg::thread_block tb = cg::this_thread_block();
29
+
30
+ const int gather_idx = gather_indices[tb.group_index().x * sampled_tokens + tb.group_index().y];
31
+
32
+ const int read_offset = read_batch_stride * tb.group_index().x + read_seq_stride * gather_idx;
33
+ const int write_offset =
34
+ write_batch_stride * tb.group_index().x + write_seq_stride * tb.group_index().y;
35
+
36
+ for (int i = tb.thread_index().x * mem_vals_t; i < channels; i += blockDim.x * mem_vals_t) {
37
+ T local_data[mem_vals_t];
38
+ mem_access::load_global<td_data::granularity>(local_data, activations + read_offset + i);
39
+ mem_access::store_global<td_data::granularity>(retained_tokens + write_offset + i,
40
+ local_data);
41
+ }
42
+ }
43
+
44
+ template <typename T>
45
+ void launch_gather_tokens(T* retained_tokens,
46
+ T* activations,
47
+ int32_t* gather_indices,
48
+ int32_t batch_size,
49
+ int32_t sampled_tokens,
50
+ int32_t channels,
51
+ int32_t read_batch_stride,
52
+ int32_t read_seq_stride,
53
+ int32_t write_batch_stride,
54
+ int32_t write_seq_stride,
55
+ cudaStream_t stream)
56
+ {
57
+ constexpr int mem_vals_t = td_data::granularity / sizeof(T);
58
+
59
+ const int load_steps = (channels + mem_vals_t - 1) / mem_vals_t;
60
+ const int threads = (load_steps >= 1024) ? 1024 : load_steps;
61
+
62
+ dim3 block(threads);
63
+ dim3 grid(batch_size, sampled_tokens);
64
+
65
+ gather_tokens_impl<T><<<grid, block, 0, stream>>>(retained_tokens,
66
+ activations,
67
+ gather_indices,
68
+ sampled_tokens,
69
+ channels,
70
+ read_batch_stride,
71
+ read_seq_stride,
72
+ write_batch_stride,
73
+ write_seq_stride);
74
+ }
75
+
76
+ template void launch_gather_tokens<float>(float*,
77
+ float*,
78
+ int32_t*,
79
+ int32_t,
80
+ int32_t,
81
+ int32_t,
82
+ int32_t,
83
+ int32_t,
84
+ int32_t,
85
+ int32_t,
86
+ cudaStream_t);
87
+
88
+ template void launch_gather_tokens<__half>(__half*,
89
+ __half*,
90
+ int32_t*,
91
+ int32_t,
92
+ int32_t,
93
+ int32_t,
94
+ int32_t,
95
+ int32_t,
96
+ int32_t,
97
+ int32_t,
98
+ cudaStream_t);
99
+
100
+ template <typename T>
101
+ __global__ void scatter_tokens_impl(T* all_activations,
102
+ const T* layer_activations,
103
+ int32_t* gather_indices,
104
+ int32_t retained_tokens,
105
+ int32_t channels,
106
+ int32_t read_batch_stride,
107
+ int32_t read_seq_stride,
108
+ int32_t write_batch_stride,
109
+ int32_t write_seq_stride)
110
+ {
111
+ constexpr int mem_vals_t = td_data::granularity / sizeof(T);
112
+
113
+ cg::thread_block tb = cg::this_thread_block();
114
+
115
+ const int gather_idx =
116
+ gather_indices[tb.group_index().x * retained_tokens + tb.group_index().y];
117
+
118
+ const int read_offset =
119
+ read_batch_stride * tb.group_index().x + read_seq_stride * tb.group_index().y;
120
+ const int write_offset =
121
+ write_batch_stride * tb.group_index().x + write_seq_stride * gather_idx;
122
+
123
+ for (int i = tb.thread_index().x * mem_vals_t; i < channels; i += mem_vals_t * blockDim.x) {
124
+ T local_data[mem_vals_t];
125
+ mem_access::load_global<td_data::granularity>(local_data,
126
+ layer_activations + read_offset + i);
127
+ mem_access::store_global<td_data::granularity>(all_activations + write_offset + i,
128
+ local_data);
129
+ }
130
+ }
131
+
132
+ template <typename T>
133
+ void launch_scatter_tokens(T* all_activations,
134
+ T* layer_activations,
135
+ int32_t* gather_indices,
136
+ int32_t batch_size,
137
+ int32_t sampled_tokens,
138
+ int32_t channels,
139
+ int32_t read_batch_stride,
140
+ int32_t read_seq_stride,
141
+ int32_t write_batch_stride,
142
+ int32_t write_seq_stride,
143
+ cudaStream_t stream)
144
+ {
145
+ constexpr int mem_vals_t = td_data::granularity / sizeof(T);
146
+
147
+ const int load_steps = (channels + mem_vals_t - 1) / mem_vals_t;
148
+ const int threads = (load_steps >= 1024) ? 1024 : load_steps;
149
+
150
+ dim3 block(threads);
151
+ dim3 grid(batch_size, sampled_tokens);
152
+
153
+ scatter_tokens_impl<T><<<grid, block, 0, stream>>>(all_activations,
154
+ layer_activations,
155
+ gather_indices,
156
+ sampled_tokens,
157
+ channels,
158
+ read_batch_stride,
159
+ read_seq_stride,
160
+ write_batch_stride,
161
+ write_seq_stride);
162
+ }
163
+
164
+ template void launch_scatter_tokens<float>(float*,
165
+ float*,
166
+ int32_t*,
167
+ int32_t,
168
+ int32_t,
169
+ int32_t,
170
+ int32_t,
171
+ int32_t,
172
+ int32_t,
173
+ int32_t,
174
+ cudaStream_t);
175
+
176
+ template void launch_scatter_tokens<__half>(__half*,
177
+ __half*,
178
+ int32_t*,
179
+ int32_t,
180
+ int32_t,
181
+ int32_t,
182
+ int32_t,
183
+ int32_t,
184
+ int32_t,
185
+ int32_t,
186
+ cudaStream_t);
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/pt_binding.cpp ADDED
@@ -0,0 +1,216 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include <torch/extension.h>
7
+ #include <vector>
8
+ #include "custom_cuda_layers.h"
9
+
10
+ torch::Tensor token_sort_(torch::Tensor& unsorted_token_ids, int64_t original_tokens)
11
+ {
12
+ const int layers = unsorted_token_ids.size(0);
13
+ const int batch_size = unsorted_token_ids.size(1);
14
+ const int reserved_tokens = unsorted_token_ids.size(2);
15
+
16
+ launch_token_sort(unsorted_token_ids.data_ptr<int32_t>(),
17
+ layers,
18
+ batch_size,
19
+ reserved_tokens,
20
+ original_tokens,
21
+ c10::cuda::getCurrentCUDAStream());
22
+
23
+ return unsorted_token_ids;
24
+ }
25
+
26
+ torch::Tensor token_gather(torch::Tensor& activations,
27
+ torch::Tensor& sorted_indices,
28
+ bool batch_first)
29
+ {
30
+ // Activations may be in either [N, S, C] or [S, N, C] while sorted_indices is
31
+ // always in [N, retained]
32
+ /*
33
+ TORCH_CHECK(sorted_indices.size(0) == activations.size(0) ||
34
+ sorted_indices.size(0) == activations.size(1),
35
+ "Unable to match the batch size of the sorted indices to the activation
36
+ shape."); TORCH_CHECK(activations.size(2) % 8 == 0, "Channels must be divisible by 8 to align
37
+ with vectorized loads.");
38
+ */
39
+ // bool batch_first = sorted_indices.size(0) == activations.size(0);
40
+
41
+ const int64_t dim_0 = (batch_first) ? sorted_indices.size(0) : sorted_indices.size(1);
42
+ const int64_t dim_1 = (batch_first) ? sorted_indices.size(1) : sorted_indices.size(0);
43
+ const int64_t dim_2 = activations.size(2);
44
+
45
+ auto output = torch::empty({dim_0, dim_1, dim_2}, activations.options());
46
+
47
+ const int batch_size = sorted_indices.size(0);
48
+ const int channels = dim_2;
49
+ const int retained_tokens = sorted_indices.size(1);
50
+ const int read_batch_stride = (batch_first) ? activations.stride(0) : activations.stride(1);
51
+ const int read_seq_stride = (batch_first) ? activations.stride(1) : activations.stride(0);
52
+ const int write_batch_stride = (batch_first) ? output.stride(0) : output.stride(1);
53
+ const int write_seq_stride = (batch_first) ? output.stride(1) : output.stride(0);
54
+
55
+ if (activations.options().dtype() == torch::kFloat) {
56
+ launch_gather_tokens((float*)output.data_ptr(),
57
+ (float*)activations.data_ptr(),
58
+ (int32_t*)sorted_indices.data_ptr(),
59
+ batch_size,
60
+ retained_tokens,
61
+ channels,
62
+ read_batch_stride,
63
+ read_seq_stride,
64
+ write_batch_stride,
65
+ write_seq_stride,
66
+ c10::cuda::getCurrentCUDAStream());
67
+ } else {
68
+ launch_gather_tokens((__half*)output.data_ptr(),
69
+ (__half*)activations.data_ptr(),
70
+ (int32_t*)sorted_indices.data_ptr(),
71
+ batch_size,
72
+ retained_tokens,
73
+ channels,
74
+ read_batch_stride,
75
+ read_seq_stride,
76
+ write_batch_stride,
77
+ write_seq_stride,
78
+ c10::cuda::getCurrentCUDAStream());
79
+ }
80
+
81
+ return output;
82
+ }
83
+
84
+ torch::Tensor token_scatter_(torch::Tensor& all_activations,
85
+ torch::Tensor& layer_activations,
86
+ torch::Tensor& sorted_indices,
87
+ bool batch_first)
88
+ {
89
+ // Activations may be in either [N, S, C] or [S, N, C] while sorted_indices is
90
+ // always in [N, retained]
91
+ /*
92
+ TORCH_CHECK(sorted_indices.size(0) == all_activations.size(0) ||
93
+ sorted_indices.size(0) == all_activations.size(1),
94
+ "Unable to match the batch size of the sorted indices to the activation
95
+ shape."); TORCH_CHECK(all_activations.size(2) % 8 == 0, "Channels must be divisible by 8 to
96
+ align with vectorized loads.");
97
+ */
98
+ // bool batch_first = sorted_indices.size(0) == all_activations.size(0);
99
+
100
+ const int batch_size = sorted_indices.size(0);
101
+ const int channels = all_activations.size(2);
102
+ const int retained_tokens = sorted_indices.size(1);
103
+ const int read_batch_stride = (batch_first) ? layer_activations.stride(0)
104
+ : layer_activations.stride(1);
105
+ const int read_seq_stride = (batch_first) ? layer_activations.stride(1)
106
+ : layer_activations.stride(0);
107
+ const int write_batch_stride = (batch_first) ? all_activations.stride(0)
108
+ : all_activations.stride(1);
109
+ const int write_seq_stride = (batch_first) ? all_activations.stride(1)
110
+ : all_activations.stride(0);
111
+
112
+ if (all_activations.options().dtype() == torch::kFloat) {
113
+ launch_scatter_tokens((float*)all_activations.data_ptr(),
114
+ (float*)layer_activations.data_ptr(),
115
+ (int32_t*)sorted_indices.data_ptr(),
116
+ batch_size,
117
+ retained_tokens,
118
+ channels,
119
+ read_batch_stride,
120
+ read_seq_stride,
121
+ write_batch_stride,
122
+ write_seq_stride,
123
+ c10::cuda::getCurrentCUDAStream());
124
+ } else {
125
+ launch_scatter_tokens((__half*)all_activations.data_ptr(),
126
+ (__half*)layer_activations.data_ptr(),
127
+ (int32_t*)sorted_indices.data_ptr(),
128
+ batch_size,
129
+ retained_tokens,
130
+ channels,
131
+ read_batch_stride,
132
+ read_seq_stride,
133
+ write_batch_stride,
134
+ write_seq_stride,
135
+ c10::cuda::getCurrentCUDAStream());
136
+ }
137
+
138
+ return all_activations;
139
+ }
140
+
141
+ torch::Tensor mask_gather_bert(torch::Tensor& dense_mask, torch::Tensor& sorted_indices)
142
+ {
143
+ // TORCH_CHECK(dense_mask.dim() == 4)
144
+
145
+ const int batch_size = dense_mask.size(0);
146
+ const int layers = sorted_indices.size(0);
147
+ /*
148
+ TORCH_CHECK(layers * batch_size == sorted_indices.size(0),
149
+ "Mismatch between the indices and the mask");
150
+ */
151
+ const int orig_seq_len = dense_mask.size(3);
152
+ const int truncated_seq_len = sorted_indices.size(2);
153
+
154
+ auto output = torch::empty({layers, batch_size, 1, truncated_seq_len, truncated_seq_len},
155
+ dense_mask.options());
156
+
157
+ if (dense_mask.options().dtype() == torch::kFloat) {
158
+ launch_slice_bert_mask((float*)output.data_ptr(),
159
+ (const float*)dense_mask.data_ptr(),
160
+ (const int32_t*)sorted_indices.data_ptr(),
161
+ layers,
162
+ batch_size,
163
+ truncated_seq_len,
164
+ orig_seq_len,
165
+ c10::cuda::getCurrentCUDAStream());
166
+ } else {
167
+ launch_slice_bert_mask((__half*)output.data_ptr(),
168
+ (const __half*)dense_mask.data_ptr(),
169
+ (const int32_t*)sorted_indices.data_ptr(),
170
+ layers,
171
+ batch_size,
172
+ truncated_seq_len,
173
+ orig_seq_len,
174
+ c10::cuda::getCurrentCUDAStream());
175
+ }
176
+
177
+ return output;
178
+ }
179
+
180
+ torch::Tensor mask_gather_gpt(torch::Tensor dense_mask, int truncated_seq_len)
181
+ {
182
+ // TORCH_CHECK(dense_mask.dim() == 4)
183
+
184
+ const int batch_size = dense_mask.size(0);
185
+ const int orig_seq_len = dense_mask.size(3);
186
+
187
+ auto output =
188
+ torch::empty({batch_size, 1, truncated_seq_len, truncated_seq_len}, dense_mask.options());
189
+
190
+ if (dense_mask.options().dtype() == torch::kFloat) {
191
+ launch_slice_gpt_mask((float*)output.data_ptr(),
192
+ (const float*)dense_mask.data_ptr(),
193
+ batch_size,
194
+ truncated_seq_len,
195
+ orig_seq_len,
196
+ c10::cuda::getCurrentCUDAStream());
197
+ } else {
198
+ launch_slice_gpt_mask((__half*)output.data_ptr(),
199
+ (const __half*)dense_mask.data_ptr(),
200
+ batch_size,
201
+ truncated_seq_len,
202
+ orig_seq_len,
203
+ c10::cuda::getCurrentCUDAStream());
204
+ }
205
+
206
+ return output;
207
+ }
208
+
209
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
210
+ {
211
+ m.def("token_sort_", &token_sort_, "Comparison free sorting algorithm (CUDA)");
212
+ m.def("token_gather", &token_gather, "Parallel gather of tokens (CUDA)");
213
+ m.def("token_scatter_", &token_scatter_, "Parallel scatter of tokens (CUDA)");
214
+ m.def("mask_gather_bert", &mask_gather_bert, "Token-based mask gather for BERT masking (CUDA)");
215
+ m.def("mask_gather_gpt", &mask_gather_gpt, "Token-based mask gather for GPT masking (CUDA)");
216
+ }
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/slice_attn_masks.cu ADDED
@@ -0,0 +1,128 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "custom_cuda_layers.h"
7
+ #include "memory_access_utils.h"
8
+
9
+ namespace cg = cooperative_groups;
10
+
11
+ template <typename T>
12
+ __global__ void slice_gpt_mask_impl(T* output_mask,
13
+ const T* input_mask,
14
+ int truncated_seq_len,
15
+ int orig_seq_len)
16
+ {
17
+ const int in_batch_stride = orig_seq_len * orig_seq_len;
18
+ const int out_batch_stride = truncated_seq_len * truncated_seq_len;
19
+
20
+ cg::thread_block tb = cg::this_thread_block();
21
+
22
+ const T* input_mask_block =
23
+ input_mask + blockIdx.x * in_batch_stride + blockIdx.y * orig_seq_len;
24
+ T* output_mask_block =
25
+ output_mask + blockIdx.x * out_batch_stride + blockIdx.y * truncated_seq_len;
26
+
27
+ for (int i = tb.thread_index().x; i < truncated_seq_len; i += blockDim.x) {
28
+ output_mask_block[i] = input_mask_block[i];
29
+ }
30
+ }
31
+
32
+ template <typename T>
33
+ void launch_slice_gpt_mask(T* output_mask,
34
+ const T* input_mask,
35
+ int batch_size,
36
+ int truncated_seq_len,
37
+ int orig_seq_len,
38
+ cudaStream_t stream)
39
+ {
40
+ const int threads = (truncated_seq_len >= 1024) ? 1024 : truncated_seq_len;
41
+
42
+ dim3 block(threads);
43
+ dim3 grid(batch_size, truncated_seq_len);
44
+
45
+ slice_gpt_mask_impl<T>
46
+ <<<grid, block, 0, stream>>>(output_mask, input_mask, truncated_seq_len, orig_seq_len);
47
+ }
48
+
49
+ template void launch_slice_gpt_mask<float>(float*, const float*, int, int, int, cudaStream_t);
50
+
51
+ template void launch_slice_gpt_mask<__half>(__half*, const __half*, int, int, int, cudaStream_t);
52
+
53
+ template <typename T>
54
+ __global__ void slice_bert_mask_impl(T* output_mask,
55
+ const T* input_mask,
56
+ const int32_t* retained_indices,
57
+ int32_t truncated_seq_len,
58
+ int32_t orig_seq_len)
59
+ {
60
+ const int in_batch_stride = orig_seq_len * orig_seq_len;
61
+ const int out_batch_stride = truncated_seq_len * truncated_seq_len;
62
+ const int out_layer_stride = out_batch_stride * gridDim.y;
63
+
64
+ cg::thread_block tb = cg::this_thread_block();
65
+
66
+ const int out_layer_offset = tb.group_index().x * out_layer_stride;
67
+
68
+ const int in_batch_offset = tb.group_index().y * in_batch_stride;
69
+ const int out_batch_offset = tb.group_index().y * out_batch_stride;
70
+
71
+ const int32_t gather_row =
72
+ retained_indices[tb.group_index().y * truncated_seq_len + tb.group_index().z];
73
+ const int in_seq_offset = gather_row * orig_seq_len;
74
+ const int out_seq_offset = tb.group_index().z * truncated_seq_len;
75
+
76
+ const T* in_sequence = input_mask + in_batch_offset + in_seq_offset;
77
+ T* out_sequence = output_mask + out_layer_offset + out_batch_offset + out_seq_offset;
78
+ const int32_t* gather_data = retained_indices + tb.group_index().y * truncated_seq_len;
79
+
80
+ for (int i = tb.thread_index().x; i < truncated_seq_len; i += blockDim.x) {
81
+ out_sequence[i] = in_sequence[gather_data[i]];
82
+ }
83
+ }
84
+
85
+ /*
86
+ Since the Bert mask is not causal like GPT, we can't just generate a set of
87
+ masks for the entire model based off a single layer sample.
88
+
89
+ We map the kernel as follows:
90
+ z-dimension: layer
91
+ y-dimension: batch
92
+ x-dimension: sequence_offset
93
+ */
94
+ template <typename T>
95
+ void launch_slice_bert_mask(T* output_mask,
96
+ const T* input_mask,
97
+ const int32_t* retained_indices,
98
+ int32_t layers,
99
+ int32_t batch_size,
100
+ int32_t truncated_seq_len,
101
+ int32_t orig_seq_len,
102
+ cudaStream_t stream)
103
+ {
104
+ const int threads = (truncated_seq_len >= 1024) ? 1024 : truncated_seq_len;
105
+ dim3 block(threads);
106
+ dim3 grid(layers, batch_size, truncated_seq_len);
107
+
108
+ slice_bert_mask_impl<T><<<grid, block, 0, stream>>>(
109
+ output_mask, input_mask, retained_indices, truncated_seq_len, orig_seq_len);
110
+ }
111
+
112
+ template void launch_slice_bert_mask<float>(float*,
113
+ const float*,
114
+ const int32_t*,
115
+ int32_t,
116
+ int32_t,
117
+ int32_t,
118
+ int32_t,
119
+ cudaStream_t);
120
+
121
+ template void launch_slice_bert_mask<__half>(__half*,
122
+ const __half*,
123
+ const int32_t*,
124
+ int32_t,
125
+ int32_t,
126
+ int32_t,
127
+ int32_t,
128
+ cudaStream_t);
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/random_ltd/token_sort.cu ADDED
@@ -0,0 +1,194 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include <cassert>
7
+ #include "custom_cuda_layers.h"
8
+ #include "memory_access_utils.h"
9
+
10
+ namespace cg = cooperative_groups;
11
+
12
+ namespace td_sort {
13
+ constexpr int threads = 512;
14
+ constexpr int granularity = 16;
15
+ constexpr int mem_vals = granularity / sizeof(int32_t);
16
+ constexpr int max_buffer_size = (threads + 1) * mem_vals;
17
+
18
+ #ifdef __HIP_PLATFORM_AMD__
19
+ constexpr int warp_size = 64;
20
+ #else
21
+ constexpr int warp_size = 32;
22
+ #endif
23
+
24
+ constexpr int max_warps = threads / warp_size;
25
+ } // namespace td_sort
26
+
27
+ template <int VALS_PER_THREAD>
28
+ __global__ void scan_sort(int32_t* data, int reserved_tokens, int original_tokens)
29
+ {
30
+ cg::thread_block tb = cg::this_thread_block();
31
+ cg::thread_block_tile<td_sort::warp_size> warp = cg::tiled_partition<td_sort::warp_size>(tb);
32
+
33
+ __shared__ int32_t indices_buffer[td_sort::max_buffer_size];
34
+ __shared__ int32_t intermediate_buffer[td_sort::max_warps];
35
+ __shared__ int32_t sorted_indices_buffer[td_sort::max_buffer_size];
36
+
37
+ for (int i = tb.thread_index().x * td_sort::mem_vals; i < original_tokens + 1;
38
+ i += tb.group_dim().x * td_sort::mem_vals) {
39
+ uint32_t zeros[td_sort::mem_vals] = {0, 0, 0, 0};
40
+ mem_access::store_shared<td_sort::granularity>(indices_buffer + i, zeros);
41
+ }
42
+
43
+ int32_t local_vals[VALS_PER_THREAD];
44
+
45
+ // We flatten layers/batch into a single indexing dimension
46
+ int32_t* data_block = data + tb.group_index().x * reserved_tokens;
47
+
48
+ // The next two loops really could be fused for a more logical code layout, but don't want to
49
+ // move the barrier forward
50
+ #pragma unroll
51
+ for (int i = 0; i < VALS_PER_THREAD; i++) {
52
+ const int iter_idx = i * td_sort::threads + tb.thread_index().x;
53
+ if (iter_idx < reserved_tokens) {
54
+ mem_access::load_global<sizeof(int32_t)>(local_vals + i, data_block + iter_idx);
55
+ } else {
56
+ local_vals[i] = 0;
57
+ }
58
+ }
59
+
60
+ tb.sync();
61
+
62
+ #pragma unroll
63
+ for (int i = 0; i < VALS_PER_THREAD; i++) {
64
+ const int iter_idx = i * td_sort::threads + tb.thread_index().x;
65
+ if (iter_idx < reserved_tokens) {
66
+ const int32_t one = 1;
67
+ mem_access::store_shared<sizeof(int32_t)>(indices_buffer + local_vals[i], &one);
68
+ }
69
+ }
70
+
71
+ tb.sync();
72
+
73
+ int32_t local_input[td_sort::mem_vals];
74
+ mem_access::load_shared<td_sort::granularity>(
75
+ local_input, indices_buffer + tb.thread_index().x * td_sort::mem_vals);
76
+
77
+ int32_t reduce_vals[td_sort::mem_vals];
78
+ reduce_vals[0] = local_input[0];
79
+
80
+ #pragma unroll
81
+ for (int i = 1; i < td_sort::mem_vals; i++) {
82
+ reduce_vals[i] = local_input[i] + reduce_vals[i - 1];
83
+ }
84
+
85
+ int32_t step_1_val = reduce_vals[td_sort::mem_vals - 1];
86
+ // Short span exclusive scan algorithm (less work efficient)
87
+ #pragma unroll
88
+ for (int i = 1; i < td_sort::warp_size; i *= 2) {
89
+ int32_t step_val = warp.shfl_up(step_1_val, i);
90
+ step_1_val = (warp.thread_rank() < i) ? step_1_val : step_1_val + step_val;
91
+ }
92
+
93
+ if (warp.thread_rank() == td_sort::warp_size - 1) {
94
+ mem_access::store_shared<sizeof(int32_t)>(intermediate_buffer + warp.meta_group_rank(),
95
+ &step_1_val);
96
+ }
97
+
98
+ tb.sync();
99
+
100
+ if (warp.meta_group_rank() == 0) {
101
+ int32_t step_2_val = 0;
102
+ if (warp.thread_rank() < td_sort::max_warps) {
103
+ mem_access::load_shared<sizeof(int32_t)>(&step_2_val,
104
+ intermediate_buffer + warp.thread_rank());
105
+ }
106
+
107
+ #pragma unroll
108
+ for (int i = 1; i < td_sort::warp_size; i *= 2) {
109
+ int32_t step_val = warp.shfl_up(step_2_val, i);
110
+ step_2_val = (warp.thread_rank() < i) ? step_2_val : step_2_val + step_val;
111
+ }
112
+
113
+ if (warp.thread_rank() < td_sort::max_warps) {
114
+ mem_access::store_shared<sizeof(int32_t)>(intermediate_buffer + warp.thread_rank(),
115
+ &step_2_val);
116
+ }
117
+ }
118
+
119
+ tb.sync();
120
+
121
+ int step_2_val = 0;
122
+ if (warp.meta_group_rank() > 0) {
123
+ mem_access::load_shared<sizeof(int32_t)>(&step_2_val,
124
+ intermediate_buffer + warp.meta_group_rank() - 1);
125
+ }
126
+
127
+ const int thread_offset = reduce_vals[td_sort::mem_vals - 1];
128
+
129
+ #pragma unroll
130
+ for (int i = 0; i < td_sort::mem_vals; i++) {
131
+ reduce_vals[i] += step_1_val + step_2_val - thread_offset;
132
+ }
133
+ mem_access::store_shared<td_sort::granularity>(
134
+ indices_buffer + tb.thread_index().x * td_sort::mem_vals, reduce_vals);
135
+
136
+ if (tb.thread_index().x == 0) {
137
+ indices_buffer[original_tokens] = original_tokens - indices_buffer[original_tokens];
138
+ }
139
+ tb.sync();
140
+
141
+ for (int i = 0; i < VALS_PER_THREAD; i++) {
142
+ const int iter_idx = i * td_sort::threads + tb.thread_index().x;
143
+ if (iter_idx < reserved_tokens) {
144
+ if (local_vals[i] == 0) {
145
+ int zero = 0;
146
+ mem_access::store_shared<sizeof(int32_t)>(sorted_indices_buffer, &zero);
147
+ } else {
148
+ int sorted_idx;
149
+ mem_access::load_shared<sizeof(int32_t)>(&sorted_idx,
150
+ indices_buffer + local_vals[i] - 1);
151
+ mem_access::store_shared<sizeof(int32_t)>(sorted_indices_buffer + sorted_idx,
152
+ local_vals + i);
153
+ }
154
+ }
155
+ }
156
+
157
+ tb.sync();
158
+
159
+ #pragma unroll
160
+ for (int i = 0; i < VALS_PER_THREAD; i++) {
161
+ const int iter_idx = i * td_sort::threads + tb.thread_index().x;
162
+ if (iter_idx < reserved_tokens) {
163
+ int32_t store_val;
164
+ mem_access::load_shared<sizeof(int32_t)>(&store_val, sorted_indices_buffer + iter_idx);
165
+ mem_access::store_global<sizeof(int32_t)>(data_block + iter_idx, &store_val);
166
+ }
167
+ }
168
+ }
169
+
170
+ void launch_token_sort(int32_t* indices,
171
+ int layers,
172
+ int batch_size,
173
+ int reserved_size,
174
+ int original_tokens,
175
+ cudaStream_t stream)
176
+ {
177
+ // Each sort is completely independent, can flatten this dimension
178
+ dim3 grid(layers * batch_size);
179
+ dim3 block(td_sort::threads);
180
+
181
+ const int vals_per_thread = (reserved_size + td_sort::threads - 1) / td_sort::threads;
182
+
183
+ if (vals_per_thread == 1) {
184
+ scan_sort<1><<<grid, block, 0, stream>>>(indices, reserved_size, original_tokens);
185
+ } else if (vals_per_thread == 2) {
186
+ scan_sort<2><<<grid, block, 0, stream>>>(indices, reserved_size, original_tokens);
187
+ } else if (vals_per_thread == 3) {
188
+ scan_sort<3><<<grid, block, 0, stream>>>(indices, reserved_size, original_tokens);
189
+ } else if (vals_per_thread == 4) {
190
+ scan_sort<4><<<grid, block, 0, stream>>>(indices, reserved_size, original_tokens);
191
+ } else {
192
+ assert(false);
193
+ }
194
+ }
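For orientation, a minimal CPU sketch of what one scan_sort block computes, assuming each reserved slot holds a unique token id in [0, original_tokens) and 0 marks an unused slot. The helper name and vector layout are illustrative only; the kernel performs the same flag / prefix-sum / scatter steps in shared memory with warp shuffles.

#include <cstdint>
#include <vector>

// Sketch: sort the ids present in `vals` into ascending order via presence flags + prefix sum.
std::vector<int32_t> scan_sort_reference(const std::vector<int32_t>& vals, int original_tokens)
{
    std::vector<int32_t> flags(original_tokens + 1, 0);
    for (int32_t v : vals) flags[v] = 1;                     // mark which ids are present

    std::vector<int32_t> scan(flags);                        // inclusive prefix sum of the flags
    for (size_t i = 1; i < scan.size(); i++) scan[i] += scan[i - 1];

    std::vector<int32_t> sorted(vals.size(), 0);
    for (int32_t v : vals) {
        if (v == 0) sorted[0] = 0;                           // id 0 always lands in slot 0
        else sorted[scan[v - 1]] = v;                        // rank = count of present ids < v
    }
    return sorted;
}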
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/sparse_attention/utils.cpp ADDED
@@ -0,0 +1,127 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ /*
7
+ DeepSpeed note, code taken & adapted from commit 9aa94789f13ada713af36cfd8cca2fc9a7f6b79a
8
+ https://github.com/ptillet/torch-blocksparse/blob/master/csrc/utils.cpp
9
+ */
10
+
11
+ #include <torch/extension.h>
12
+ #include <string>
13
+ #include <tuple>
14
+ #include <vector>
15
+ #ifdef _OPENMP
16
+ #include <omp.h>
17
+ #endif
18
+
19
+ typedef std::vector<std::tuple<int, torch::Tensor>> ret_t;
20
+
21
+ void segment_blocks(torch::Tensor layout,
22
+ torch::Tensor idx,
23
+ torch::Tensor scratch,
24
+ int max_width,
25
+ ret_t& ret)
26
+ {
27
+ size_t H = layout.size(0);
28
+ size_t M = layout.size(1);
29
+ size_t N = layout.size(2);
30
+ torch::Tensor tmp = torch::zeros_like(layout);
31
+
32
+ auto _tmp = tmp.accessor<int, 3>();
33
+ auto _layout = layout.accessor<int, 3>();
34
+ auto _idx = idx.accessor<int, 3>();
35
+ auto _scratch = scratch.accessor<int, 3>();
36
+ std::vector<int> current(H, 0);
37
+
38
+ #ifdef _OPENMP
39
+ #pragma omp parallel for
40
+ #endif
41
+ for (size_t h = 0; h < H; h++) {
42
+ // surrounding indices
43
+ std::vector<int> ii_left(max_width, -1);
44
+ std::vector<std::vector<int>> ii_top(max_width, std::vector<int>(N, -1));
45
+
46
+ for (size_t m = 0; m < M; m++) {
47
+ for (size_t n = 0; n < N; n++) {
48
+ int v = _layout[h][m][n];
49
+ if (v == 0) continue;
50
+ int n_left = ii_left[max_width - 1];
51
+ int m_top = ii_top[max_width - 1][n];
52
+ int top = (m_top >= 0) ? _tmp[h][m_top][n] : 0;
53
+ int left = (n_left >= 0) ? _tmp[h][m][n_left] : 0;
54
+ int topleft = (m_top >= 0 && n_left >= 0) ? _tmp[h][m_top][n_left] : 0;
55
+ int width = std::min(left, std::min(top, topleft)) + 1;
56
+
57
+ // reset width if blocks cannot be
58
+ // packed together (i.e., there's a 1 "in the middle")
59
+ for (int nn = n_left + 1; nn < n; nn++)
60
+ if (ii_top[max_width - 1][nn] > ii_top[max_width - 1][n]) width = 1;
61
+ _tmp[h][m][n] = width;
62
+
63
+ // update n_left ring buffer
64
+ for (int k = 0; k < max_width - 1; k++) ii_left[k] = ii_left[k + 1];
65
+ ii_left[max_width - 1] = n;
66
+
67
+ // update ii_top ring buffer
68
+ for (int k = 0; k < max_width - 1; k++) ii_top[k][n] = ii_top[k + 1][n];
69
+ ii_top[max_width - 1][n] = m;
70
+
71
+ // block is too small -- skip
72
+ if (width != max_width) continue;
73
+
74
+ // retained blocks are set to zeros
75
+ for (size_t km = 0; km < max_width; km++)
76
+ for (size_t kn = 0; kn < max_width; kn++) {
77
+ int mm = ii_top[km][n];
78
+ int nn = ii_left[kn];
79
+ if (mm < 0 || nn < 0) continue;
80
+ _layout[h][mm][nn] = 0;
81
+ _tmp[h][mm][nn] = 0;
82
+ _scratch[h][current[h]][0] = (int)h;
83
+ _scratch[h][current[h]][1] = (int)mm;
84
+ _scratch[h][current[h]][2] = (int)nn;
85
+ _scratch[h][current[h]][3] = _idx[h][mm][nn];
86
+ current[h]++;
87
+ }
88
+ }
89
+ }
90
+ }
91
+ std::vector<torch::Tensor> to_cat;
92
+ for (size_t h = 0; h < H; h++)
93
+ if (current[h] > 0) to_cat.push_back(scratch[h].slice(0, 0, current[h]));
94
+ if (!to_cat.empty()) ret.push_back({max_width, torch::cat(to_cat)});
95
+ }
96
+
97
+ ret_t sdd_segment(torch::Tensor layout, int start_width)
98
+ {
99
+ ret_t ret;
100
+
101
+ // block index
102
+ torch::Tensor idx = torch::zeros_like(layout);
103
+ int current = 0;
104
+ int64_t H = layout.size(0);
105
+ int64_t M = layout.size(1);
106
+ int64_t N = layout.size(2);
107
+ auto _layout = layout.accessor<int, 3>();
108
+ auto _idx = idx.accessor<int, 3>();
109
+ for (int64_t h = 0; h < H; h++)
110
+ for (int64_t m = 0; m < M; m++)
111
+ for (int64_t n = 0; n < N; n++) {
112
+ if (_layout[h][m][n] == 0) continue;
113
+ _idx[h][m][n] = current++;
114
+ }
115
+
116
+ // scratch memory
117
+ torch::Tensor scratch = torch::empty({H, layout.sum().item<int>(), 4}, layout.dtype());
118
+
119
+ for (int max_width = start_width; max_width > 0; max_width /= 2)
120
+ segment_blocks(layout, idx, scratch, max_width, ret);
121
+ return ret;
122
+ }
123
+
124
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
125
+ {
126
+ m.def("sdd_segment", &sdd_segment, "SDD segmentation handler");
127
+ }
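A hypothetical host-side usage sketch for the binding above, compiled against libtorch and this translation unit; the layout contents and start_width are made up for illustration, and the printed sizes depend on how the blocks end up packed.

#include <torch/torch.h>
#include <iostream>

void example_sdd_segment()
{
    // One head (H = 1) with a fully dense 2x2 block layout, int32 as required by the accessors.
    torch::Tensor layout = torch::ones({1, 2, 2}, torch::kInt32);
    // Greedily pack blocks into super-blocks of width 2, then width 1.
    ret_t segments = sdd_segment(layout, /*start_width=*/2);
    for (auto& [width, blocks] : segments) {
        // Each row of `blocks` is [head, block_row, block_col, block_index].
        std::cout << "width " << width << ": " << blocks.sizes() << std::endl;
    }
}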
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/gelu.cu ADDED
@@ -0,0 +1,710 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "conversion_utils.h"
7
+ #include "inference_cuda_layers.h"
8
+ #include "memory_access_utils.h"
9
+
10
+ namespace cg = cooperative_groups;
11
+ #define MAX_CAP 4
12
+ #define MAX_SEQ 2048
13
+
14
+ // only used to avoid compilation error due to lack of definition.
15
+ #ifndef BF16_AVAILABLE
16
+ using __nv_bfloat162 = __half2;
17
+ #endif
18
+
19
+ inline __device__ float gelu(const float x)
20
+ {
21
+ constexpr float sqrt_param = 0.79788456080286535587989211986876f;
22
+ constexpr float mul_param = 0.044715;
23
+ return x * 0.5f * (1.0f + tanhf(sqrt_param * (x + mul_param * x * x * x)));
24
+ }
25
+
26
+ /*
27
+ In-place gelu(biasAdd(x)) for channels last
28
+ */
29
+ template <typename T>
30
+ __global__ void fused_bias_gelu(T* input, const T* bias, int total_count, int intermediate_size)
31
+ {
32
+ // Input restriction: intermediate_size % vals_per_access == 0
33
+ constexpr int granularity = 16;
34
+ constexpr int values_per_access = granularity / sizeof(T);
35
+ const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * values_per_access;
36
+
37
+ if (offset < total_count) {
38
+ T data[values_per_access];
39
+ T data_bias[values_per_access];
40
+ mem_access::load_global<granularity>(data, input + offset);
41
+ mem_access::load_global<granularity>(
42
+ data_bias, bias + (offset % intermediate_size), bias != nullptr);
43
+
44
+ #pragma unroll
45
+ for (int i = 0; i < values_per_access; i++) {
46
+ float data_f = conversion::to<float>(data[i]);
47
+ float bias_f = conversion::to<float>(data_bias[i]);
48
+ data[i] = conversion::to<T>(gelu(data_f + bias_f));
49
+ }
50
+
51
+ mem_access::store_global<granularity>(input + offset, data);
52
+ }
53
+ }
54
+
55
+ template <typename T>
56
+ void launch_bias_gelu(T* input,
57
+ const T* bias,
58
+ int intermediate_size,
59
+ int batch_size,
60
+ cudaStream_t stream)
61
+ {
62
+ constexpr int threads = 1024;
63
+ constexpr int granularity = 16;
64
+
65
+ const int total_count = batch_size * intermediate_size;
66
+ const int elems_per_block = threads * (granularity / sizeof(T));
67
+ dim3 block_dims(threads);
68
+ dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block);
69
+
70
+ fused_bias_gelu<<<grid_dims, block_dims, 0, stream>>>(
71
+ input, bias, total_count, intermediate_size);
72
+ }
73
+
74
+ #define INSTANTIATE_LAUNCH_BIAS_GELU(T) \
75
+ template void launch_bias_gelu<T>(T*, const T*, int, int, cudaStream_t);
76
+
77
+ INSTANTIATE_LAUNCH_BIAS_GELU(float)
78
+ #ifdef BF16_AVAILABLE
79
+ INSTANTIATE_LAUNCH_BIAS_GELU(__nv_bfloat16)
80
+ #endif
81
+ INSTANTIATE_LAUNCH_BIAS_GELU(__half)
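As a scalar illustration of the per-lane math in fused_bias_gelu (a sketch, not the device code path):

#include <cmath>

// CPU reference: gelu(x + b) with the tanh approximation used above.
float bias_gelu_reference(float x, float b)
{
    const float sqrt_2_over_pi = 0.7978845608028654f;
    const float y = x + b;                                   // bias add
    const float inner = sqrt_2_over_pi * (y + 0.044715f * y * y * y);
    return y * 0.5f * (1.0f + std::tanh(inner));
}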
82
+
83
+ /*
84
+ In-place channels-last bias add
85
+ */
86
+ template <typename T>
87
+ __global__ void fused_bias_add(T* input, const T* bias, int total_count, int intermediate_size)
88
+ {
89
+ // Input restriction: intermediate_size % vals_per_access == 0
90
+ constexpr int granularity = 16;
91
+ constexpr int values_per_access = granularity / sizeof(T);
92
+ const int offset = (blockIdx.x * blockDim.x + threadIdx.x) * values_per_access;
93
+
94
+ if (offset < total_count) {
95
+ T data[values_per_access];
96
+ T data_bias[values_per_access];
97
+ mem_access::load_global<granularity>(data, input + offset);
98
+ mem_access::load_global<granularity>(
99
+ data_bias, bias + (offset % intermediate_size), bias != nullptr);
100
+
101
+ #pragma unroll
102
+ for (int i = 0; i < values_per_access; i++) {
103
+ float data_f = conversion::to<float>(data[i]);
104
+ float bias_f = conversion::to<float>(data_bias[i]);
105
+ data[i] = conversion::to<T>(data_f + bias_f);
106
+ }
107
+
108
+ mem_access::store_global<granularity>(input + offset, data);
109
+ }
110
+ }
111
+
112
+ template <typename T>
113
+ void launch_bias_add(T* input,
114
+ const T* bias,
115
+ int intermediate_size,
116
+ int batch_size,
117
+ cudaStream_t stream)
118
+ {
119
+ constexpr int threads = 1024;
120
+ constexpr int granularity = 16;
121
+
122
+ const int total_count = batch_size * intermediate_size;
123
+ const int elems_per_block = threads * (granularity / sizeof(T));
124
+ dim3 block_dims(threads);
125
+ dim3 grid_dims((total_count + elems_per_block - 1) / elems_per_block);
126
+
127
+ fused_bias_add<<<grid_dims, block_dims, 0, stream>>>(
128
+ input, bias, total_count, intermediate_size);
129
+ }
130
+
131
+ #define INSTANTIATE_LAUNCH_BIAS_ADD(T) \
132
+ template void launch_bias_add<T>(T*, const T*, int, int, cudaStream_t);
133
+
134
+ INSTANTIATE_LAUNCH_BIAS_ADD(float)
135
+ #ifdef BF16_AVAILABLE
136
+ INSTANTIATE_LAUNCH_BIAS_ADD(__nv_bfloat16)
137
+ #endif
138
+ INSTANTIATE_LAUNCH_BIAS_ADD(__half)
139
+
140
+ __global__ void fused_bias_residual(float* residual,
141
+ const float* hidden_state,
142
+ const float* attn,
143
+ const float* bias,
144
+ const float* attn_bias,
145
+ const int total_count,
146
+ const int intermediate_size,
147
+ const float mp_scale,
148
+ const bool preln)
149
+ {
150
+ float4* res_fl4_ptr = reinterpret_cast<float4*>(residual);
151
+ const float4* hs_fl4_ptr = reinterpret_cast<const float4*>(hidden_state);
152
+ const float4* attn_fl4_ptr = reinterpret_cast<const float4*>(attn);
153
+ const float4* bias_fl4_ptr = reinterpret_cast<const float4*>(bias);
154
+ const float4* attn_bias_fl4_ptr = reinterpret_cast<const float4*>(attn_bias);
155
+ const int offset = blockIdx.x * blockDim.x + threadIdx.x;
156
+
157
+ if (offset < total_count) {
158
+ float4 res_fl4 = res_fl4_ptr[offset];
159
+ const float4 hs_fl4 = hs_fl4_ptr[offset];
160
+ const float4 attn_fl4 = attn_fl4_ptr[offset];
161
+ const float4 bias_fl4 = bias_fl4_ptr[offset % intermediate_size];
162
+ const float4 attn_bias_fl4 = attn_bias_fl4_ptr[offset % intermediate_size];
163
+ if (preln) {
164
+ // residual = (residual + attention + bias + attention_bias) *
165
+ // mp_scale + hidden_state
166
+ res_fl4.x =
167
+ (res_fl4.x + attn_fl4.x + bias_fl4.x + attn_bias_fl4.x) * mp_scale + (hs_fl4.x);
168
+ res_fl4.y =
169
+ (res_fl4.y + attn_fl4.y + bias_fl4.y + attn_bias_fl4.y) * mp_scale + (hs_fl4.y);
170
+ res_fl4.z =
171
+ (res_fl4.z + attn_fl4.z + bias_fl4.z + attn_bias_fl4.z) * mp_scale + (hs_fl4.z);
172
+ res_fl4.w =
173
+ (res_fl4.w + attn_fl4.w + bias_fl4.w + attn_bias_fl4.w) * mp_scale + (hs_fl4.w);
174
+ } else {
175
+ // residual += hidden_state + bias
176
+ res_fl4.x = res_fl4.x + hs_fl4.x + bias_fl4.x;
177
+ res_fl4.y = res_fl4.y + hs_fl4.y + bias_fl4.y;
178
+ res_fl4.z = res_fl4.z + hs_fl4.z + bias_fl4.z;
179
+ res_fl4.w = res_fl4.w + hs_fl4.w + bias_fl4.w;
180
+ }
181
+ res_fl4_ptr[offset] = res_fl4;
182
+ }
183
+ }
184
+
185
+ template <typename T>
186
+ __global__ void fused_bias_residual(T* residual,
187
+ const T* hidden_state,
188
+ const T* attn,
189
+ const T* bias,
190
+ const T* attn_bias,
191
+ const int total_count,
192
+ const int intermediate_size,
193
+ const float mp_scale,
194
+ const bool preln)
195
+ {
196
+ using T2 =
197
+ typename std::conditional<std::is_same<T, __half>::value, __half2, __nv_bfloat162>::type;
198
+ float2* res_fl2_ptr = reinterpret_cast<float2*>(residual);
199
+ const float2* hs_fl2_ptr = reinterpret_cast<const float2*>(hidden_state);
200
+ const float2* attn_fl2_ptr = reinterpret_cast<const float2*>(attn);
201
+ const float2* bias_fl2_ptr = reinterpret_cast<const float2*>(bias);
202
+ const float2* attn_bias_fl2_ptr = reinterpret_cast<const float2*>(attn_bias);
203
+ const int offset = blockIdx.x * blockDim.x + threadIdx.x;
204
+
205
+ if (offset < total_count) {
206
+ float2 res_fl2 = res_fl2_ptr[offset];
207
+ const float2 hs_fl2 = hs_fl2_ptr[offset];
208
+ const float2 attn_fl2 = attn_fl2_ptr[offset];
209
+ const float2 bias_fl2 = bias_fl2_ptr[offset % intermediate_size];
210
+ const float2 attn_bias_fl2 = attn_bias_fl2_ptr[offset % intermediate_size];
211
+
212
+ T2* res_half2 = reinterpret_cast<T2*>(&res_fl2);
213
+ const T2* hs_half2 = reinterpret_cast<const T2*>(&hs_fl2);
214
+ const T2* attn_half2 = reinterpret_cast<const T2*>(&attn_fl2);
215
+ const T2* bias_half2 = reinterpret_cast<const T2*>(&bias_fl2);
216
+ const T2* attn_bias_half2 = reinterpret_cast<const T2*>(&attn_bias_fl2);
217
+
218
+ float2 res_low = conversion::to<float2>(res_half2[0]);
219
+ float2 res_high = conversion::to<float2>(res_half2[1]);
220
+
221
+ const float2 hs_low = conversion::to<float2>(hs_half2[0]);
222
+ const float2 hs_high = conversion::to<float2>(hs_half2[1]);
223
+
224
+ const float2 attn_low = conversion::to<float2>(attn_half2[0]);
225
+ const float2 attn_high = conversion::to<float2>(attn_half2[1]);
226
+
227
+ const float2 bias_low = conversion::to<float2>(bias_half2[0]);
228
+ const float2 bias_high = conversion::to<float2>(bias_half2[1]);
229
+
230
+ const float2 attn_bias_low = conversion::to<float2>(attn_bias_half2[0]);
231
+ const float2 attn_bias_high = conversion::to<float2>(attn_bias_half2[1]);
232
+
233
+ if (preln) {
234
+ // residual = (residual + attention + bias + attention_bias) *
235
+ // mp_scale + hidden_state
236
+ res_low.x =
237
+ (res_low.x + attn_low.x + bias_low.x + attn_bias_low.x) * mp_scale + hs_low.x;
238
+ res_low.y =
239
+ (res_low.y + attn_low.y + bias_low.y + attn_bias_low.y) * mp_scale + hs_low.y;
240
+ res_high.x =
241
+ (res_high.x + attn_high.x + bias_high.x + attn_bias_high.x) * mp_scale + hs_high.x;
242
+ res_high.y =
243
+ (res_high.y + attn_high.y + bias_high.y + attn_bias_high.y) * mp_scale + hs_high.y;
244
+ } else {
245
+ // residual += hidden_state + bias
246
+ res_low.x = (res_low.x + hs_low.x + bias_low.x);
247
+ res_low.y = (res_low.y + hs_low.y + bias_low.y);
248
+ res_high.x = (res_high.x + hs_high.x + bias_high.x);
249
+ res_high.y = (res_high.y + hs_high.y + bias_high.y);
250
+ }
251
+ res_half2[0] = conversion::to<T2>(res_low);
252
+ res_half2[1] = conversion::to<T2>(res_high);
253
+
254
+ res_fl2_ptr[offset] = res_fl2;
255
+ }
256
+ }
257
+
258
+ template <typename T>
259
+ void launch_bias_residual(T* residual,
260
+ T* hidden_state,
261
+ T* attn,
262
+ T* bias,
263
+ T* attn_bias,
264
+ int batch,
265
+ int hidden_dim,
266
+ int mp_size,
267
+ bool preln,
268
+ cudaStream_t stream)
269
+ {
270
+ int total_count = batch * hidden_dim / 4;
271
+ dim3 block_dims(1024);
272
+ dim3 grid_dims((total_count - 1) / 1024 + 1); // (batch_size);
273
+
274
+ fused_bias_residual<<<grid_dims, block_dims, 0, stream>>>(residual,
275
+ hidden_state,
276
+ attn,
277
+ bias,
278
+ attn_bias,
279
+ total_count,
280
+ hidden_dim / 4,
281
+ 1.0 / mp_size,
282
+ preln);
283
+ }
284
+
285
+ #define INSTANTIATE_LAUNCH_BIAS_RESIDUAL(T) \
286
+ template void launch_bias_residual<T>(T*, T*, T*, T*, T*, int, int, int, bool, cudaStream_t);
287
+
288
+ INSTANTIATE_LAUNCH_BIAS_RESIDUAL(float);
289
+ #ifdef BF16_AVAILABLE
290
+ INSTANTIATE_LAUNCH_BIAS_RESIDUAL(__nv_bfloat16);
291
+ #endif
292
+ INSTANTIATE_LAUNCH_BIAS_RESIDUAL(__half);
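A scalar sketch of the two per-element paths implemented by the fused_bias_residual kernels (the launcher passes mp_scale = 1.0 / mp_size); the function name is illustrative only:

// CPU reference for one element of fused_bias_residual.
float bias_residual_reference(float residual, float hidden, float attn,
                              float bias, float attn_bias, float mp_scale, bool preln)
{
    if (preln) {
        // residual = (residual + attention + bias + attention_bias) * mp_scale + hidden_state
        return (residual + attn + bias + attn_bias) * mp_scale + hidden;
    }
    // residual += hidden_state + bias
    return residual + hidden + bias;
}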
293
+
294
+ __global__ void gptj_residual_add(float* residual,
295
+ const float* hidden_state,
296
+ const float* attn,
297
+ const float* bias,
298
+ const float* attn_bias,
299
+ const int total_count,
300
+ const int intermediate_size,
301
+ const float mp_scale)
302
+ {
303
+ float4* res_fl4_ptr = reinterpret_cast<float4*>(residual);
304
+ const float4* hs_fl4_ptr = reinterpret_cast<const float4*>(hidden_state);
305
+ const float4* attn_fl4_ptr = reinterpret_cast<const float4*>(attn);
306
+ const float4* bias_fl4_ptr = reinterpret_cast<const float4*>(bias);
307
+ const float4* attn_bias_fl4_ptr = reinterpret_cast<const float4*>(attn_bias);
308
+ const int offset = blockIdx.x * blockDim.x + threadIdx.x;
309
+
310
+ if (offset < total_count) {
311
+ float4 res_fl4 = res_fl4_ptr[offset];
312
+ const float4 hs_fl4 = hs_fl4_ptr[offset];
313
+ const float4 attn_fl4 = attn_fl4_ptr[offset];
314
+ const float4 bias_fl4 = bias_fl4_ptr[offset % intermediate_size];
315
+
316
+ if (attn_bias) {
317
+ float4 attn_bias_fl4 = attn_bias_fl4_ptr[offset % intermediate_size];
318
+ // residual += attention_bias
319
+ res_fl4.x += attn_bias_fl4.x;
320
+ res_fl4.y += attn_bias_fl4.y;
321
+ res_fl4.z += attn_bias_fl4.z;
322
+ res_fl4.w += attn_bias_fl4.w;
323
+ }
324
+ // residual = hidden_state + attention + (residual + bias) * mp_scale
325
+ res_fl4.x = hs_fl4.x + attn_fl4.x + (res_fl4.x + bias_fl4.x) * mp_scale;
326
+ res_fl4.y = hs_fl4.y + attn_fl4.y + (res_fl4.y + bias_fl4.y) * mp_scale;
327
+ res_fl4.z = hs_fl4.z + attn_fl4.z + (res_fl4.z + bias_fl4.z) * mp_scale;
328
+ res_fl4.w = hs_fl4.w + attn_fl4.w + (res_fl4.w + bias_fl4.w) * mp_scale;
329
+
330
+ res_fl4_ptr[offset] = res_fl4;
331
+ }
332
+ }
333
+
334
+ template <typename T>
335
+ __global__ void gptj_residual_add(T* residual,
336
+ const T* hidden_state,
337
+ const T* attn,
338
+ const T* bias,
339
+ const T* attn_bias,
340
+ const int total_count,
341
+ const int intermediate_size,
342
+ const float mp_scale)
343
+ {
344
+ using T2 =
345
+ typename std::conditional<std::is_same<T, __half>::value, __half2, __nv_bfloat162>::type;
346
+ float2* res_fl2_ptr = reinterpret_cast<float2*>(residual);
347
+ const float2* hs_fl2_ptr = reinterpret_cast<const float2*>(hidden_state);
348
+ const float2* attn_fl2_ptr = reinterpret_cast<const float2*>(attn);
349
+ const float2* bias_fl2_ptr = reinterpret_cast<const float2*>(bias);
350
+ const float2* attn_bias_fl2_ptr = reinterpret_cast<const float2*>(attn_bias);
351
+ const int offset = blockIdx.x * blockDim.x + threadIdx.x;
352
+
353
+ if (offset < total_count) {
354
+ float2 res_fl2 = res_fl2_ptr[offset];
355
+ const float2 hs_fl2 = hs_fl2_ptr[offset];
356
+ const float2 attn_fl2 = attn_fl2_ptr[offset];
357
+ const float2 bias_fl2 = bias_fl2_ptr[offset % intermediate_size];
358
+
359
+ T2* res_half2 = reinterpret_cast<T2*>(&res_fl2);
360
+ const T2* hs_half2 = reinterpret_cast<const T2*>(&hs_fl2);
361
+ const T2* attn_half2 = reinterpret_cast<const T2*>(&attn_fl2);
362
+ const T2* bias_half2 = reinterpret_cast<const T2*>(&bias_fl2);
363
+
364
+ float2 res_low = conversion::to<float2>(res_half2[0]);
365
+ float2 res_high = conversion::to<float2>(res_half2[1]);
366
+
367
+ const float2 hs_low = conversion::to<float2>(hs_half2[0]);
368
+ const float2 hs_high = conversion::to<float2>(hs_half2[1]);
369
+
370
+ const float2 attn_low = conversion::to<float2>(attn_half2[0]);
371
+ const float2 attn_high = conversion::to<float2>(attn_half2[1]);
372
+
373
+ const float2 bias_low = conversion::to<float2>(bias_half2[0]);
374
+ const float2 bias_high = conversion::to<float2>(bias_half2[1]);
375
+
376
+ if (attn_bias) {
377
+ const float2 attn_bias_fl2 = attn_bias_fl2_ptr[offset % intermediate_size];
378
+ const T2* attn_bias_half2 = reinterpret_cast<const T2*>(&attn_bias_fl2);
379
+ const float2 attn_bias_low = conversion::to<float2>(attn_bias_half2[0]);
380
+ const float2 attn_bias_high = conversion::to<float2>(attn_bias_half2[1]);
381
+ // residual += attention_bias
382
+ res_low.x += attn_bias_low.x;
383
+ res_low.y += attn_bias_low.y;
384
+ res_high.x += attn_bias_high.x;
385
+ res_high.y += attn_bias_high.y;
386
+ }
387
+ // residual = hidden_state + attention + (residual + bias) * mp_scale
388
+ res_low.x = attn_low.x + hs_low.x + (res_low.x + bias_low.x) * mp_scale;
389
+ res_low.y = attn_low.y + hs_low.y + (res_low.y + bias_low.y) * mp_scale;
390
+ res_high.x = attn_high.x + hs_high.x + (res_high.x + bias_high.x) * mp_scale;
391
+ res_high.y = attn_high.y + hs_high.y + (res_high.y + bias_high.y) * mp_scale;
392
+
393
+ res_half2[0] = conversion::to<T2>(res_low);
394
+ res_half2[1] = conversion::to<T2>(res_high);
395
+
396
+ res_fl2_ptr[offset] = res_fl2;
397
+ }
398
+ }
399
+
400
+ template <typename T>
401
+ void launch_gptj_residual_add(T* residual,
402
+ T* hidden_state,
403
+ T* attn,
404
+ T* bias,
405
+ T* attn_bias,
406
+ int hidden_dim,
407
+ int batch,
408
+ int mp_size,
409
+ cudaStream_t stream)
410
+ {
411
+ int total_count = batch * hidden_dim / 4;
412
+ dim3 block_dims(1024);
413
+ dim3 grid_dims((total_count - 1) / 1024 + 1); // (batch_size);
414
+
415
+ gptj_residual_add<<<grid_dims, block_dims, 0, stream>>>(
416
+ residual, hidden_state, attn, bias, attn_bias, total_count, hidden_dim / 4, 1.0 / mp_size);
417
+ }
418
+
419
+ #define INSTANTIATE_GPT_RES_ADD(T) \
420
+ template void launch_gptj_residual_add<T>(T*, T*, T*, T*, T*, int, int, int, cudaStream_t);
421
+
422
+ INSTANTIATE_GPT_RES_ADD(float);
423
+ INSTANTIATE_GPT_RES_ADD(__half);
424
+ #ifdef BF16_AVAILABLE
425
+ INSTANTIATE_GPT_RES_ADD(__nv_bfloat16);
426
+ #endif
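The GPT-J variant folds the optional attention bias into the residual before rescaling; a scalar sketch of one element (illustration only):

// CPU reference for one element of gptj_residual_add.
float gptj_residual_reference(float residual, float hidden, float attn,
                              float bias, const float* attn_bias, float mp_scale)
{
    if (attn_bias != nullptr) residual += *attn_bias;
    // residual = hidden_state + attention + (residual + bias) * mp_scale
    return hidden + attn + (residual + bias) * mp_scale;
}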
427
+
428
+ template <typename T>
429
+ __global__ void moe_res_matmul(T* residual, T* coef, T* mlp_out, int seq_len, int hidden_dim)
430
+ {
431
+ constexpr int granularity = 16;
432
+ constexpr int vals_per_access = granularity / sizeof(T);
433
+
434
+ T* residual_seq = residual + blockIdx.x * hidden_dim;
435
+ T* mlp_out_seq = mlp_out + blockIdx.x * hidden_dim;
436
+
437
+ for (unsigned tid = threadIdx.x * vals_per_access; tid < hidden_dim;
438
+ tid += blockDim.x * vals_per_access) {
439
+ T mlp[vals_per_access];
440
+ T res[vals_per_access];
441
+ T coef1[vals_per_access];
442
+ T coef2[vals_per_access];
443
+
444
+ mem_access::load_global<granularity>(mlp, mlp_out_seq + tid);
445
+ mem_access::load_global<granularity>(res, residual_seq + tid);
446
+ mem_access::load_global<granularity>(coef1, coef + tid);
447
+ mem_access::load_global<granularity>(coef2, coef + tid + hidden_dim);
448
+
449
+ #pragma unroll
450
+ for (int idx = 0; idx < vals_per_access; idx++) {
451
+ mlp[idx] = mlp[idx] * coef2[idx] + res[idx] * coef1[idx];
452
+ }
453
+
454
+ mem_access::store_global<granularity>(mlp_out_seq + tid, mlp);
455
+ }
456
+ }
457
+
458
+ template <typename T>
459
+ void launch_moe_res_matmul(T* residual,
460
+ T* coef,
461
+ T* mlp_out,
462
+ int seq_len,
463
+ int hidden_dim,
464
+ cudaStream_t stream)
465
+ {
466
+ dim3 grid_dim(seq_len);
467
+ dim3 block_dim(1024);
468
+ moe_res_matmul<<<grid_dim, block_dim, 0, stream>>>(
469
+ residual, coef, mlp_out, seq_len, hidden_dim);
470
+ }
471
+
472
+ #define INSTANTIATE_LAUNCH_MOE_RES_MATMUL(T) \
473
+ template void launch_moe_res_matmul<T>(T*, T*, T*, int, int, cudaStream_t);
474
+
475
+ INSTANTIATE_LAUNCH_MOE_RES_MATMUL(float);
476
+ #ifdef BF16_AVAILABLE
477
+ INSTANTIATE_LAUNCH_MOE_RES_MATMUL(__nv_bfloat16);
478
+ #endif
479
+ INSTANTIATE_LAUNCH_MOE_RES_MATMUL(__half);
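For reference, the per-row update performed by moe_res_matmul, written out on the CPU: the first hidden_dim entries of `coef` scale the residual and the next hidden_dim entries scale the MLP output (a sketch, not the kernel's API):

// CPU reference for one sequence position of moe_res_matmul.
void moe_res_reference(float* mlp_out, const float* residual, const float* coef, int hidden_dim)
{
    for (int i = 0; i < hidden_dim; i++) {
        mlp_out[i] = mlp_out[i] * coef[i + hidden_dim] + residual[i] * coef[i];
    }
}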
480
+
481
+ template <typename T>
482
+ __global__ void pad_data_kernel(T* padded_output, T* output, int head_size, int padded_head_size)
483
+ {
484
+ using T2 =
485
+ typename std::conditional<std::is_same<T, __half>::value, __half2, __nv_bfloat162>::type;
486
+ float4* padded_output_cast = reinterpret_cast<float4*>(padded_output);
487
+ float4* output_cast = reinterpret_cast<float4*>(output);
488
+ int bid = blockIdx.x * (blockDim.y) + threadIdx.y;
489
+ int idx = threadIdx.x;
490
+ padded_output_cast += (bid * padded_head_size);
491
+ output_cast += (bid * head_size);
492
+ float4 ZERO;
493
+ const T2 zero_h = conversion::to<T2>(0.f);
494
+ T2* ZERO_h = reinterpret_cast<T2*>(&ZERO);
495
+ #pragma unroll
496
+ for (int i = 0; i < 4; i++) ZERO_h[i] = zero_h;
497
+ if (idx < head_size)
498
+ padded_output_cast[idx] = output_cast[idx];
499
+ else
500
+ padded_output_cast[idx] = ZERO;
501
+ }
502
+
503
+ __global__ void pad_data_kernel(float* padded_output,
504
+ float* output,
505
+ int head_size,
506
+ int padded_head_size)
507
+ {
508
+ }
509
+
510
+ template <typename T>
511
+ void pad_data(T* padded_output,
512
+ T* output,
513
+ int bsz,
514
+ int head_size,
515
+ int padded_head_size,
516
+ cudaStream_t stream)
517
+ {
518
+ dim3 grid_dim((bsz - 1) / 16 + 1);
519
+ dim3 block_dim(padded_head_size / 8, 16);
520
+ pad_data_kernel<<<grid_dim, block_dim, 0, stream>>>(
521
+ padded_output, output, head_size / 8, padded_head_size / 8);
522
+ }
523
+
524
+ #define INSTANTIATE_PAD_DATA(T) template void pad_data(T*, T*, int, int, int, cudaStream_t stream);
525
+
526
+ INSTANTIATE_PAD_DATA(float);
527
+ INSTANTIATE_PAD_DATA(__half);
528
+ #ifdef BF16_AVAILABLE
529
+ INSTANTIATE_PAD_DATA(__nv_bfloat16);
530
+ #endif
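Conceptually, pad_data copies each row's first head_size values and zero-fills the rest of the padded row; the kernel works in float4 chunks, so head_size and padded_head_size are assumed to be multiples of the per-chunk element count (the float specialization above is a no-op). A CPU sketch:

#include <vector>

// CPU reference: zero-padded copy of `rows` rows from head_size to padded_head_size columns.
std::vector<float> pad_data_reference(const std::vector<float>& src,
                                      int rows, int head_size, int padded_head_size)
{
    std::vector<float> dst(static_cast<size_t>(rows) * padded_head_size, 0.0f);
    for (int r = 0; r < rows; r++)
        for (int c = 0; c < head_size; c++)
            dst[r * padded_head_size + c] = src[r * head_size + c];
    return dst;
}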
531
+
532
+ template <typename T>
533
+ __global__ void pad_head_seq_kernel(T* padded_output,
534
+ T* output,
535
+ int seq_len,
536
+ int padded_seq_len,
537
+ int head_size,
538
+ int padded_head_size)
539
+ {
540
+ using T2 =
541
+ typename std::conditional<std::is_same<T, __half>::value, __half2, __nv_bfloat162>::type;
542
+ float4* padded_output_cast = reinterpret_cast<float4*>(padded_output);
543
+ float4* output_cast = reinterpret_cast<float4*>(output);
544
+ int bsz = blockIdx.x;
545
+ int bid = blockIdx.y * (blockDim.y) + threadIdx.y;
546
+ int idx = threadIdx.x;
547
+ padded_output_cast += (bsz * padded_seq_len + bid) * padded_head_size;
548
+ output_cast += (bsz * seq_len + bid) * head_size;
549
+ float4 ZERO;
550
+ const T2 zero_h = conversion::to<T2>(0.f);
551
+ T2* ZERO_h = reinterpret_cast<T2*>(&ZERO);
552
+ #pragma unroll
553
+ for (int i = 0; i < 4; i++) ZERO_h[i] = zero_h;
554
+
555
+ if (idx < head_size && bid < seq_len)
556
+ padded_output_cast[idx] = output_cast[idx];
557
+ else
558
+ padded_output_cast[idx] = ZERO;
559
+ }
560
+
561
+ __global__ void pad_head_seq_kernel(float* padded_output,
562
+ float* output,
563
+ int seq_len,
564
+ int padded_seq_len,
565
+ int head_size,
566
+ int padded_head_size)
567
+ {
568
+ }
569
+
570
+ template <typename T>
571
+ void pad_head_seq(T* padded_output,
572
+ T* output,
573
+ int bsz,
574
+ int seq_len,
575
+ int padded_seq_len,
576
+ int head_size,
577
+ int padded_head_size,
578
+ cudaStream_t stream)
579
+ {
580
+ dim3 grid_dim(bsz, padded_seq_len / 16);
581
+ dim3 block_dim(padded_head_size / 8, 16);
582
+ pad_head_seq_kernel<<<grid_dim, block_dim, 0, stream>>>(
583
+ padded_output, output, seq_len, padded_seq_len, head_size / 8, padded_head_size / 8);
584
+ }
585
+
586
+ #define INSTANTIATE_PAD_HEAD_SEQ(T) \
587
+ template void pad_head_seq<T>(T*, T*, int, int, int, int, int, cudaStream_t);
588
+
589
+ INSTANTIATE_PAD_HEAD_SEQ(__half);
590
+ #ifdef BF16_AVAILABLE
591
+ INSTANTIATE_PAD_HEAD_SEQ(__nv_bfloat16);
592
+ #endif
593
+ INSTANTIATE_PAD_HEAD_SEQ(float);
594
+
595
+ // TODO(cmikeh2): evaluate different GeLU performance
596
+ __device__ __forceinline__ float old_gelu(float val)
597
+ {
598
+ // 1 / sqrt(2)
599
+ constexpr float rsqrt_2 = 0.707106769084930419922;
600
+ return val * 0.5f * (1.0f + erff(val * rsqrt_2));
601
+ }
602
+
603
+ namespace fused_geglu {
604
+ constexpr int threads = 256;
605
+ constexpr int steps = 2;
606
+ constexpr int granularity = 16;
607
+ } // namespace fused_geglu
608
+
609
+ __device__ __forceinline__ float silu(float val) { return val / (1.0f + expf(-val)); }
610
+
611
+ template <typename T, bool useGelu>
612
+ __global__ void fused_gate_activation(T* output,
613
+ const T* activation,
614
+ const T* bias,
615
+ int base_channels,
616
+ int output_stride,
617
+ int total_elems)
618
+ {
619
+ constexpr int T_per_access = fused_geglu::granularity / sizeof(T);
620
+ constexpr int T_per_step = T_per_access * fused_geglu::threads;
621
+ constexpr int T_per_block = T_per_step * fused_geglu::steps;
622
+
623
+ const int id = blockIdx.x * T_per_block + threadIdx.x * T_per_access;
624
+
625
+ #pragma unroll
626
+ for (int i = 0; i < fused_geglu::steps; i++) {
627
+ T activation_buffer_1[T_per_access];
628
+ T activation_buffer_2[T_per_access];
629
+ T bias_buffer_1[T_per_access];
630
+ T bias_buffer_2[T_per_access];
631
+
632
+ const int iter_id = id + T_per_step * i;
633
+ if (iter_id < total_elems) {
634
+ const int channel_id = iter_id % base_channels;
635
+ const int seq_id = iter_id / base_channels;
636
+ const int seq_offset = seq_id * base_channels * 2;
637
+
638
+ mem_access::load_global<fused_geglu::granularity>(activation_buffer_1,
639
+ activation + seq_offset + channel_id);
640
+ mem_access::load_global<fused_geglu::granularity>(
641
+ activation_buffer_2, activation + seq_offset + channel_id + base_channels);
642
+ mem_access::load_global<fused_geglu::granularity>(
643
+ bias_buffer_1, bias + channel_id, bias != nullptr);
644
+ mem_access::load_global<fused_geglu::granularity>(
645
+ bias_buffer_2, bias + channel_id + base_channels, bias != nullptr);
646
+
647
+ // Since the GeLU is going to happen at float, might as well
648
+ // convert
649
+ #pragma unroll
650
+ for (int v = 0; v < T_per_access; v++) {
651
+ T hidden_state = activation_buffer_1[v] + bias_buffer_1[v];
652
+ T pre_gate = activation_buffer_2[v] + bias_buffer_2[v];
653
+ float pre_gate_f = conversion::to<float>(pre_gate);
654
+ float gate_f = (useGelu) ? old_gelu(pre_gate_f) : silu(pre_gate_f);
655
+ T gate = conversion::to<T>(gate_f);
656
+ activation_buffer_1[v] = hidden_state * gate;
657
+ }
658
+
659
+ mem_access::store_global<fused_geglu::granularity>(
660
+ output + seq_id * output_stride + channel_id, activation_buffer_1);
661
+ }
662
+ }
663
+ }
664
+
665
+ template <typename T>
666
+ void launch_gated_activation(T* output,
667
+ const T* activation,
668
+ const T* bias,
669
+ int rows,
670
+ int output_stride,
671
+ int elems_per_row,
672
+ bool use_gelu,
673
+ cudaStream_t stream)
674
+ {
675
+ /*
676
+ Fused bias GEGLU is a variant of the gated activation functions.
677
+ The input here is a matrix of [batch, seq_len, 2 * intermediate_dim]
678
+ where the second half of the channels act as GeLU gates for the first
679
+ half.
680
+ */
681
+
682
+ // Re-derive the above figures
683
+ constexpr int T_per_access = fused_geglu::granularity / sizeof(T);
684
+ constexpr int T_per_step = T_per_access * fused_geglu::threads;
685
+ constexpr int T_per_block = T_per_step * fused_geglu::steps;
686
+
687
+ const int base_channels = elems_per_row / 2;
688
+ const int total_elems = base_channels * rows;
689
+
690
+ dim3 block(fused_geglu::threads);
691
+ dim3 grid((total_elems + T_per_block - 1) / T_per_block);
692
+
693
+ if (use_gelu) {
694
+ fused_gate_activation<T, true><<<grid, block, 0, stream>>>(
695
+ output, activation, bias, base_channels, output_stride, total_elems);
696
+ } else {
697
+ fused_gate_activation<T, false><<<grid, block, 0, stream>>>(
698
+ output, activation, bias, base_channels, output_stride, total_elems);
699
+ }
700
+ }
701
+
702
+ #define INSTANTIATE_LAUNCH_GATED_ACTIVATION(T) \
703
+ template void launch_gated_activation( \
704
+ T*, const T*, const T*, int, int, int, bool, cudaStream_t);
705
+
706
+ INSTANTIATE_LAUNCH_GATED_ACTIVATION(__half);
707
+ #ifdef BF16_AVAILABLE
708
+ INSTANTIATE_LAUNCH_GATED_ACTIVATION(__nv_bfloat16);
709
+ #endif
710
+ INSTANTIATE_LAUNCH_GATED_ACTIVATION(float);
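A scalar sketch of one row of the gated activation above: the second half of each input row gates the first half after the bias add, using either the erf-based GeLU or SiLU (illustration only, not the kernel's vectorized path):

#include <cmath>

// CPU reference for one row of the fused gated activation (GEGLU / SiLU-gated input).
void gated_activation_reference(float* out, const float* act, const float* bias,
                                int base_channels, bool use_gelu)
{
    for (int c = 0; c < base_channels; c++) {
        const float hidden = act[c] + (bias ? bias[c] : 0.0f);
        const float pre_gate = act[c + base_channels] + (bias ? bias[c + base_channels] : 0.0f);
        const float gate = use_gelu
                               ? pre_gate * 0.5f * (1.0f + std::erf(pre_gate * 0.70710678f))
                               : pre_gate / (1.0f + std::exp(-pre_gate));
        out[c] = hidden * gate;
    }
}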
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/pointwise_ops.cu ADDED
@@ -0,0 +1,74 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include <cuda_fp16.h>
7
+ #include "conversion_utils.h"
8
+ #include "ds_kernel_utils.h"
9
+ #include "memory_access_utils.h"
10
+
11
+ namespace pwise {
12
+ constexpr int granularity = 16;
13
+ constexpr int unroll = 4;
14
+ constexpr int threads = 256;
15
+ } // namespace pwise
16
+
17
+ template <typename T>
18
+ __global__ void vector_add_kernel(T* out, const T* a, const T* b, float gamma, int num_elems)
19
+ {
20
+ constexpr int T_per_access = pwise::granularity / sizeof(T);
21
+
22
+ const int block_offset = blockIdx.x * pwise::threads * pwise::unroll * T_per_access;
23
+ const int thread_offset = threadIdx.x * T_per_access;
24
+ const int total_offset = block_offset + thread_offset;
25
+ constexpr int stride = pwise::threads * T_per_access;
26
+
27
+ #pragma unroll
28
+ for (int i = 0; i < pwise::unroll; i++) {
29
+ T temp_buf_a[T_per_access], temp_buf_b[T_per_access];
30
+
31
+ const int iter_idx = total_offset + i * stride;
32
+
33
+ mem_access::load_global<pwise::granularity>(temp_buf_a, a + iter_idx, iter_idx < num_elems);
34
+ mem_access::load_global<pwise::granularity>(temp_buf_b, b + iter_idx, iter_idx < num_elems);
35
+
36
+ #pragma unroll
37
+ for (int j = 0; j < T_per_access; j++) {
38
+ float up_cast_a = conversion::to<float>(temp_buf_a[j]);
39
+ float up_cast_b = conversion::to<float>(temp_buf_b[j]);
40
+ temp_buf_a[j] = conversion::to<T>((gamma * up_cast_a) + up_cast_b);
41
+ }
42
+
43
+ if (iter_idx < num_elems) {
44
+ mem_access::store_global<pwise::granularity>(out + iter_idx, temp_buf_a);
45
+ }
46
+ }
47
+ }
48
+
49
+ template <typename T>
50
+ void launch_vector_add(T* out,
51
+ const T* a,
52
+ const T* b,
53
+ float gamma,
54
+ int num_elems,
55
+ cudaStream_t stream)
56
+ {
57
+ constexpr int T_per_access = pwise::granularity / sizeof(T);
58
+ constexpr int T_per_block = pwise::threads * T_per_access * pwise::unroll;
59
+
60
+ dim3 block(pwise::threads);
61
+ dim3 grid((num_elems + T_per_block - 1) / T_per_block);
62
+
63
+ vector_add_kernel<<<grid, block, 0, stream>>>(out, a, b, gamma, num_elems);
64
+ }
65
+
66
+ #define INSTANTIATE_VECTOR_ADD(T) \
67
+ template void launch_vector_add<T>( \
68
+ T * out, const T* a, const T* b, float gamma, int num_elems, cudaStream_t stream);
69
+
70
+ INSTANTIATE_VECTOR_ADD(float)
71
+ INSTANTIATE_VECTOR_ADD(__half)
72
+ #ifdef BF16_AVAILABLE
73
+ INSTANTIATE_VECTOR_ADD(__nv_bfloat16)
74
+ #endif
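The per-element operation here is simply out = gamma * a + b; a trivial CPU sketch for reference:

// CPU reference for vector_add_kernel (illustration only).
void vector_add_reference(float* out, const float* a, const float* b, float gamma, int n)
{
    for (int i = 0; i < n; i++) out[i] = gamma * a[i] + b[i];
}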
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/rms_norm.cu ADDED
@@ -0,0 +1,263 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "conversion_utils.h"
7
+ #include "ds_kernel_utils.h"
8
+ #include "inference_cuda_layers.h"
9
+ #include "memory_access_utils.h"
10
+ #include "reduction_utils.h"
11
+
12
+ namespace cg = cooperative_groups;
13
+ using rop = reduce::ROpType;
14
+
15
+ namespace rms {
16
+ constexpr int granularity = 16;
17
+ } // namespace rms
18
+
19
+ template <typename T, int UNROLL, int threadsPerGroup, int maxThreads>
20
+ __global__ void rms_norm(T* output, const T* vals, const T* gamma, float epsilon, int elems_per_row)
21
+ {
22
+ constexpr int T_per_load = rms::granularity / sizeof(T);
23
+
24
+ cg::thread_block tb = cg::this_thread_block();
25
+ cg::thread_block_tile<hw_warp_size> warp = cg::tiled_partition<hw_warp_size>(tb);
26
+
27
+ // X-dimension of the block
28
+ const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) +
29
+ (tb.thread_index().y * elems_per_row);
30
+ const int thread_offset = tb.thread_index().x * T_per_load;
31
+ const int base_offset = block_offset + thread_offset;
32
+ const int stride = blockDim.x * T_per_load;
33
+
34
+ float var_sum = reduce::init<rop::Add, float>();
35
+
36
+ const T* input_base = vals + base_offset;
37
+
38
+ T local_buffer[UNROLL * T_per_load];
39
+
40
+ #pragma unroll
41
+ for (int i = 0; i < UNROLL; i++) {
42
+ T* iteration_buffer = local_buffer + (i * T_per_load);
43
+
44
+ mem_access::load_global<rms::granularity>(iteration_buffer,
45
+ input_base + (i * stride),
46
+ thread_offset + (i * stride) < elems_per_row);
47
+
48
+ #pragma unroll
49
+ for (int j = 0; j < T_per_load; j++) {
50
+ float up_cast = conversion::to<float>(iteration_buffer[j]);
51
+ float sq_val = up_cast * up_cast;
52
+ var_sum = reduce::element<rop::Add, float>(var_sum, sq_val);
53
+ }
54
+ }
55
+
56
+ reduce::partitioned_block<rop::Add, threadsPerGroup>(tb, warp, var_sum);
57
+ const float var = var_sum / elems_per_row;
58
+ const T denom = conversion::to<T>(__frsqrt_rn(var + epsilon));
59
+
60
+ T* block_output = output + block_offset;
61
+
62
+ #pragma unroll
63
+ for (int i = 0; i < UNROLL; i++) {
64
+ T* iteration_buffer = local_buffer + (i * T_per_load);
65
+ const int iter_idx = i * stride + thread_offset;
66
+ const bool do_loads = (iter_idx < elems_per_row);
67
+
68
+ T gamma_local[T_per_load];
69
+
70
+ mem_access::load_global<rms::granularity>(gamma_local, gamma + iter_idx, do_loads);
71
+
72
+ #pragma unroll
73
+ for (int j = 0; j < T_per_load; j++) {
74
+ iteration_buffer[j] *= denom;
75
+ iteration_buffer[j] *= gamma_local[j];
76
+ }
77
+
78
+ if (do_loads) {
79
+ mem_access::store_global<rms::granularity>(block_output + iter_idx, iteration_buffer);
80
+ }
81
+ }
82
+ }
83
+
84
+ template <typename T, int UNROLL, int threadsPerGroup, int maxThreads>
85
+ __global__ void pre_rms_norm(T* output,
86
+ T* res_out,
87
+ const T* vals,
88
+ const T* residual,
89
+ const T* gamma,
90
+ float epsilon,
91
+ int elems_per_row)
92
+ {
93
+ constexpr int T_per_load = rms::granularity / sizeof(T);
94
+
95
+ cg::thread_block tb = cg::this_thread_block();
96
+ cg::thread_block_tile<hw_warp_size> warp = cg::tiled_partition<hw_warp_size>(tb);
97
+
98
+ // X-dimension of the block
99
+ const int block_offset = (tb.group_index().x * (maxThreads / threadsPerGroup) * elems_per_row) +
100
+ (tb.thread_index().y * elems_per_row);
101
+ const int thread_offset = tb.thread_index().x * T_per_load;
102
+ const int base_offset = block_offset + thread_offset;
103
+ const int stride = blockDim.x * T_per_load;
104
+
105
+ float var_sum = reduce::init<rop::Add, float>();
106
+
107
+ const T* input_base = vals + base_offset;
108
+ const T* residual_base = residual + base_offset;
109
+ T* res_output = res_out + base_offset;
110
+
111
+ T local_buffer[UNROLL * T_per_load];
112
+
113
+ #pragma unroll
114
+ for (int i = 0; i < UNROLL; i++) {
115
+ T* iteration_buffer = local_buffer + (i * T_per_load);
116
+ T residual_buffer[T_per_load];
117
+
118
+ const int iter_offset = i * stride + thread_offset;
119
+ const bool do_loads = (iter_offset < elems_per_row);
120
+
121
+ mem_access::load_global<rms::granularity>(
122
+ iteration_buffer, input_base + (i * stride), do_loads);
123
+ mem_access::load_global<rms::granularity>(
124
+ residual_buffer, residual_base + (i * stride), do_loads);
125
+
126
+ #pragma unroll
127
+ for (int j = 0; j < T_per_load; j++) {
128
+ iteration_buffer[j] += residual_buffer[j];
129
+ float vals_up_cast = conversion::to<float>(iteration_buffer[j]);
130
+
131
+ var_sum = reduce::element<rop::Add, float>(var_sum, vals_up_cast * vals_up_cast);
132
+ }
133
+
134
+ if (do_loads) {
135
+ mem_access::store_global<rms::granularity>(res_output + i * stride, iteration_buffer);
136
+ }
137
+ }
138
+
139
+ reduce::partitioned_block<rop::Add, threadsPerGroup>(tb, warp, var_sum);
140
+ const float var = var_sum / elems_per_row;
141
+ const T denom = conversion::to<T>(__frsqrt_rn(var + epsilon));
142
+
143
+ T* block_output = output + block_offset;
144
+
145
+ #pragma unroll
146
+ for (int i = 0; i < UNROLL; i++) {
147
+ T* iteration_buffer = local_buffer + (i * T_per_load);
148
+ const int iter_idx = i * stride + thread_offset;
149
+ const bool do_loads = (iter_idx < elems_per_row);
150
+
151
+ T gamma_local[T_per_load];
152
+
153
+ mem_access::load_global<rms::granularity>(gamma_local, gamma + iter_idx, do_loads);
154
+
155
+ #pragma unroll
156
+ for (int j = 0; j < T_per_load; j++) {
157
+ iteration_buffer[j] *= denom;
158
+ iteration_buffer[j] *= gamma_local[j];
159
+ }
160
+
161
+ if (do_loads) {
162
+ mem_access::store_global<rms::granularity>(block_output + iter_idx, iteration_buffer);
163
+ }
164
+ }
165
+ }
166
+
167
+ #define LAUNCH_RMS_NORM(UNROLL, threadsPerGroup, maxThreads) \
168
+ rms_norm<T, UNROLL, threadsPerGroup, maxThreads> \
169
+ <<<grid, block, 0, stream>>>(norm_output, vals, gamma, epsilon, elems_per_row);
170
+
171
+ #define LAUNCH_PRE_RMS_NORM(UNROLL, threadsPerGroup, maxThreads) \
172
+ pre_rms_norm<T, UNROLL, threadsPerGroup, maxThreads><<<grid, block, 0, stream>>>( \
173
+ norm_output, res_output, vals, residual, gamma, epsilon, elems_per_row);
174
+
175
+ #define LAUNCH_ALL_RMS_NORM(UNROLL, threadsPerGroup, maxThreads) \
176
+ if (pre_norm) { \
177
+ LAUNCH_PRE_RMS_NORM(UNROLL, threadsPerGroup, maxThreads) \
178
+ } else { \
179
+ LAUNCH_RMS_NORM(UNROLL, threadsPerGroup, maxThreads) \
180
+ }
181
+
182
+ template <typename T>
183
+ void launch_rms_norm(T* norm_output,
184
+ T* res_output,
185
+ const T* vals,
186
+ const T* residual,
187
+ const T* gamma,
188
+ float epsilon,
189
+ int rows,
190
+ int elems_per_row,
191
+ cudaStream_t stream)
192
+ {
193
+ // 8 for __half, 4 for float
194
+ constexpr int T_per_load = rms::granularity / sizeof(T);
195
+ constexpr int maxThreads = 256;
196
+ constexpr int internalUnroll = sizeof(T) == 4 ? 4 : 2;
197
+
198
+ const bool is_subblock_schedule = (elems_per_row <= 128) ? true : false;
199
+ const int h_per_step = is_subblock_schedule ? T_per_load : T_per_load * internalUnroll;
200
+
201
+ // Scheduling concern: may be slightly faster for some inputs to assign multiple stages of
202
+ // warp-sized blocks rather than stepping up to 64/96 threads
203
+ const int one_step_threads = next_pow2((elems_per_row + h_per_step - 1) / h_per_step);
204
+ const int threads_per_group = (one_step_threads < maxThreads) ? one_step_threads : maxThreads;
205
+
206
+ const int groups_per_block_max =
207
+ is_subblock_schedule ? (maxThreads + threads_per_group - 1) / threads_per_group : 1;
208
+ const int groups_per_block = (rows < groups_per_block_max) ? rows : groups_per_block_max;
209
+ const int groups_launch = (groups_per_block + rows - 1) / groups_per_block;
210
+
211
+ dim3 block(threads_per_group, groups_per_block);
212
+ dim3 grid(groups_launch);
213
+
214
+ const int elems_per_step = threads_per_group * h_per_step;
215
+ const int external_unRoll = (elems_per_row + elems_per_step - 1) / elems_per_step;
216
+
217
+ bool pre_norm = (residual == nullptr) ? false : true;
218
+
219
+ if (is_subblock_schedule) {
220
+ // <=128
221
+ if (threads_per_group == 1) {
222
+ LAUNCH_ALL_RMS_NORM(1, 1, maxThreads);
223
+ } else if (threads_per_group == 2) {
224
+ LAUNCH_ALL_RMS_NORM(1, 2, maxThreads);
225
+ } else if (threads_per_group == 4) {
226
+ LAUNCH_ALL_RMS_NORM(1, 4, maxThreads);
227
+ } else if (threads_per_group == 8) {
228
+ LAUNCH_ALL_RMS_NORM(1, 8, maxThreads);
229
+ } else if (threads_per_group == 16) {
230
+ LAUNCH_ALL_RMS_NORM(1, 16, maxThreads);
231
+ }
232
+ } else if (external_unRoll == 1) {
233
+ // 129 - 4096 elems
234
+ // (this can launch with 1-7 warps as well)
235
+ LAUNCH_ALL_RMS_NORM(1 * internalUnroll, maxThreads, maxThreads);
236
+ } else if (external_unRoll == 2) {
237
+ // 4097 - 8192 elems
238
+ LAUNCH_ALL_RMS_NORM(2 * internalUnroll, maxThreads, maxThreads);
239
+ } else if (external_unRoll == 3) {
240
+ // 8193 - 12288 elems
241
+ LAUNCH_ALL_RMS_NORM(3 * internalUnroll, maxThreads, maxThreads);
242
+ } else if (external_unRoll == 4) {
243
+ // 12289 - 16384 elems
244
+ LAUNCH_ALL_RMS_NORM(4 * internalUnroll, maxThreads, maxThreads);
245
+ }
246
+ }
247
+
248
+ #define INSTANTIATE_LAUNCH_RMS_NORM(T) \
249
+ template void launch_rms_norm<T>(T * norm_output, \
250
+ T * res_output, \
251
+ const T* vals, \
252
+ const T* residual, \
253
+ const T* gamma, \
254
+ float epsilon, \
255
+ int rows, \
256
+ int elems_per_row, \
257
+ cudaStream_t stream);
258
+
259
+ INSTANTIATE_LAUNCH_RMS_NORM(float)
260
+ INSTANTIATE_LAUNCH_RMS_NORM(__half)
261
+ #ifdef BF16_AVAILABLE
262
+ INSTANTIATE_LAUNCH_RMS_NORM(__nv_bfloat16)
263
+ #endif
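A per-row CPU sketch of rms_norm / pre_rms_norm, assuming a single row held in a std::vector; in the pre-norm path the residual is added first and the updated activations double as the new residual, mirroring res_out above:

#include <cmath>
#include <vector>

// CPU reference for one row of (pre-)RMS norm: y = gamma * x / sqrt(mean(x^2) + eps).
void rms_norm_reference(std::vector<float>& out, std::vector<float>& x,
                        const std::vector<float>& residual, const std::vector<float>& gamma,
                        float epsilon, bool pre_norm)
{
    const int n = static_cast<int>(x.size());
    float sq_sum = 0.0f;
    for (int i = 0; i < n; i++) {
        if (pre_norm) x[i] += residual[i];   // x also becomes the new residual (res_out)
        sq_sum += x[i] * x[i];
    }
    const float inv_rms = 1.0f / std::sqrt(sq_sum / n + epsilon);
    for (int i = 0; i < n; i++) out[i] = x[i] * inv_rms * gamma[i];
}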
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/transformer/inference/csrc/transform.cu ADDED
@@ -0,0 +1,727 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #ifndef __HIP_PLATFORM_AMD__
7
+ #include <cuda_profiler_api.h>
8
+ #endif
9
+ #include "conversion_utils.h"
10
+ #include "inference_cuda_layers.h"
11
+ namespace cg = cooperative_groups;
12
+
13
+ // only used to avoid compilation error due to lack of definition.
14
+ #ifndef BF16_AVAILABLE
15
+ using __nv_bfloat162 = __half2;
16
+ #endif
17
+
18
+ // Bias add
19
+
20
+ __global__ void bias_add_transform_0213(float* output,
21
+ float* k_cache,
22
+ float* v_cache,
23
+ const float* vals,
24
+ const float* bias,
25
+ int hidden_dim,
26
+ int seq_length,
27
+ unsigned seq_offset,
28
+ int heads,
29
+ int head_stride,
30
+ int num_kv,
31
+ int rotary_dim,
32
+ bool rotate_half,
33
+ bool rotate_every_two,
34
+ int head_ext,
35
+ int max_out_tokens,
36
+ float rope_theta)
37
+ {
38
+ int d0_stride = hidden_dim * seq_length;
39
+ int d1_stride = hidden_dim;
40
+ int d2_stride = hidden_dim / heads;
41
+
42
+ int d0 = blockIdx.x; // Batch
43
+ int d1 = blockIdx.y; // Sequence ID (0-127)
44
+ int cnt = blockIdx.z / head_ext; // Hidden count
45
+ int d2 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head (0-11)
46
+ int d3 = threadIdx.x; // Values (groups of 4)
47
+
48
+ int d2_out_stride = d2_stride * (cnt == 0 ? seq_length : max_out_tokens);
49
+ int d0_out_stride = hidden_dim * (cnt == 0 ? seq_length : max_out_tokens);
50
+
51
+ const float4* vals_vec = reinterpret_cast<const float4*>(vals);
52
+ float4* output_vec =
53
+ reinterpret_cast<float4*>(cnt == 0 ? output : (cnt == 1 ? k_cache : v_cache));
54
+
55
+ vals_vec += (d0 * (d1_stride + num_kv * 2 * d2_stride) * seq_length);
56
+ vals_vec += d1 * (d1_stride + num_kv * 2 * d2_stride);
57
+ vals_vec += (cnt == 0 ? 0 : d1_stride) + (cnt == 0 ? 0 : (cnt - 1) * num_kv * d2_stride);
58
+ vals_vec += ((cnt == 0 ? d2 : (d2 / head_stride)) * d2_stride);
59
+
60
+ output_vec += (d1 * d2_stride);
61
+ output_vec += (d0 * d0_out_stride);
62
+ output_vec += (d2 * d2_out_stride);
63
+
64
+ unsigned seq_id = d1 + seq_offset;
65
+ float4 inputs = vals_vec[d3];
66
+ int lane = d3 & 0x1f;
67
+ if (cnt < 2 && rotary_dim > 0 && d3 < rotary_dim) {
68
+ float4 q = vals_vec[d3];
69
+ float2* q_f = reinterpret_cast<float2*>(&q);
70
+ if (rotate_every_two) {
71
+ #pragma unroll
72
+ for (int o = 0; o < 2; o++) {
73
+ float inv_freq = (float)(((d3 << 1) + o) * 2) / (float)(rotary_dim << 2);
74
+ inv_freq = 1.0 / powf(rope_theta, inv_freq) * (float)seq_id;
75
+ float q_x = q_f[o].x;  // keep the pre-rotation x for the y update (as in the templated path)
+ q_f[o].x = (-1.0 * q_f[o].y * sinf(inv_freq) + q_x * cosf(inv_freq));
76
+ q_f[o].y = (q_x * sinf(inv_freq) + q_f[o].y * cosf(inv_freq));
77
+ }
78
+ }
79
+ output_vec[d3] = q;
80
+ } else
81
+ output_vec[d3] = inputs;
82
+ }
83
+
84
+ #define ATTN_H 3
85
+ #define MAX_SEQ_LINE 10
86
+
87
+ template <typename T>
88
+ __global__ void bias_add_transform_0213(T* output, // q
89
+ T* k_cache,
90
+ T* v_cache,
91
+ const T* vals, // qkv
92
+ const T* bias,
93
+ int hidden_dim,
94
+ int seq_length,
95
+ unsigned seq_offset,
96
+ int all_tokens,
97
+ int heads,
98
+ int head_stride,
99
+ int num_kv,
100
+ int rotary_dim,
101
+ bool rotate_half,
102
+ bool rotate_every_two,
103
+ int head_ext,
104
+ int max_out_tokens,
105
+ float rope_theta)
106
+ {
107
+ using T2 =
108
+ typename std::conditional<std::is_same<T, __half>::value, __half2, __nv_bfloat162>::type;
109
+ unsigned half_dim = (rotary_dim << 3) >> 1;
110
+ int d0_stride = hidden_dim * seq_length;
111
+ int d1_stride = hidden_dim;
112
+ int d2_stride = hidden_dim / heads;
113
+
114
+ int d0 = blockIdx.x; // Batch
115
+ int d1 = blockIdx.y; // Sequence ID (0-127)
116
+ int cnt = blockIdx.z / head_ext; // Hidden count
117
+ int d2 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head (0-11)
118
+ int d3 = threadIdx.x; // Values (groups of 4)
119
+
120
+ int d2_out_stride = d2_stride * (cnt == 0 ? seq_length : max_out_tokens);
121
+ int d0_out_stride = hidden_dim * (cnt == 0 ? seq_length : max_out_tokens);
122
+
123
+ float4 vals_arr;
124
+ float4 output_arr;
125
+
126
+ T2* vals_half = reinterpret_cast<T2*>(&vals_arr);
127
+ T2* output_half = reinterpret_cast<T2*>(&output_arr);
128
+
129
+ const float4* vals_vec = reinterpret_cast<const float4*>(vals);
130
+ float4* output_vec =
131
+ reinterpret_cast<float4*>(cnt == 0 ? output : (cnt == 1 ? k_cache : v_cache));
132
+
133
+ vals_vec += (d0 * (d1_stride + num_kv * 2 * d2_stride) * seq_length);
134
+ vals_vec += (d1 * (d1_stride + num_kv * 2 * d2_stride));
135
+ vals_vec += (cnt == 0 ? 0 : d1_stride) + (cnt == 0 ? 0 : (cnt - 1) * num_kv * d2_stride);
136
+ vals_vec += ((cnt == 0 ? d2 : (d2 / head_stride)) * d2_stride);
137
+
138
+ output_vec += (d1 * d2_stride);
139
+ output_vec += (d0 * d0_out_stride);
140
+ output_vec += (d2 * d2_out_stride);
141
+
142
+ unsigned seq_id = d1 + seq_offset;
143
+
144
+ int lane = d3 & 0x1f;
145
+ if (cnt < 2 && rotary_dim > 0 && d3 < rotary_dim) {
146
+ float4 q = vals_vec[d3];
147
+ T2* q_h = reinterpret_cast<T2*>(&q);
148
+ if (rotate_every_two) {
149
+ #pragma unroll
150
+ for (int o = 0; o < 4; o++) {
151
+ float inv_freq = (float)(((d3 << 2) + o) * 2) / (float)(rotary_dim << 3);
152
+ inv_freq = 1.0 / powf(rope_theta, inv_freq) * (float)seq_id;
153
+ float q_data[2];
154
+ q_data[0] = conversion::to<float>(q_h[o].x);
155
+ q_data[1] = conversion::to<float>(q_h[o].y);
156
+ q_h[o].x = conversion::to<T>(-1.0 * q_data[1] * sinf(inv_freq) +
157
+ q_data[0] * cosf(inv_freq));
158
+ q_h[o].y =
159
+ conversion::to<T>(q_data[0] * sinf(inv_freq) + q_data[1] * cosf(inv_freq));
160
+ }
161
+ }
162
+ output_vec[d3] = q;
163
+ } else
164
+ output_vec[d3] = vals_vec[d3];
165
+ }
166
+
167
+ // [B S C*H] - > C * [B A S N]
168
+ template <>
169
+ void launch_bias_add_transform_0213<float>(float* output,
170
+ float* k_cache,
171
+ float* v_cache,
172
+ const float* vals,
173
+ const float* bias,
174
+ int batch_size,
175
+ int seq_length,
176
+ unsigned seq_offset,
177
+ int all_tokens,
178
+ int hidden_dim,
179
+ int heads,
180
+ int num_kv,
181
+ int rotary_dim,
182
+ bool rotate_half,
183
+ bool rotate_every_two,
184
+ cudaStream_t stream,
185
+ int trans_count,
186
+ int max_out_tokens,
187
+ float rope_theta)
188
+ {
189
+ hidden_dim >>= 2;
190
+ int head_ext = (hidden_dim - 1) / MAX_THREADS + 1;
191
+
192
+ dim3 block_dim(hidden_dim / heads, (heads / head_ext));
193
+ dim3 grid_dim(batch_size, seq_length, (trans_count * head_ext));
194
+
195
+ bias_add_transform_0213<<<grid_dim, block_dim, 0, stream>>>(output,
196
+ k_cache,
197
+ v_cache,
198
+ vals,
199
+ bias,
200
+ hidden_dim,
201
+ seq_length,
202
+ seq_offset,
203
+ heads,
204
+ num_kv > 0 ? (heads / num_kv) : 1,
205
+ num_kv > 0 ? num_kv : heads,
206
+ rotary_dim >> 2,
207
+ rotate_half,
208
+ rotate_every_two,
209
+ head_ext,
210
+ max_out_tokens,
211
+ rope_theta);
212
+ }
213
+
214
+ template <typename T>
215
+ void launch_bias_add_transform_0213(T* output,
216
+ T* k_cache,
217
+ T* v_cache,
218
+ const T* vals,
219
+ const T* bias,
220
+ int batch_size,
221
+ int seq_length,
222
+ unsigned seq_offset,
223
+ int all_tokens,
224
+ int hidden_dim,
225
+ int heads,
226
+ int num_kv,
227
+ int rotary_dim,
228
+ bool rotate_half,
229
+ bool rotate_every_two,
230
+ cudaStream_t stream,
231
+ int trans_count,
232
+ int max_out_tokens,
233
+ float rope_theta)
234
+ {
235
+ hidden_dim >>= 3;
236
+ int head_ext = 1; // (hidden_dim - 1) / MAX_THREADS + 1;
237
+ dim3 block_dim(hidden_dim / heads, (heads / head_ext));
238
+ dim3 grid_dim(batch_size, seq_length, (trans_count * head_ext));
239
+ bias_add_transform_0213<<<grid_dim, block_dim, 0, stream>>>(output,
240
+ k_cache,
241
+ v_cache,
242
+ vals,
243
+ bias,
244
+ hidden_dim,
245
+ seq_length,
246
+ seq_offset,
247
+ all_tokens,
248
+ heads,
249
+ num_kv > 0 ? (heads / num_kv) : 1,
250
+ num_kv > 0 ? num_kv : heads,
251
+ rotary_dim >> 3,
252
+ rotate_half,
253
+ rotate_every_two,
254
+ head_ext,
255
+ max_out_tokens,
256
+ rope_theta);
257
+ }
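// Editor's note: a worked example of the launch geometry above, assuming a hypothetical
// configuration of batch_size=8, seq_length=128, hidden_dim=4096, heads=32, half precision
// (all values follow directly from the code above):
//
//   hidden_dim >>= 3           -> 512 float4 loads per token (4096 halves / 8 per float4)
//   head_ext  = 1
//   block_dim = (512/32, 32)   -> (16, 32): 16 float4 (128 halves) per head, 32 heads per block
//   grid_dim  = (8, 128, trans_count): one block per (batch, token, Q/K/V matrix)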
258
+
259
+ #define INSTANTIATE_LAUNCH_BIAS_ADD_TRANSFORM_0213(T) \
260
+ template void launch_bias_add_transform_0213<T>(T*, \
261
+ T*, \
262
+ T*, \
263
+ const T*, \
264
+ const T*, \
265
+ int, \
266
+ int, \
267
+ unsigned, \
268
+ int, \
269
+ int, \
270
+ int, \
271
+ int, \
272
+ int, \
273
+ bool, \
274
+ bool, \
275
+ cudaStream_t, \
276
+ int, \
277
+ int, \
278
+ float)
279
+
280
+ #ifdef BF16_AVAILABLE
281
+ INSTANTIATE_LAUNCH_BIAS_ADD_TRANSFORM_0213(__nv_bfloat16);
282
+ #endif
283
+ INSTANTIATE_LAUNCH_BIAS_ADD_TRANSFORM_0213(__half);
284
+
285
+ // Pad add transform
286
+
287
+ __global__ void pad_add_transform_0213(float* output,
288
+ const float* vals,
289
+ int hidden_dim,
290
+ int seq_length,
291
+ int padded_seq_len,
292
+ int heads,
293
+ int padded_head_size)
294
+ {
295
+ }
296
+
297
+ template <typename T>
298
+ __global__ void pad_add_transform_0213(T* output,
299
+ const T* vals,
300
+ int hidden_dim,
301
+ int seq_length,
302
+ int padded_seq_len,
303
+ int heads,
304
+ int padded_head_size)
305
+ {
306
+ using T2 =
307
+ typename std::conditional<std::is_same<T, __half>::value, __half2, __nv_bfloat162>::type;
308
+ float4 ZERO;
309
+ const T2 zero_h = conversion::to<T2>(0.f);
310
+ T2* ZERO_h = reinterpret_cast<T2*>(&ZERO);
311
+ #pragma unroll
312
+ for (int i = 0; i < 4; i++) ZERO_h[i] = zero_h;
313
+
314
+ int d0_stride = hidden_dim * seq_length;
315
+ int d1_stride = hidden_dim;
316
+ int d2_stride = hidden_dim / heads;
317
+
318
+ int d0 = blockIdx.x; // Batch
319
+ int d1 = blockIdx.y * blockDim.z + threadIdx.z; // Sequence ID (0-127)
320
+ int d2 = threadIdx.y; // Head (0-11)
321
+ int d3 = threadIdx.x; // Values (groups of 4)
322
+
323
+ int d2_out_stride = padded_head_size * padded_seq_len;
324
+ int d0_out_stride = heads * d2_out_stride;
325
+
326
+ const float4* vals_vec = reinterpret_cast<const float4*>(vals);
327
+ float4* output_vec = reinterpret_cast<float4*>(output);
328
+
329
+ vals_vec += (d0 * d0_stride);
330
+ vals_vec += (d1 * d1_stride);
331
+ vals_vec += (d2 * d2_stride);
332
+
333
+ output_vec += (d1 * padded_head_size);
334
+ output_vec += (d0 * d0_out_stride);
335
+ output_vec += (d2 * d2_out_stride);
336
+
337
+ if (d3 < d2_stride && d1 < seq_length)
338
+ output_vec[d3] = vals_vec[d3];
339
+ else
340
+ output_vec[d3] = ZERO;
341
+ }
342
+
343
+ // [B S C*H] -> C * [B A S N]
344
+ template <>
345
+ void launch_pad_add_transform_0213<float>(float* output,
346
+ const float* vals,
347
+ int batch_size,
348
+ int hidden_dim,
349
+ int seq_length,
350
+ int padded_seq_len,
351
+ int heads,
352
+ int padded_head_size,
353
+ cudaStream_t stream)
354
+ {
355
+ }
356
+
357
+ template <typename T>
358
+ void launch_pad_add_transform_0213(T* output,
359
+ const T* vals,
360
+ int batch_size,
361
+ int hidden_dim,
362
+ int seq_length,
363
+ int padded_seq_len,
364
+ int heads,
365
+ int padded_head_size,
366
+ cudaStream_t stream)
367
+ {
368
+ hidden_dim >>= 3;
369
+ dim3 block_dim((padded_head_size >> 3), heads, 2);
370
+ dim3 grid_dim(batch_size, padded_seq_len / 2);
371
+ pad_add_transform_0213<<<grid_dim, block_dim, 0, stream>>>(
372
+ output, vals, hidden_dim, seq_length, padded_seq_len, heads, padded_head_size >> 3);
373
+ }
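// Editor's note: the kernel above writes a [batch, heads, padded_seq_len, padded_head_size]
// layout, zero-filling the padded positions. A minimal host-side sketch of the output
// element count a caller would need to allocate (illustrative only; padded_output_elems is
// a hypothetical helper, not part of this file):

static inline size_t padded_output_elems(int batch_size,
                                         int heads,
                                         int padded_seq_len,
                                         int padded_head_size)
{
    // one element per (batch, head, padded token, padded head-dim) slot
    return (size_t)batch_size * heads * (size_t)padded_seq_len * padded_head_size;
}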
374
+
375
+ #define INSTANTIATE_LAUNCH_PAD_ADD_TRANSFORM_0213_SIMPLE(T) \
376
+ template void launch_pad_add_transform_0213<T>( \
377
+ T*, const T*, int, int, int, int, int, int, cudaStream_t);
378
+
379
+ INSTANTIATE_LAUNCH_PAD_ADD_TRANSFORM_0213_SIMPLE(__half);
380
+ #ifdef BF16_AVAILABLE
381
+ INSTANTIATE_LAUNCH_PAD_ADD_TRANSFORM_0213_SIMPLE(__nv_bfloat16);
382
+ #endif
383
+
384
+ // Bias add
385
+ template <typename T>
386
+ __global__ void bias_add_transform_0213(T* output,
387
+ const T* vals,
388
+ const T* bias,
389
+ int hidden_dim,
390
+ int seq_length,
391
+ int heads,
392
+ int head_ext);
393
+
394
+ template <>
395
+ __global__ void bias_add_transform_0213<float>(float* output,
396
+ const float* vals,
397
+ const float* bias,
398
+ int hidden_dim,
399
+ int seq_length,
400
+ int heads,
401
+ int head_ext)
402
+ {
403
+ int d0_stride = hidden_dim * seq_length;
404
+ int d1_stride = hidden_dim;
405
+ int d2_stride = hidden_dim / heads;
406
+
407
+ int d0_out_stride = d0_stride;
408
+ int d1_out_stride = d2_stride;
409
+ int d2_out_stride = d2_stride * seq_length;
410
+
411
+ int d0 = blockIdx.x; // Batch
412
+ int d1 = blockIdx.y; // Sequence ID (0-127)
413
+ int cnt = blockIdx.z / head_ext; // Hidden count
414
+ int d2 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head (0-11)
415
+ int d3 = threadIdx.x; // Values (groups of 4)
416
+
417
+ const float4* vals_vec = reinterpret_cast<const float4*>(vals);
418
+ const float4* bias_vec = reinterpret_cast<const float4*>(bias);
419
+ float4* output_vec = reinterpret_cast<float4*>(output);
420
+
421
+ float4 inputs = vals_vec[d0 * d0_stride * (gridDim.z / head_ext) + cnt * d1_stride +
422
+ d1 * d1_stride * (gridDim.z / head_ext) + d2 * d2_stride + d3];
423
+ float4 biases = bias_vec[cnt * d1_stride + d2 * d2_stride + d3];
424
+
425
+ float4 outputs;
426
+ outputs.x = inputs.x + biases.x;
427
+ outputs.y = inputs.y + biases.y;
428
+ outputs.z = inputs.z + biases.z;
429
+ outputs.w = inputs.w + biases.w;
430
+
431
+ output_vec[cnt * d0_out_stride * gridDim.x + d0 * d0_out_stride + d1 * d1_out_stride +
432
+ d2 * d2_out_stride + d3] = outputs;
433
+ }
434
+
435
+ template <typename T>
436
+ __global__ void bias_add_transform_0213(T* output,
437
+ const T* vals,
438
+ const T* bias,
439
+ int hidden_dim,
440
+ int seq_length,
441
+ int heads,
442
+ int head_ext)
443
+ {
444
+ using T2 =
445
+ typename std::conditional<std::is_same<T, __half>::value, __half2, __nv_bfloat162>::type;
446
+ int d0_stride = hidden_dim * seq_length;
447
+ int d1_stride = hidden_dim;
448
+ int d2_stride = hidden_dim / heads;
449
+
450
+ int d2_out_stride = d2_stride * seq_length;
451
+
452
+ int d0 = blockIdx.x; // Batch
453
+ int d1 = blockIdx.y; // Sequence ID (0-127)
454
+ int cnt = blockIdx.z / head_ext; // Hidden count
455
+ int d2 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head (0-11)
456
+ int d3 = threadIdx.x; // Values (groups of 4)
457
+
458
+ float4 vals_arr;
459
+ float4 bias_arr;
460
+ float4 output_arr;
461
+ T2* vals_half = reinterpret_cast<T2*>(&vals_arr);
462
+ T2* bias_half = reinterpret_cast<T2*>(&bias_arr);
463
+ T2* output_half = reinterpret_cast<T2*>(&output_arr);
464
+
465
+ const float4* vals_vec = reinterpret_cast<const float4*>(vals);
466
+ const float4* bias_vec = reinterpret_cast<const float4*>(bias);
467
+ float4* output_vec = reinterpret_cast<float4*>(output);
468
+
469
+ vals_vec += (d0 * d0_stride * (gridDim.z / head_ext));
470
+ vals_vec += (d1 * d1_stride * (gridDim.z / head_ext));
471
+ vals_vec += (cnt * d1_stride);
472
+ vals_vec += (d2 * d2_stride);
473
+
474
+ bias_vec += (cnt * d1_stride);
475
+ bias_vec += (d2 * d2_stride);
476
+
477
+ output_vec += (cnt * d0_stride * gridDim.x);
478
+ output_vec += (d1 * d2_stride);
479
+ output_vec += (d0 * d0_stride);
480
+ output_vec += (d2 * d2_out_stride);
481
+
482
+ bias_arr = bias_vec[d3];
483
+ vals_arr = vals_vec[d3];
484
+
485
+ output_half[0] = vals_half[0] + bias_half[0];
486
+ output_half[1] = vals_half[1] + bias_half[1];
487
+ output_half[2] = vals_half[2] + bias_half[2];
488
+ output_half[3] = vals_half[3] + bias_half[3];
489
+ output_vec[d3] = output_arr;
490
+ }
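// Editor's note: the kernel above relies on the common pattern of loading 16 bytes as a
// float4 and reinterpreting them as four __half2/__nv_bfloat162 lanes so the bias add is
// done with packed two-way arithmetic. A minimal device-side sketch of that pattern for
// __half data (illustrative only; not part of this file):

__device__ inline float4 add_bias_packed_half8(float4 vals, float4 bias)
{
    // view each 16-byte vector as 8 halves (4 x __half2) and add lane-wise
    __half2* v = reinterpret_cast<__half2*>(&vals);
    __half2* b = reinterpret_cast<__half2*>(&bias);
    float4 out;
    __half2* o = reinterpret_cast<__half2*>(&out);
#pragma unroll
    for (int i = 0; i < 4; i++) o[i] = __hadd2(v[i], b[i]);
    return out;
}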
491
+
492
+ template <typename T>
493
+ __global__ void bias_add_transform_0213_v2(T* output,
494
+ const T* vals,
495
+ const T* bias,
496
+ int hidden_dim,
497
+ int seq_length,
498
+ int heads)
499
+ {
500
+ using T2 =
501
+ typename std::conditional<std::is_same<T, __half>::value, __half2, __nv_bfloat162>::type;
502
+ __shared__ float4 in_data[3072];
503
+
504
+ int d0_stride = hidden_dim * seq_length;
505
+ int d1_stride = hidden_dim;
506
+ int d2_stride = hidden_dim / heads;
507
+ int iteration_stride = d1_stride * blockDim.z; // Hidden * 3 / 8
508
+ int batch_stride = d0_stride * blockDim.z; // Hidden * S * 3 / 8
509
+
510
+ int d0_out_stride = d0_stride;
511
+ int d1_out_stride = d2_stride;
512
+ int d2_out_stride = d2_stride * seq_length;
513
+
514
+ int d0 = blockIdx.x; // Batch
515
+ int d1 = blockIdx.y; // Sequence ID (0-127)
516
+ int cnt = threadIdx.z; // blockIdx.z; // Hidden count
517
+ int d2 = threadIdx.y; // Head (0-11)
518
+ int d3 = threadIdx.x; // Values (groups of 4)
519
+
520
+ float4 vals_arr[1];
521
+ float4 bias_arr[1];
522
+ float4 output_arr[1];
523
+ T2* vals_half = reinterpret_cast<T2*>(vals_arr);
524
+ T2* bias_half = reinterpret_cast<T2*>(bias_arr);
525
+ T2* output_half = reinterpret_cast<T2*>(output_arr);
526
+
527
+ const float4* vals_vec = reinterpret_cast<const float4*>(vals);
528
+ const float4* bias_vec = reinterpret_cast<const float4*>(bias);
529
+ float4* output_vec = reinterpret_cast<float4*>(output);
530
+
531
+ int iter_index = cnt * d1_stride + d2 * d2_stride + d3;
532
+ int input_offset = d0 * batch_stride + d1 * (iteration_stride << 1);
533
+ bias_arr[0] = bias_vec[iter_index];
534
+
535
+ #pragma unroll
536
+ for (int iter = 0; iter < 2; iter++) {
537
+ int iter_id = iter * iteration_stride + iter_index;
538
+ vals_arr[0] = vals_vec[input_offset + iter_id];
539
+
540
+ output_half[0] = vals_half[0] + bias_half[0];
541
+ output_half[1] = vals_half[1] + bias_half[1];
542
+ output_half[2] = vals_half[2] + bias_half[2];
543
+ output_half[3] = vals_half[3] + bias_half[3];
544
+
545
+ in_data[iter_id] = output_arr[0];
546
+ }
547
+ __syncthreads();
548
+
549
+ iteration_stride = blockDim.z * (blockDim.y >> 1);
550
+ int matrix_stride = (d0_out_stride * gridDim.x);
551
+ int head_count = (d2 >> 1) + cnt * (blockDim.y >> 1);
552
+
553
+ int out_index = d0 * d0_out_stride + d1 * (d1_out_stride << 1) + d3 + (d2 % 2) * d2_stride;
554
+
555
+ #pragma unroll
556
+ for (int iter = 0; iter < 2; iter++) {
557
+ int iter_row = (iter * iteration_stride) + head_count;
558
+ int iter_offset =
559
+ (iter_row % blockDim.y) * d2_out_stride + (iter_row / blockDim.y) * matrix_stride;
560
+ output_vec[out_index + iter_offset] =
561
+ in_data[iter_row * d2_stride + d3 + (d2 % 2) * (d1_stride * blockDim.z)];
562
+ }
563
+ }
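// Editor's note: this _v2 variant stages the bias-added Q/K/V tile in shared memory before
// scattering it to the transposed layout; in_data[3072] float4 elements is 3072 * 16 B =
// 48 KB of shared memory per block, the default static per-block limit on most NVIDIA GPUs.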
564
+
565
+ template <typename T>
566
+ __global__ void transform4d_0213(T* out,
567
+ const T* in,
568
+ int heads,
569
+ int seq_length,
570
+ int hidden_dim,
571
+ int head_ext);
572
+
573
+ template <>
574
+ __global__ void transform4d_0213<float>(float* out,
575
+ const float* in,
576
+ int heads,
577
+ int seq_length,
578
+ int hidden_dim,
579
+ int head_ext)
580
+ {
581
+ int d0_stride = hidden_dim * seq_length;
582
+ int d1_stride = d0_stride / heads;
583
+ int d2_stride = hidden_dim / heads;
584
+
585
+ int d0_out_stride = d0_stride;
586
+ int d1_out_stride = d2_stride;
587
+ int d2_out_stride = hidden_dim;
588
+
589
+ int d0 = blockIdx.x; // Batch
590
+ int d1 = blockIdx.y / ((seq_length - 1) / blockDim.y + 1); // Head
591
+ int d2 = (threadIdx.y + blockDim.y * blockIdx.y) % seq_length;
592
+ int cnt = blockIdx.z;
593
+ int d3 = threadIdx.x; // Values (groups of 8)
594
+
595
+ if (d2 < seq_length) {
596
+ const float4* in_vec = reinterpret_cast<const float4*>(in);
597
+ float4* out_vec = reinterpret_cast<float4*>(out);
598
+
599
+ float4 vals_vec = in_vec[cnt * d0_stride * gridDim.x + d0 * d0_stride + d1 * d1_stride +
600
+ d2 * d2_stride + d3];
601
+ out_vec[d0 * d0_out_stride * gridDim.z + cnt * d2_out_stride + d1 * d1_out_stride +
602
+ d2 * d2_out_stride * gridDim.z + d3] = vals_vec;
603
+ }
604
+ }
605
+
606
+ template <typename T>
607
+ __global__ void transform4d_0213(T* out,
608
+ const T* in,
609
+ int heads,
610
+ int seq_length,
611
+ int hidden_dim,
612
+ int head_ext)
613
+ {
614
+ int d0_stride = hidden_dim * (seq_length / head_ext);
615
+ int d1_stride = hidden_dim;
616
+ int d2_stride = hidden_dim / heads;
617
+
618
+ int d0 = blockIdx.x; // Batch
619
+ int d1 = threadIdx.y + (blockIdx.z % head_ext) * (heads / head_ext); // Head
620
+ int d2 = blockIdx.z / head_ext; // Sequence
621
+ int cnt = blockIdx.y; // Hidden count
622
+ int d3 = threadIdx.x; // Values (groups of 8)
623
+
624
+ const float4* in_vec = reinterpret_cast<const float4*>(in);
625
+ float4* out_vec = reinterpret_cast<float4*>(out);
626
+
627
+ in_vec += (cnt * d0_stride * gridDim.x);
628
+ in_vec += (d0 * d0_stride);
629
+ in_vec += (d2 * d2_stride);
630
+ in_vec += (d1 * d2_stride * seq_length);
631
+
632
+ out_vec += (cnt * d1_stride);
633
+ out_vec += (d1 * d2_stride);
634
+ out_vec += (d0 * d0_stride * gridDim.y);
635
+ out_vec += (d2 * d1_stride * gridDim.y);
636
+
637
+ out_vec[d3] = in_vec[d3];
638
+ }
639
+
640
+ template <typename T>
641
+ __global__ void transform4d_0213_v2(T* out, const T* in, int heads, int seq_length, int hidden_dim)
642
+ {
643
+ __shared__ float4 in_data[3072];
644
+
645
+ int d0_stride = hidden_dim * seq_length;
646
+ int d1_stride = hidden_dim;
647
+ int d2_stride = hidden_dim / heads;
648
+
649
+ int d0 = blockIdx.x; // Batch
650
+ int d1 = threadIdx.y; // Head
651
+ int d2 = blockIdx.y; // Sequence
652
+ int cnt = threadIdx.z; // Hidden count
653
+ int d3 = threadIdx.x; // Values (groups of 8)
654
+
655
+ const float4* in_vec = reinterpret_cast<const float4*>(in);
656
+ float4* out_vec = reinterpret_cast<float4*>(out);
657
+
658
+ int input_offset = d0 * d0_stride + d2 * (d2_stride << 1) + d3 + (d1 % 2) * d2_stride;
659
+ int head_count = (d1 >> 1) + cnt * (blockDim.y >> 1);
660
+ int iteration_stride = blockDim.z * (blockDim.y >> 1);
661
+ int matrix_stride = (d0_stride * gridDim.x);
662
+
663
+ #pragma unroll
664
+ for (int iter = 0; iter < 2; iter++) {
665
+ int iter_row = iter * iteration_stride + head_count;
666
+ int iter_offset = (iter_row % blockDim.y) * d2_stride;
667
+
668
+ in_data[d3 + iter_offset + (iter_row / blockDim.y + (d1 % 2) * blockDim.z) * d1_stride] =
669
+ in_vec[input_offset + iter_offset * seq_length +
670
+ (iter_row / blockDim.y) * matrix_stride];
671
+ }
672
+ __syncthreads();
673
+
674
+ iteration_stride = d1_stride * blockDim.z;
675
+ int iter_index = cnt * d1_stride + d1 * d2_stride + d3;
676
+ int output_offset = d0 * d0_stride * blockDim.z + d2 * (iteration_stride << 1);
677
+
678
+ #pragma unroll
679
+ for (int iter = 0; iter < 2; iter++) {
680
+ int iter_id = iter * iteration_stride + iter_index;
681
+ out_vec[output_offset + iter_id] = in_data[iter_id];
682
+ }
683
+ }
684
+
685
+ // 3 * [B A S N] -> [B S C*H]
686
+ template <>
687
+ void launch_transform4d_0213<float>(float* out,
688
+ const float* in,
689
+ int batch_size,
690
+ int heads,
691
+ int seq_length,
692
+ int hidden_dim,
693
+ cudaStream_t stream,
694
+ int trans_count)
695
+ {
696
+ hidden_dim >>= 2;
697
+ dim3 grid_dims(batch_size, heads * ((seq_length - 1) / 8 + 1), trans_count);
698
+ dim3 block_dims(hidden_dim / heads, 8);
699
+ transform4d_0213<float>
700
+ <<<grid_dims, block_dims, 0, stream>>>(out, in, heads, seq_length, hidden_dim, 1);
701
+ }
702
+
703
+ template <typename T>
704
+ void launch_transform4d_0213(T* out,
705
+ const T* in,
706
+ int batch_size,
707
+ int heads,
708
+ int seq_length,
709
+ int hidden_dim,
710
+ cudaStream_t stream,
711
+ int trans_count)
712
+ {
713
+ hidden_dim >>= 3;
714
+ int head_ext = (hidden_dim - 1) / MAX_THREADS + 1;
715
+ dim3 grid_dims(batch_size, trans_count, (seq_length * head_ext));
716
+ dim3 block_dims(hidden_dim / heads, (heads / head_ext));
717
+ transform4d_0213<<<grid_dims, block_dims, 0, stream>>>(
718
+ out, in, heads, seq_length, hidden_dim, head_ext);
719
+ }
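// Editor's note: transform4d_0213 is the inverse of the 0213 transform: it merges
// trans_count [batch, heads, seq, head_dim] matrices back into one [batch, seq,
// trans_count * hidden] activation. A worked example of the launch geometry above, assuming
// a hypothetical configuration of batch_size=8, seq_length=128, hidden_dim=4096, heads=32,
// trans_count=3, half precision:
//
//   hidden_dim >>= 3           -> 512 float4 per token
//   head_ext   = 1
//   grid_dims  = (8, 3, 128)   // (batch, matrix index, token)
//   block_dims = (16, 32)      // (float4 per head, heads)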
720
+
721
+ #define INSTANTIATE_2B_LAUNCH_TRANSFORM4D(T) \
722
+ template void launch_transform4d_0213<T>(T*, const T*, int, int, int, int, cudaStream_t, int);
723
+
724
+ INSTANTIATE_2B_LAUNCH_TRANSFORM4D(__half)
725
+ #ifdef BF16_AVAILABLE
726
+ INSTANTIATE_2B_LAUNCH_TRANSFORM4D(__nv_bfloat16)
727
+ #endif
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/utils/flatten_unflatten.cpp ADDED
@@ -0,0 +1,29 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ /*
7
+ Copyright NVIDIA/apex
8
+ This file is adapted from fused adam in NVIDIA/apex, commit a109f85
9
+ */
10
+
11
+ #include <torch/csrc/utils/tensor_flatten.h>
12
+ #include <torch/extension.h>
13
+ // https://github.com/pytorch/pytorch/blob/master/torch/csrc/utils/tensor_flatten.h
14
+
15
+ at::Tensor flatten(std::vector<at::Tensor> tensors)
16
+ {
17
+ return torch::utils::flatten_dense_tensors(tensors);
18
+ }
19
+
20
+ std::vector<at::Tensor> unflatten(at::Tensor flat, std::vector<at::Tensor> tensors)
21
+ {
22
+ return torch::utils::unflatten_dense_tensors(flat, tensors);
23
+ }
24
+
25
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
26
+ {
27
+ m.def("flatten", &flatten, "Flatten dense tensors");
28
+ m.def("unflatten", &unflatten, "Unflatten dense tensors");
29
+ }
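// Editor's note: a minimal round-trip sketch of the two bindings above, illustrative only
// (assumes it is compiled in the same translation unit as flatten/unflatten):

static void flatten_roundtrip_example()
{
    std::vector<at::Tensor> tensors = {torch::randn({2, 3}), torch::randn({4})};
    at::Tensor flat = flatten(tensors);                        // 1-D tensor with 2*3 + 4 = 10 elements
    std::vector<at::Tensor> views = unflatten(flat, tensors);  // same shapes as the inputs
    // each views[i] aliases a contiguous slice of `flat`'s storage
}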
venv/lib/python3.10/site-packages/deepspeed/ops/csrc/xpu/adagrad/cpu_adagrad.cpp ADDED
@@ -0,0 +1,196 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "cpu_adagrad.h"
7
+ #include <torch/extension.h>
8
+ #include <cmath>
9
+ #include <iostream>
10
+ #include <memory>
11
+ #include <type_traits>
12
+ #include <unordered_map>
13
+
14
+ static std::unordered_map<int, std::shared_ptr<void>> s_optimizers;
15
+
16
+ // C++ interface
17
+
18
+ void Adagrad_Optimizer::Step_1(float* _params,
19
+ float* grads,
20
+ float* _exp_avg_sq,
21
+ size_t _param_size,
22
+ ds_half_precision_t* dev_params,
23
+ bool half_precision)
24
+ {
25
+ size_t rounded_size = 0;
26
+ #if defined(__AVX512__) or defined(__AVX256__)
27
+ Step_AVX<1>(
28
+ &rounded_size, _params, grads, _exp_avg_sq, _param_size, dev_params, half_precision);
29
+ #endif
30
+ if (_param_size > rounded_size) {
31
+ float step_size = -1 * _alpha;
32
+ ds_half_precision_t* grads_cast_h;
33
+ ds_half_precision_t* params_cast_h;
34
+ if (half_precision) {
35
+ grads_cast_h = reinterpret_cast<ds_half_precision_t*>(grads);
36
+ params_cast_h = reinterpret_cast<ds_half_precision_t*>(_params);
37
+ }
38
+ for (size_t t = rounded_size; t < _param_size; t += TILE) {
39
+ size_t copy_size = TILE;
40
+ if ((t + TILE) > _param_size) copy_size = _param_size - t;
41
+ size_t offset = copy_size + t;
42
+ #pragma omp parallel for
43
+ for (size_t k = t; k < offset; k++) {
44
+ float grad = half_precision ? (float)grads_cast_h[k] : grads[k];
45
+ float param = half_precision ? (float)params_cast_h[k] : _params[k];
46
+ float momentum = grads[k];
47
+ float variance = _exp_avg_sq[k];
48
+ if (_weight_decay > 0) { grad = param * _weight_decay + grad; }
49
+
50
+ variance += grad * grad;
51
+
52
+ grad = sqrt(variance);
53
+ grad += _eps;
54
+ grad = momentum / grad;
55
+ param = grad * step_size + param;
56
+ if (half_precision)
57
+ params_cast_h[k] = (ds_half_precision_t)param;
58
+ else
59
+ _params[k] = param;
60
+ // STORE UPDATE TERM TO GRAD'S MEMORY
61
+ grads[k] = grad * step_size;
62
+ _exp_avg_sq[k] = variance;
63
+ }
64
+ }
65
+ }
66
+ }
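// Editor's note: per element, the scalar tail loop above performs a standard Adagrad update
// with L2 weight decay folded into the squared-gradient accumulator. A minimal standalone
// sketch of that math (illustrative only; adagrad_update_scalar is a hypothetical helper,
// not part of this file):

static inline void adagrad_update_scalar(float& param,
                                         float& exp_avg_sq,
                                         float grad,
                                         float alpha,
                                         float eps,
                                         float weight_decay)
{
    // accumulate the squared (weight-decayed) gradient, as Step_1 does with `variance`
    float decayed = (weight_decay > 0) ? grad + weight_decay * param : grad;
    exp_avg_sq += decayed * decayed;
    // the step numerator is the raw gradient (`momentum` in Step_1), scaled by -alpha
    param -= alpha * grad / (sqrtf(exp_avg_sq) + eps);
}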
67
+
68
+ void Adagrad_Optimizer::Step_4(float* _params,
69
+ float* grads,
70
+ float* _exp_avg_sq,
71
+ size_t _param_size,
72
+ ds_half_precision_t* dev_params,
73
+ bool half_precision)
74
+ {
75
+ size_t rounded_size = 0;
76
+ #if defined(__AVX512__) or defined(__AVX256__)
77
+ Step_AVX<4>(
78
+ &rounded_size, _params, grads, _exp_avg_sq, _param_size, dev_params, half_precision);
79
+ #endif
80
+ if (_param_size > rounded_size)
81
+ Step_1((_params + rounded_size),
82
+ (grads + rounded_size),
83
+ (_exp_avg_sq + rounded_size),
84
+ (_param_size - rounded_size),
85
+ (dev_params != nullptr ? (dev_params + rounded_size) : dev_params),
86
+ half_precision);
87
+ }
88
+
89
+ int create_adagrad_optimizer(int optimizer_id,
90
+ float alpha = 1e-2,
91
+ float eps = 1e-8,
92
+ float weight_decay = 0,
93
+ bool should_log = false)
94
+ {
95
+ auto opt = std::make_shared<Adagrad_Optimizer>(alpha, eps, weight_decay);
96
+
97
+ s_optimizers[optimizer_id] = opt;
98
+
99
+ if (should_log) {
100
+ std::string avx_type = "";
101
+ #if defined(__AVX512__)
102
+ avx_type = "AVX512";
103
+ #else
104
+ #if defined(__AVX256__)
105
+ avx_type = "AVX2";
106
+ #else
107
+ avx_type = "scalar";
108
+ #endif
109
+ #endif
110
+
111
+ printf("Adagrad Optimizer #%d is created with %s arithmetic capability.\n",
112
+ optimizer_id,
113
+ avx_type.c_str());
114
+ printf("Config: alpha=%f, weight_decay=%f\n", alpha, weight_decay);
115
+ }
116
+
117
+ return 0;
118
+ }
119
+
120
+ void Adagrad_Optimizer::Step_8(float* _params,
121
+ float* grads,
122
+ float* _exp_avg_sq,
123
+ size_t _param_size,
124
+ ds_half_precision_t* dev_params,
125
+ bool half_precision)
126
+ {
127
+ size_t rounded_size = 0;
128
+ #if defined(__AVX512__) or defined(__AVX256__)
129
+ Step_AVX<8>(
130
+ &rounded_size, _params, grads, _exp_avg_sq, _param_size, dev_params, half_precision);
131
+ #endif
132
+ if (_param_size > rounded_size)
133
+ Step_4((_params + rounded_size),
134
+ (grads + rounded_size),
135
+ (_exp_avg_sq + rounded_size),
136
+ (_param_size - rounded_size),
137
+ (dev_params != nullptr ? (dev_params + rounded_size) : dev_params),
138
+ half_precision);
139
+ }
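// Editor's note: Step_8 / Step_4 / Step_1 form a width cascade: each level processes as many
// elements as its AVX unroll factor allows (Step_AVX<8>, <4>, <1>) and forwards the remainder
// to the next narrower level, ending in the scalar tail loop of Step_1, so any parameter
// count is handled without alignment requirements on the caller.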
140
+
141
+ int ds_adagrad_step(int optimizer_id,
142
+ size_t step,
143
+ float lr,
144
+ float epsilon,
145
+ float weight_decay,
146
+ torch::Tensor& params,
147
+ torch::Tensor& grads,
148
+ torch::Tensor& exp_avg_sq)
149
+ {
150
+ auto params_c = params.contiguous();
151
+ auto grads_c = grads.contiguous();
152
+ auto exp_avg_sq_c = exp_avg_sq.contiguous();
153
+
154
+ float* params_ptr = (float*)params_c.data_ptr();
155
+ float* grads_ptr = (float*)grads_c.data_ptr();
156
+ float* exp_avg_sq_ptr = (float*)exp_avg_sq_c.data_ptr();
157
+
158
+ std::shared_ptr<Adagrad_Optimizer> opt =
159
+ std::static_pointer_cast<Adagrad_Optimizer>(s_optimizers[optimizer_id]);
160
+ opt->IncrementStep(step);
161
+ opt->update_state(lr, epsilon, weight_decay);
162
+ opt->Step_8(params_ptr, grads_ptr, exp_avg_sq_ptr, params_c.numel());
163
+
164
+ return 0;
165
+ }
166
+
167
+ int ds_adagrad_step_plus_copy(int optimizer_id,
168
+ size_t step,
169
+ float lr,
170
+ float epsilon,
171
+ float weight_decay,
172
+ torch::Tensor& params,
173
+ torch::Tensor& grads,
174
+ torch::Tensor& exp_avg_sq,
175
+ torch::Tensor& gpu_params)
176
+ {
177
+ assert(false);
178
+ return 0;
179
+ }
180
+
181
+ int destroy_adagrad_optimizer(int optimizer_id)
182
+ {
183
+ s_optimizers.erase(optimizer_id);
184
+
185
+ return 0;
186
+ }
187
+
188
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
189
+ {
190
+ m.def("adagrad_update", &ds_adagrad_step, "DeepSpeed CPU Adagrad update (C++)");
191
+ m.def("adagrad_update_copy",
192
+ &ds_adagrad_step_plus_copy,
193
+ "DeepSpeed CPU Adagrad update and param copy (C++)");
194
+ m.def("create_adagrad", &create_adagrad_optimizer, "DeepSpeed CPU Adagrad (C++)");
195
+ m.def("destroy_adagrad", &destroy_adagrad_optimizer, "DeepSpeed CPU Adagrad destroy (C++)");
196
+ }