applied-ai-018 committed
Commit 3456c50 · verified · 1 Parent(s): 11ed373

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.
Files changed (50):
  1. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/__init__.py +6 -0
  2. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ds_kernel.py +32 -0
  3. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/__init__.py +13 -0
  4. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__init__.py +8 -0
  5. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__pycache__/__init__.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__pycache__/blocked_kv_rotary.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__pycache__/blocked_trained_kv_rotary.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__pycache__/linear_blocked_kv_copy.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.cpp +195 -0
  10. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.cuh +47 -0
  11. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.h +65 -0
  12. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.py +73 -0
  13. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary_cuda.cu +385 -0
  14. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_trained_kv_rotary.py +76 -0
  15. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/linear_blocked_kv_copy.py +74 -0
  16. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/ragged_ops.cpp +48 -0
  17. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/__init__.py +6 -0
  18. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/__pycache__/__init__.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.cpp +61 -0
  20. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.cuh +25 -0
  21. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.h +21 -0
  22. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.py +59 -0
  23. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating_cuda.cu +125 -0
  24. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__init__.py +6 -0
  25. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__pycache__/container.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/container.py +78 -0
  27. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/model.py +209 -0
  28. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/policy.py +31 -0
  29. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__init__.py +6 -0
  30. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/__init__.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/container.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/model.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/policy.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/container.py +77 -0
  35. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/model.py +207 -0
  36. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/policy.py +30 -0
  37. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/__init__.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/container.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/model.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/policy.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/__init__.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/attn.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/attn_out.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/embedding.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/mlp.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/qkv.cpython-310.pyc +0 -0
  47. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/types.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/unembed.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/utils.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/embedding.py +34 -0
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/__init__.py ADDED
@@ -0,0 +1,6 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from .ds_kernel import DSKernelBase
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ds_kernel.py ADDED
@@ -0,0 +1,32 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from abc import ABC, abstractmethod
+
+
+ class DSKernelBase(ABC):
+
+     @abstractmethod
+     def __init__(self, *args, **kwargs):
+         """
+         If necessary trigger compilation and warmup
+         Autotuning of the kernel would happen at this stage to
+         eliminate any potential hangs that might occur mid-deployment
+         Validate that the desired run configuration is compatible.
+
+         It is not necessary to call super on this method.
+         """
+         raise NotImplementedError()
+
+     @abstractmethod
+     def __call__(self, *args, **kwargs):
+         """
+         However the kernel needs to be called, it can be called here. Auto-tuning
+         should never be performed here.
+
+         All inputs/outputs should be passed as arguments to this function. No allocations
+         should be performed here.
+         """
+         raise NotImplementedError()
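To make the contract above concrete, here is a minimal sketch of a subclass that follows it. The IdentityKernel class is a hypothetical illustration, not part of DeepSpeed; only DSKernelBase comes from the file above. Validation and any compilation/warmup belong in __init__, while __call__ only operates on caller-provided buffers.

import torch

from deepspeed.inference.v2.kernels import DSKernelBase


class IdentityKernel(DSKernelBase):
    """Hypothetical kernel used only to illustrate the DSKernelBase contract."""

    supported_dtypes = [torch.float16, torch.bfloat16]

    def __init__(self, dtype: torch.dtype) -> None:
        # Validation (and any compilation, warmup, or autotuning) happens here.
        if dtype not in IdentityKernel.supported_dtypes:
            raise ValueError(f"Unsupported dtype: {dtype}")
        self.dtype = dtype

    def __call__(self, output: torch.Tensor, inp: torch.Tensor) -> torch.Tensor:
        # All buffers are passed in; nothing is allocated or tuned at call time.
        output.copy_(inp)
        return output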
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/__init__.py ADDED
@@ -0,0 +1,13 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from .atom_builder import *
+ from .blocked_flash import *
+ from .embed import *
+ from .linear_blocked_kv_rotary import *
+ from .logits_gather import *
+ from .moe_gather import *
+ from .moe_scatter import *
+ from .top_k_gating import *
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__init__.py ADDED
@@ -0,0 +1,8 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from .blocked_kv_rotary import *
+ from .blocked_trained_kv_rotary import *
+ from .linear_blocked_kv_copy import *
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (339 Bytes).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__pycache__/blocked_kv_rotary.cpython-310.pyc ADDED
Binary file (2.91 kB).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__pycache__/blocked_trained_kv_rotary.cpython-310.pyc ADDED
Binary file (2.91 kB).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__pycache__/linear_blocked_kv_copy.cpython-310.pyc ADDED
Binary file (2.71 kB).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.cpp ADDED
@@ -0,0 +1,195 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ #include "blocked_kv_rotary.h"
+ #include "ragged_kernel_helpers.h"
+
+ #define DISPATCH_KV_ROTARY(T_TYPE, C_TYPE) \
+     if (q.options().dtype() == torch::T_TYPE) { \
+         launch_kv_rotary_kernel<C_TYPE>((C_TYPE*)kv_cache.data_ptr(), \
+                 (C_TYPE*)q.data_ptr(), \
+                 (C_TYPE*)k.data_ptr(), \
+                 (C_TYPE*)v.data_ptr(), \
+                 (C_TYPE*)inv_freq_ptr, \
+                 rotary_dim, \
+                 theta_base, \
+                 batch_wrapper, \
+                 qkv_stride, \
+                 kv_cache_stride, \
+                 v_offset, \
+                 inv_freq_stride, \
+                 q_ratio, \
+                 head_size, \
+                 n_tokens, \
+                 n_q_heads, \
+                 at::cuda::getCurrentCUDAStream()); \
+     }
+
+ /*
+ Rotary position embeddings + copy into KV cache. This implementation assumes
+ that the inverse frequencies should be ready from global memory rather than
+ synthesized in the kernel.
+
+ Arguments:
+     kv_cache: [n_blocks, block_size, 2, n_kv_heads, head_size]
+     q: [n_tokens, n_q_heads * head_size]
+     k: [n_tokens, n_kv_heads * head_size]
+     v: [n_tokens, n_kv_heads * head_size]
+     inv_freq: [max_seq_len, head_size // 2]
+ */
+ void kv_trained_rotary_embeddings(torch::Tensor& kv_cache,
+         torch::Tensor& q,
+         torch::Tensor& k,
+         torch::Tensor& v,
+         torch::Tensor& inv_freq,
+         torch::Tensor& batch_metadata,
+         torch::Tensor& seq_metadata,
+         torch::Tensor& tokens_to_seq,
+         torch::Tensor& kv_ptrs)
+ {
+     const int32_t n_tokens = q.size(0);
+     TORCH_CHECK(n_tokens == k.size(0));
+     TORCH_CHECK(n_tokens == v.size(0));
+
+     const float theta_base = 0.f;
+     const int32_t rotary_dim = inv_freq.size(0) * 2;
+
+     // Dimensions
+     const int32_t block_size = kv_cache.size(1);
+     const int32_t n_kv_heads = kv_cache.size(3);
+     const int32_t head_size = kv_cache.size(4);
+
+     // Strides
+     const int32_t qkv_stride = q.stride(0);  // Per token
+     const int32_t kv_cache_stride = kv_cache.stride(1);  // Per token
+     const int32_t v_offset = kv_cache.stride(2);  // From k_cache to v_cache
+     const int32_t inv_freq_stride = inv_freq.stride(0);  // Per token idx
+
+     const int n_q_heads = q.size(1) / head_size;
+     const int q_ratio = n_q_heads / n_kv_heads;
+
+     void* inv_freq_ptr = (void*)inv_freq.data_ptr();
+
+     BatchWrapperCPP batch_wrapper = make_cpp_batch_wrapper(
+         batch_metadata, seq_metadata, tokens_to_seq, kv_ptrs, block_size, kv_cache.size(0));
+
+     DISPATCH_KV_ROTARY(kHalf, __half);
+
+ #ifdef BF16_AVAILABLE
+     DISPATCH_KV_ROTARY(kBFloat16, __nv_bfloat16);
+ #endif
+ }
+
+ /*
+ Rotary position embeddings + copy into KV cache. This implementation assumes
+ that the inverse frequencies should be synthesized in the kernel.
+
+ Arguments:
+     kv_cache: [n_blocks, block_size, 2, n_kv_heads, head_size]
+     q: [n_tokens, n_q_heads * head_size]
+     k: [n_tokens, n_kv_heads * head_size]
+     v: [n_tokens, n_kv_heads * head_size]
+ */
+ void kv_rotary_embeddings(torch::Tensor& kv_cache,
+         torch::Tensor& q,
+         torch::Tensor& k,
+         torch::Tensor& v,
+         const int32_t rotary_dim,
+         const float theta_base,
+         torch::Tensor& batch_metadata,
+         torch::Tensor& seq_metadata,
+         torch::Tensor& tokens_to_seq,
+         torch::Tensor& kv_ptrs)
+ {
+     const int32_t n_tokens = q.size(0);
+     TORCH_CHECK(n_tokens == k.size(0));
+     TORCH_CHECK(n_tokens == v.size(0));
+
+     // Dimensions
+     const int32_t block_size = kv_cache.size(1);
+     const int32_t n_kv_heads = kv_cache.size(3);
+     const int32_t head_size = kv_cache.size(4);
+
+     // Strides
+     const int32_t qkv_stride = q.stride(0);  // Per token
+     const int32_t kv_cache_stride = kv_cache.stride(1);  // Per token
+     const int32_t v_offset = kv_cache.stride(2);  // From k_cache to v_cache
+     const int32_t inv_freq_stride = 0;  // Per token idx
+
+     const int n_q_heads = q.size(1) / head_size;
+     const int q_ratio = n_q_heads / n_kv_heads;
+
+     void* inv_freq_ptr = nullptr;
+
+     BatchWrapperCPP batch_wrapper = make_cpp_batch_wrapper(
+         batch_metadata, seq_metadata, tokens_to_seq, kv_ptrs, block_size, kv_cache.size(0));
+
+     DISPATCH_KV_ROTARY(kHalf, __half);
+
+ #ifdef BF16_AVAILABLE
+     DISPATCH_KV_ROTARY(kBFloat16, __nv_bfloat16);
+ #endif
+ }
+
+ #define DISPATCH_KV_COPY(T_TYPE, C_TYPE) \
+     if (q.options().dtype() == torch::T_TYPE) { \
+         launch_kv_copy_kernel<C_TYPE>((C_TYPE*)kv_cache.data_ptr(), \
+                 (C_TYPE*)q.data_ptr(), \
+                 (C_TYPE*)k.data_ptr(), \
+                 (C_TYPE*)v.data_ptr(), \
+                 batch_wrapper, \
+                 qkv_stride, \
+                 kv_cache_stride, \
+                 v_offset, \
+                 q_ratio, \
+                 head_size, \
+                 n_tokens, \
+                 n_q_heads, \
+                 at::cuda::getCurrentCUDAStream()); \
+     }
+
+ /*
+ Copy into linear KV cache.
+ */
+ void linear_kv_copy(torch::Tensor& kv_cache,
+         torch::Tensor& q,
+         torch::Tensor& k,
+         torch::Tensor& v,
+         torch::Tensor& batch_metadata,
+         torch::Tensor& seq_metadata,
+         torch::Tensor& tokens_to_seq,
+         torch::Tensor& kv_ptrs)
+ {
+     const int32_t n_tokens = q.size(0);
+     TORCH_CHECK(n_tokens == k.size(0));
+     TORCH_CHECK(n_tokens == v.size(0));
+
+     // Dimensions
+     const int32_t block_size = kv_cache.size(1);
+     const int32_t n_kv_heads = kv_cache.size(3);
+     const int32_t head_size = kv_cache.size(4);
+
+     // Strides
+     const int32_t qkv_stride = q.stride(0);  // Per token
+     TORCH_CHECK(qkv_stride == k.stride(0));
+     TORCH_CHECK(qkv_stride == v.stride(0));
+
+     const int32_t kv_cache_stride = kv_cache.stride(1);  // Per token
+     const int32_t v_offset = kv_cache.stride(2);  // From k_cache to v_cache
+
+     const int n_q_heads = q.size(1) / head_size;
+
+     TORCH_CHECK(n_q_heads % n_kv_heads == 0);
+     const int q_ratio = n_q_heads / n_kv_heads;
+
+     BatchWrapperCPP batch_wrapper = make_cpp_batch_wrapper(
+         batch_metadata, seq_metadata, tokens_to_seq, kv_ptrs, block_size, kv_cache.size(0));
+
+     DISPATCH_KV_COPY(kHalf, __half);
+
+ #ifdef BF16_AVAILABLE
+     DISPATCH_KV_COPY(kBFloat16, __nv_bfloat16);
+ #endif
+ }
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.cuh ADDED
@@ -0,0 +1,47 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ #pragma once
+
+ #include "ds_kernel_utils.h"
+ #include "ragged_dtypes.h"
+
+ #ifdef BF16_AVAILABLE
+ #include <cuda_bf16.h>
+ #endif
+
+ template <typename T>
+ void launch_kv_rotary_kernel(T* kv_cache,
+         T* q,
+         T* k,
+         T* v,
+         T* inv_freq,
+         const int32_t rotary_dim,
+         const float theta_base,
+         const BatchWrapperCPP batch_desc,
+         const int qkv_stride,
+         const int kv_cache_stride,
+         const int v_offset,
+         const int inv_freq_stride,
+         const int q_ratio,
+         const int head_size,
+         const int n_tokens,
+         const int n_q_heads,
+         cudaStream_t stream);
+
+ template <typename T>
+ void launch_kv_copy_kernel(T* kv_cache,
+         T* q,
+         T* k,
+         T* v,
+         const BatchWrapperCPP batch_desc,
+         const int qkv_stride,
+         const int kv_cache_stride,
+         const int v_offset,
+         const int q_ratio,
+         const int head_size,
+         const int n_tokens,
+         const int n_q_heads,
+         cudaStream_t stream);
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.h ADDED
@@ -0,0 +1,65 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ #pragma once
+
+ #include <c10/cuda/CUDAStream.h>
+ #include <torch/extension.h>
+ #include "blocked_kv_rotary.cuh"
+
+ /*
+ Rotary position embeddings + copy into KV cache. This implementation assumes
+ that the inverse frequencies should be ready from global memory rather than
+ synthesized in the kernel.
+
+ Arguments:
+     kv_cache: [n_blocks, block_size, 2, n_kv_heads, head_size]
+     q: [n_tokens, n_q_heads * head_size]
+     k: [n_tokens, n_kv_heads * head_size]
+     v: [n_tokens, n_kv_heads * head_size]
+     inv_freq: [max_seq_len, head_size // 2]
+ */
+ void kv_trained_rotary_embeddings(torch::Tensor& kv_cache,
+         torch::Tensor& q,
+         torch::Tensor& k,
+         torch::Tensor& v,
+         torch::Tensor& inv_freq,
+         torch::Tensor& batch_metadata,
+         torch::Tensor& seq_metadata,
+         torch::Tensor& tokens_to_seq,
+         torch::Tensor& kv_ptrs);
+
+ /*
+ Rotary position embeddings + copy into KV cache. This implementation assumes
+ that the inverse frequencies should be synthesized in the kernel.
+
+ Arguments:
+     kv_cache: [n_blocks, block_size, 2, n_kv_heads, head_size]
+     q: [n_tokens, n_q_heads * head_size]
+     k: [n_tokens, n_kv_heads * head_size]
+     v: [n_tokens, n_kv_heads * head_size]
+ */
+ void kv_rotary_embeddings(torch::Tensor& kv_cache,
+         torch::Tensor& q,
+         torch::Tensor& k,
+         torch::Tensor& v,
+         const int32_t rotary_dim,
+         const float theta_base,
+         torch::Tensor& batch_metadata,
+         torch::Tensor& seq_metadata,
+         torch::Tensor& tokens_to_seq,
+         torch::Tensor& kv_ptrs);
+
+ /*
+ Copy into linear KV cache.
+ */
+ void linear_kv_copy(torch::Tensor& kv_cache,
+         torch::Tensor& q,
+         torch::Tensor& k,
+         torch::Tensor& v,
+         torch::Tensor& batch_metadata,
+         torch::Tensor& seq_metadata,
+         torch::Tensor& tokens_to_seq,
+         torch::Tensor& kv_ptrs);
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.py ADDED
@@ -0,0 +1,73 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import torch
+
+ from ....inference_utils import DtypeEnum
+ from deepspeed.ops.op_builder import RaggedOpsBuilder
+ from ....ragged import RaggedBatchWrapper
+ from ... import DSKernelBase
+
+
+ class BlockedRotaryEmbeddings(DSKernelBase):
+     """
+     CUDA Kernel implementation that will perform rotary position embeddings on the queries and keys
+     before copying into a blocked KV cache.
+     """
+
+     supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16]
+     supported_head_sizes = [64, 80, 128]
+     supported_q_ratios = [1, 2, 4, 5, 8, 16, 29, 35, 36, 71]
+
+     def __init__(self, head_size: int, n_q_heads: int, n_kv_heads: int, dtype: torch.dtype, rotary_dim: int,
+                  theta_base: float) -> None:
+         """
+         Args:
+             head_size: The size of the attention head.
+             q_ratio: Ratio of q heads to kv heads (for GQA)
+             dtype: Data type for the input/output. Supported values are torch.float16 and torch.bfloat16.
+         """
+
+         q_ratio = n_q_heads // n_kv_heads
+
+         if head_size not in BlockedRotaryEmbeddings.supported_head_sizes:
+             raise ValueError("Unsupported head size: {}, supported_head_sizes are {}".format(
+                 head_size, BlockedRotaryEmbeddings.supported_head_sizes))
+
+         if q_ratio not in BlockedRotaryEmbeddings.supported_q_ratios:
+             raise ValueError("Unsupported q_ratio: {}, supported_q_ratios are {}".format(
+                 q_ratio, BlockedRotaryEmbeddings.supported_q_ratios))
+
+         if not isinstance(dtype, DtypeEnum):
+             dtype = DtypeEnum(dtype)
+
+         if dtype not in BlockedRotaryEmbeddings.supported_dtypes:
+             raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format(
+                 dtype, BlockedRotaryEmbeddings.supported_dtypes))
+
+         inf_module = RaggedOpsBuilder().load()
+         self.kernel = inf_module.kv_rotary_embeddings
+         self.head_size = head_size
+         self.n_q_heads = n_q_heads
+         self.n_kv_heads = n_kv_heads
+         self.rotary_dim = rotary_dim
+         self.theta_base = theta_base
+
+     def __call__(self, kv_cache: torch.Tensor, qkv: torch.Tensor, ragged_batch: RaggedBatchWrapper) -> None:
+         """
+         Perform rotary embeddings on the queries and keys before copying into a blocked KV cache.
+
+         Args:
+             kv_cache (torch.Tensor): Pre-allocated KV cache of [num_blocks, block_size, 2, n_kv_heads, head_size]
+             qkv: Input tensor of shape [num_tokens, head_size * (n_q_heads + 2 * n_kv_heads)]
+             ragged_batch: Wrapper for the ragged batch.
+         """
+
+         q = qkv[:, :self.head_size * self.n_q_heads]
+         k = qkv[:, self.head_size * self.n_q_heads:self.head_size * (self.n_q_heads + self.n_kv_heads)]
+         v = qkv[:, self.head_size * (self.n_q_heads + self.n_kv_heads):]
+
+         self.kernel(kv_cache, q, k, v, self.rotary_dim, self.theta_base, ragged_batch.batch_metadata_buffer(),
+                     ragged_batch.inflight_seq_descriptors(), ragged_batch.tokens_to_seq(), ragged_batch.kv_ptrs())
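Roughly, the wrapper above is driven as follows. This is a sketch with made-up sizes; constructing the RaggedBatchWrapper requires the surrounding inference engine and is elided here.

import torch

# Hypothetical model configuration, for illustration only.
head_size, n_q_heads, n_kv_heads = 128, 32, 8
num_blocks, block_size, num_tokens = 64, 128, 16

rotary = BlockedRotaryEmbeddings(head_size, n_q_heads, n_kv_heads, torch.float16,
                                 rotary_dim=head_size, theta_base=10000.0)

kv_cache = torch.empty(num_blocks, block_size, 2, n_kv_heads, head_size,
                       dtype=torch.float16, device="cuda")
qkv = torch.randn(num_tokens, head_size * (n_q_heads + 2 * n_kv_heads),
                  dtype=torch.float16, device="cuda")

# `ragged_batch` must be a RaggedBatchWrapper describing the in-flight sequences;
# building one is outside the scope of this sketch.
# rotary(kv_cache, qkv, ragged_batch)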
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary_cuda.cu ADDED
@@ -0,0 +1,385 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ #include <cassert>
+ #include "blocked_kv_rotary.cuh"
+ #include "conversion_utils.h"
+ #include "ds_kernel_utils.h"
+ #include "memory_access_utils.h"
+
+ namespace cg = cooperative_groups;
+
+ namespace kv_rot {
+
+ constexpr int granularity = 16;
+ constexpr int threads = 256;
+
+ }  // namespace kv_rot
+
+ /*
+ Supports head size 32, 64, 128, 256
+ */
+
+ template <typename T, int qRatio, int headSize, bool doRotary, int paddedHeadSize>
+ __global__ void kv_rotary_pos_kernel(T* kv_cache,
+         T* q,
+         T* k,
+         T* v,
+         const T* inv_freq,
+         const int32_t rotary_dim,
+         const float theta_base,
+         const BatchWrapperCPP batch_desc,
+         const int qkv_stride,
+         const int kv_cache_stride,
+         const int v_offset,
+         const int inv_freq_stride)
+ {
+     // Derived constexpr
+     constexpr int vector_T = kv_rot::granularity / sizeof(T);
+     constexpr int real_threads_per_head = headSize / vector_T;
+     constexpr int threads_per_head = paddedHeadSize / vector_T;
+
+     constexpr int tokens_per_block = kv_rot::threads / threads_per_head;
+
+     // CG helpers
+     cg::thread_block tb = cg::this_thread_block();
+     cg::thread_block_tile<hw_warp_size> warp = cg::tiled_partition<hw_warp_size>(tb);
+     cg::thread_block_tile<threads_per_head> head_group = cg::tiled_partition<threads_per_head>(tb);
+
+     // Parallelize on the head dimension for X blocks
+     const int head_idx = blockIdx.x;
+
+     const int block_seq_idx = threadIdx.x / threads_per_head;
+     const int base_neuron_idx = head_group.thread_rank() * vector_T;
+     const int half_rotary_size = rotary_dim / 2;
+     const int half_dim_lanes = half_rotary_size / vector_T;
+     const int half_idx = base_neuron_idx % half_rotary_size;
+
+     // Multiple tokens processed by the same threadblock
+     const int token_idx = blockIdx.y * tokens_per_block + block_seq_idx;
+     const bool valid_token = token_idx < batch_desc.batch_metadata->n_tokens;
+
+     const bool valid_thread = valid_token && (head_group.thread_rank() < real_threads_per_head);
+     const bool load_inv_freq = (inv_freq != nullptr) && valid_thread;
+
+     // If we have GQA, then only one of the Q heads needs to do rotary + copy
+     // for each of the heads in the group.
+     bool need_kv = head_idx % qRatio == 0;
+     // Make sure the following code is warp uniform
+     need_kv = warp.shfl(need_kv, 0);
+
+     const int kv_head_idx = head_idx / qRatio;
+
+     // Ensure we don't access invalid portions of the seq_metadata
+     const int32_t seq_id = (valid_thread) ? batch_desc.tokens_to_seq[token_idx] : 0;
+     const InflightSeqDescriptor seq_desc = batch_desc.seq_metadata[seq_id];
+     // This will give an invalid index if valid_thread is false, but should never affect memory.
+     const int32_t global_token_idx = seq_desc.seen_tokens + (token_idx - seq_desc.start_idx);
+
+     T* q_row = q + token_idx * qkv_stride + head_idx * headSize;
+     T q_reg[vector_T];
+
+     if (need_kv) {
+         // The following logic assumes a linearly blocked KV cache. This means that no sparsity has
+         // been introduced into cache history.
+         const KVCacheDescriptor kv_desc = batch_desc.kv_desc;
+         const int32_t seq_kv_block_idx = global_token_idx / kv_desc.block_size;
+         const int32_t mapped_kv_block_idx =
+             (valid_thread) ? kv_desc.block_lists[seq_id][seq_kv_block_idx] : 0;
+
+         const int32_t kv_block_offset = global_token_idx % kv_desc.block_size;
+         const int32_t kv_offset =
+             (mapped_kv_block_idx * kv_desc.block_size + kv_block_offset) * kv_cache_stride +
+             kv_head_idx * headSize;
+
+         // Load indices from QKV output
+         T* k_row = k + token_idx * qkv_stride + kv_head_idx * headSize;
+         T* v_row = v + token_idx * qkv_stride + kv_head_idx * headSize;
+
+         T k_reg[vector_T], v_reg[vector_T], inv_freq_reg[vector_T];
+
+         mem_access::load_global<kv_rot::granularity>(q_reg, q_row + base_neuron_idx, valid_thread);
+         mem_access::load_global<kv_rot::granularity>(k_reg, k_row + base_neuron_idx, valid_thread);
+         mem_access::load_global<kv_rot::granularity>(v_reg, v_row + base_neuron_idx, valid_thread);
+         mem_access::load_global<kv_rot::granularity>(
+             inv_freq_reg, inv_freq + half_idx, load_inv_freq);
+         if constexpr (doRotary) {
+ #pragma unroll
+             for (int i = 0; i < vector_T; i++) {
+                 const int head_neuron_idx = base_neuron_idx + i;
+
+                 float inv_freq_flt;
+                 if (inv_freq != nullptr) {
+                     inv_freq_flt = conversion::to<float>(inv_freq_reg[i]) * (float)global_token_idx;
+                 } else {
+                     inv_freq_flt =
+                         (float)((head_neuron_idx % half_rotary_size) * 2) / (float)rotary_dim;
+                     // Conversion to T and back means that both branches of this if statement
+                     // will produce the same results if using the same algo for producing the
+                     // freqs.
+                     T trunc_freq = conversion::to<T>(1.0 / powf(theta_base, inv_freq_flt));
+                     inv_freq_flt = conversion::to<float>(trunc_freq) * (float)global_token_idx;
+                 }
+
+                 float rotary_sign = (head_neuron_idx >= half_rotary_size) ? -1.0f : 1.0f;
+                 float q_f = conversion::to<float>(q_reg[i]);
+                 float k_f = conversion::to<float>(k_reg[i]);
+                 float q_rot = q_f * rotary_sign;
+                 float k_rot = k_f * rotary_sign;
+
+                 const int target_lane = (head_neuron_idx < half_rotary_size)
+                     ? head_group.thread_rank() + half_dim_lanes
+                     : head_group.thread_rank() - half_dim_lanes;
+
+                 const float q_rot_temp = head_group.shfl(q_rot, target_lane);
+                 const float k_rot_temp = head_group.shfl(k_rot, target_lane);
+
+                 if (base_neuron_idx < rotary_dim) {
+                     q_reg[i] = conversion::to<T>(q_f * cosf(inv_freq_flt) +
+                         q_rot_temp * sinf(inv_freq_flt));
+                     k_reg[i] = conversion::to<T>(k_f * cosf(inv_freq_flt) +
+                         k_rot_temp * sinf(inv_freq_flt));
+                 }
+             }
+         }
+
+         if (valid_thread) {
+             mem_access::store_global<kv_rot::granularity>(kv_cache + kv_offset + base_neuron_idx,
+                 k_reg);
+             mem_access::store_global<kv_rot::granularity>(
+                 kv_cache + kv_offset + base_neuron_idx + v_offset, v_reg);
+         }
+     } else {
+         T inv_freq_reg[vector_T];
+
+         mem_access::load_global<kv_rot::granularity>(q_reg, q_row + base_neuron_idx, valid_thread);
+         mem_access::load_global<kv_rot::granularity>(
+             inv_freq_reg, inv_freq + half_idx, load_inv_freq);
+
+         if constexpr (doRotary) {
+ #pragma unroll
+             for (int i = 0; i < vector_T; i++) {
+                 const int head_neuron_idx = base_neuron_idx + i;
+
+                 float inv_freq_flt;
+                 if (inv_freq != nullptr) {
+                     inv_freq_flt = conversion::to<float>(inv_freq_reg[i]) * (float)global_token_idx;
+                 } else {
+                     inv_freq_flt =
+                         (float)((head_neuron_idx % half_rotary_size) * 2) / (float)rotary_dim;
+                     inv_freq_flt = 1.0 / powf(theta_base, inv_freq_flt) * (float)global_token_idx;
+                 }
+
+                 float rotary_sign = (head_neuron_idx >= half_rotary_size) ? -1.0f : 1.0f;
+                 float q_f = conversion::to<float>(q_reg[i]);
+                 float q_rot = q_f * rotary_sign;
+
+                 const int target_lane = (head_neuron_idx < half_rotary_size)
+                     ? head_group.thread_rank() + half_dim_lanes
+                     : head_group.thread_rank() - half_dim_lanes;
+
+                 const float q_rot_temp = head_group.shfl(q_rot, target_lane);
+                 if (base_neuron_idx < rotary_dim)
+                     q_reg[i] = conversion::to<T>(q_f * cosf(inv_freq_flt) +
+                         q_rot_temp * sinf(inv_freq_flt));
+             }
+         }
+     }
+
+     if (valid_thread && doRotary) {
+         mem_access::store_global<kv_rot::granularity>(q_row + base_neuron_idx, q_reg);
+     }
+ }
+
+ #define DISPATCH_KV_ROTARY_IMPL(Q_RATIO, HEAD_SIZE, PADDED_HEAD_SIZE) \
+     if (q_ratio == Q_RATIO && head_size == HEAD_SIZE) \
+         kv_rotary_pos_kernel<T, Q_RATIO, HEAD_SIZE, true, PADDED_HEAD_SIZE> \
+             <<<grid, block, 0, stream>>>(kv_cache, \
+                 q, \
+                 k, \
+                 v, \
+                 inv_freq, \
+                 rotary_dim, \
+                 theta_base, \
+                 batch_desc, \
+                 qkv_stride, \
+                 kv_cache_stride, \
+                 v_offset, \
+                 inv_freq_stride);
+
+ #define LAUNCH_KV_ROTARY_FOR_Q_RATIO_HEAD_SIZE(Q_RATIO, HEAD_SIZE) \
+     if (padded_head_size == 64) { \
+         DISPATCH_KV_ROTARY_IMPL(Q_RATIO, HEAD_SIZE, 64); \
+     } else if (padded_head_size == 128) { \
+         DISPATCH_KV_ROTARY_IMPL(Q_RATIO, HEAD_SIZE, 128); \
+     } else { \
+         assert(false); \
+     }
+
+ #define LAUNCH_KV_ROTARY_FOR_Q_RATIO(Q_RATIO) \
+     if (head_size == 64) { \
+         LAUNCH_KV_ROTARY_FOR_Q_RATIO_HEAD_SIZE(Q_RATIO, 64); \
+     } else if (head_size == 80) { \
+         LAUNCH_KV_ROTARY_FOR_Q_RATIO_HEAD_SIZE(Q_RATIO, 80); \
+     } else if (head_size == 128) { \
+         LAUNCH_KV_ROTARY_FOR_Q_RATIO_HEAD_SIZE(Q_RATIO, 128); \
+     } else { \
+         assert(false); \
+     }
+
+ template <typename T>
+ void launch_kv_rotary_kernel(T* kv_cache,
+         T* q,
+         T* k,
+         T* v,
+         T* inv_freq,
+         const int32_t rotary_dim,
+         const float theta_base,
+         const BatchWrapperCPP batch_desc,
+         const int qkv_stride,
+         const int kv_cache_stride,
+         const int v_offset,
+         const int inv_freq_stride,
+         const int q_ratio,
+         const int head_size,
+         const int n_tokens,
+         const int n_q_heads,
+         cudaStream_t stream)
+ {
+     constexpr int vector_T = kv_rot::granularity / sizeof(T);
+
+     const int padded_head_size = next_pow2(head_size);
+     const int threads_per_head = padded_head_size / vector_T;
+
+     const int tokens_per_block = kv_rot::threads / threads_per_head;
+
+     const dim3 block(kv_rot::threads);
+     const int token_blocks = (n_tokens + tokens_per_block - 1) / tokens_per_block;
+     const dim3 grid(n_q_heads, token_blocks);
+
+     LAUNCH_KV_ROTARY_FOR_Q_RATIO(1)
+     LAUNCH_KV_ROTARY_FOR_Q_RATIO(2)
+     LAUNCH_KV_ROTARY_FOR_Q_RATIO(4)
+     LAUNCH_KV_ROTARY_FOR_Q_RATIO(5)
+     LAUNCH_KV_ROTARY_FOR_Q_RATIO(8)
+     LAUNCH_KV_ROTARY_FOR_Q_RATIO(16)
+     LAUNCH_KV_ROTARY_FOR_Q_RATIO(29)
+     LAUNCH_KV_ROTARY_FOR_Q_RATIO(35)
+     LAUNCH_KV_ROTARY_FOR_Q_RATIO(36)
+     LAUNCH_KV_ROTARY_FOR_Q_RATIO(71)
+ }
+
+ #define INSTANTIATE_KV_ROTARY_KERNEL(TYPE) \
+     template void launch_kv_rotary_kernel<TYPE>(TYPE * kv_cache, \
+             TYPE * q, \
+             TYPE * k, \
+             TYPE * v, \
+             TYPE * inv_freq, \
+             const int32_t rotary_dim, \
+             const float theta_base, \
+             const BatchWrapperCPP batch_desc, \
+             const int qkv_stride, \
+             const int kv_cache_stride, \
+             const int v_offset, \
+             const int inv_freq_stride, \
+             const int q_ratio, \
+             const int head_size, \
+             const int n_tokens, \
+             const int n_q_heads, \
+             cudaStream_t stream);
+
+ INSTANTIATE_KV_ROTARY_KERNEL(__half)
+
+ #ifdef BF16_AVAILABLE
+ INSTANTIATE_KV_ROTARY_KERNEL(__nv_bfloat16)
+ #endif
+
+ #define DISPATCH_KV_COPY_IMPL(Q_RATIO, HEAD_SIZE, PADDED_HEAD_SIZE) \
+     if (q_ratio == Q_RATIO && head_size == HEAD_SIZE) \
+         kv_rotary_pos_kernel<T, Q_RATIO, HEAD_SIZE, false, PADDED_HEAD_SIZE> \
+             <<<grid, block, 0, stream>>>(kv_cache, \
+                 q, \
+                 k, \
+                 v, \
+                 nullptr, \
+                 -1, \
+                 0.f, \
+                 batch_desc, \
+                 qkv_stride, \
+                 kv_cache_stride, \
+                 v_offset, \
+                 0);
+
+ #define LAUNCH_KV_COPY_FOR_Q_RATIO_HEAD_SIZE(Q_RATIO, HEAD_SIZE) \
+     if (padded_head_size == 64) { \
+         DISPATCH_KV_COPY_IMPL(Q_RATIO, HEAD_SIZE, 64); \
+     } else if (padded_head_size == 128) { \
+         DISPATCH_KV_COPY_IMPL(Q_RATIO, HEAD_SIZE, 128); \
+     } else { \
+         assert(false); \
+     }
+
+ #define LAUNCH_KV_COPY_FOR_Q_RATIO(Q_RATIO) \
+     if (head_size == 64) { \
+         LAUNCH_KV_COPY_FOR_Q_RATIO_HEAD_SIZE(Q_RATIO, 64); \
+     } else if (head_size == 80) { \
+         LAUNCH_KV_COPY_FOR_Q_RATIO_HEAD_SIZE(Q_RATIO, 80); \
+     } else if (head_size == 128) { \
+         LAUNCH_KV_COPY_FOR_Q_RATIO_HEAD_SIZE(Q_RATIO, 128); \
+     } else { \
+         assert(false); \
+     }
+
+ template <typename T>
+ void launch_kv_copy_kernel(T* kv_cache,
+         T* q,
+         T* k,
+         T* v,
+         const BatchWrapperCPP batch_desc,
+         const int qkv_stride,
+         const int kv_cache_stride,
+         const int v_offset,
+         const int q_ratio,
+         const int head_size,
+         const int n_tokens,
+         const int n_q_heads,
+         cudaStream_t stream)
+ {
+     constexpr int vector_T = kv_rot::granularity / sizeof(T);
+     const int padded_head_size = next_pow2(head_size);
+     const int threads_per_head = padded_head_size / vector_T;
+     const int tokens_per_block = kv_rot::threads / threads_per_head;
+
+     const dim3 block(kv_rot::threads);
+     const int token_blocks = (n_tokens + tokens_per_block - 1) / tokens_per_block;
+     const dim3 grid(n_q_heads, token_blocks);
+
+     LAUNCH_KV_COPY_FOR_Q_RATIO(1)
+     LAUNCH_KV_COPY_FOR_Q_RATIO(2)
+     LAUNCH_KV_COPY_FOR_Q_RATIO(4)
+     LAUNCH_KV_COPY_FOR_Q_RATIO(5)
+     LAUNCH_KV_COPY_FOR_Q_RATIO(8)
+ }
+
+ #define INSTANTIATE_KV_COPY_KERNEL(TYPE) \
+     template void launch_kv_copy_kernel<TYPE>(TYPE * kv_cache, \
+             TYPE * q, \
+             TYPE * k, \
+             TYPE * v, \
+             const BatchWrapperCPP batch_desc, \
+             const int qkv_stride, \
+             const int kv_cache_stride, \
+             const int v_offset, \
+             const int q_ratio, \
+             const int head_size, \
+             const int n_tokens, \
+             const int n_q_heads, \
+             cudaStream_t stream);
+
+ INSTANTIATE_KV_COPY_KERNEL(__half)
+
+ #ifdef BF16_AVAILABLE
+ INSTANTIATE_KV_COPY_KERNEL(__nv_bfloat16)
+ #endif
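For reference, the per-head math the kernel applies is the standard half-rotation rotary embedding. Below is a NumPy sketch of the equivalent computation (an illustration only; it ignores the blocked cache layout, GQA handling, and the vectorized lane shuffles):

import numpy as np

def rotary_reference(x, position, rotary_dim, theta_base=10000.0):
    """Apply rotary position embedding to one head vector x of shape [head_size].

    Element i in the first half of the rotary span pairs with element i + rotary_dim//2;
    both are rotated by angle position * theta_base ** (-2*i / rotary_dim), mirroring
    `rotary_sign` and the `target_lane` shuffle in the kernel above.
    """
    out = x.astype(np.float32).copy()
    half = rotary_dim // 2
    inv_freq = theta_base ** (-np.arange(half, dtype=np.float32) * 2.0 / rotary_dim)
    angles = position * inv_freq              # one angle per (i, i + half) pair
    cos, sin = np.cos(angles), np.sin(angles)
    x1, x2 = out[:half].copy(), out[half:rotary_dim].copy()
    out[:half] = x1 * cos - x2 * sin          # first half: sign +1, partner from upper half
    out[half:rotary_dim] = x2 * cos + x1 * sin  # second half: sign -1, partner from lower half
    return out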
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_trained_kv_rotary.py ADDED
@@ -0,0 +1,76 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import torch
+
+ from ....inference_utils import DtypeEnum
+ from deepspeed.ops.op_builder import RaggedOpsBuilder
+ from ....ragged import RaggedBatchWrapper
+ from ... import DSKernelBase
+
+
+ class BlockedTrainedRotaryEmbeddings(DSKernelBase):
+     """
+     CUDA Kernel implementation that will perform rotary position embeddings on the queries and keys
+     before copying into a blocked KV cache.
+     """
+
+     supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16]
+     supported_head_sizes = [64, 80, 128]
+     supported_q_ratios = [1, 2, 4, 5, 8]
+
+     def __init__(self, head_size: int, n_q_heads: int, n_kv_heads: int, dtype: torch.dtype) -> None:
+         """
+         Args:
+             head_size: The size of the attention head.
+             dtype: Data type for the input/output. Supported values are torch.float16 and torch.bfloat16.
+         """
+
+         q_ratio = n_q_heads // n_kv_heads
+
+         if head_size not in BlockedTrainedRotaryEmbeddings.supported_head_sizes:
+             raise ValueError("Unsupported head size: {}, supported_head_sizes are {}".format(
+                 head_size, BlockedTrainedRotaryEmbeddings.supported_head_sizes))
+
+         if q_ratio not in BlockedTrainedRotaryEmbeddings.supported_q_ratios:
+             raise ValueError("Unsupported q_ratio: {}, supported_q_ratios are {}".format(
+                 q_ratio, BlockedTrainedRotaryEmbeddings.supported_q_ratios))
+
+         if not isinstance(dtype, DtypeEnum):
+             dtype = DtypeEnum(dtype)
+
+         if dtype not in BlockedTrainedRotaryEmbeddings.supported_dtypes:
+             raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format(
+                 dtype, BlockedTrainedRotaryEmbeddings.supported_dtypes))
+
+         inf_module = RaggedOpsBuilder().load()
+         self.kernel = inf_module.kv_trained_rotary_embeddings
+         self.head_size = head_size
+         self.n_q_heads = n_q_heads
+         self.n_kv_heads = n_kv_heads
+
+     def __call__(self, kv_cache: torch.Tensor, qkv: torch.Tensor, ragged_batch: RaggedBatchWrapper,
+                  inverse_freqs: torch.Tensor) -> None:
+         """
+         Perform rotary embeddings on the queries and keys before copying into a blocked KV cache.
+
+         Args:
+             kv_cache (torch.Tensor): Pre-allocated KV cache of [num_blocks, block_size, 2, n_kv_heads, head_size]
+             qkv: Input tensor of shape [num_tokens, head_size * (n_q_heads + 2 * n_kv_heads)]
+             ragged_batch: Wrapper for the ragged batch.
+             inverse_freqs: Inverse frequencies for the rotary embeddings. Shape [max_seq_len, rotary_dim // 2]
+         """
+
+         q = qkv[:, :self.head_size * self.n_q_heads]
+         k = qkv[:, self.head_size * self.n_q_heads:self.head_size * (self.n_q_heads + self.n_kv_heads)]
+         v = qkv[:, self.head_size * (self.n_q_heads + self.n_kv_heads):]
+
+         self.kernel(kv_cache, q, k, v, inverse_freqs, ragged_batch.batch_metadata_buffer(),
+                     ragged_batch.inflight_seq_descriptors(), ragged_batch.tokens_to_seq(), ragged_batch.kv_ptrs())
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/linear_blocked_kv_copy.py ADDED
@@ -0,0 +1,74 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import torch
+
+ from ....inference_utils import DtypeEnum
+ from ....ragged import RaggedBatchWrapper
+ from deepspeed.ops.op_builder import RaggedOpsBuilder
+ from ... import DSKernelBase
+
+
+ class LinearBlockedKVCopy(DSKernelBase):
+     """
+     CUDA Kernel implementation that will perform rotary position embeddings on the queries and keys
+     before copying into a blocked KV cache.
+     """
+
+     supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16]
+     supported_head_sizes = [64, 80, 128]
+     supported_q_ratios = [1, 2, 4, 5, 8]
+
+     def __init__(self, head_size: int, n_q_heads: int, n_kv_heads: int, dtype: torch.dtype) -> None:
+         """
+         Args:
+             head_size: The size of the attention head.
+             dtype: Data type for the input/output. Supported values are torch.float16 and torch.bfloat16.
+         """
+
+         q_ratio = n_q_heads // n_kv_heads
+
+         if head_size not in LinearBlockedKVCopy.supported_head_sizes:
+             raise ValueError("Unsupported head size: {}, supported_head_sizes are {}".format(
+                 head_size, LinearBlockedKVCopy.supported_head_sizes))
+
+         if q_ratio not in LinearBlockedKVCopy.supported_q_ratios:
+             raise ValueError("Unsupported q_ratio: {}, supported_q_ratios are {}".format(
+                 q_ratio, LinearBlockedKVCopy.supported_q_ratios))
+
+         if not isinstance(dtype, DtypeEnum):
+             dtype = DtypeEnum(dtype)
+
+         if dtype not in LinearBlockedKVCopy.supported_dtypes:
+             raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format(
+                 dtype, LinearBlockedKVCopy.supported_dtypes))
+
+         inf_module = RaggedOpsBuilder().load()
+         self.kernel = inf_module.linear_kv_copy
+         self.head_size = head_size
+         self.n_q_heads = n_q_heads
+         self.n_kv_heads = n_kv_heads
+
+     def __call__(self, kv_cache: torch.Tensor, qkv: torch.Tensor, ragged_batch: RaggedBatchWrapper) -> None:
+         """
+         Perform rotary embeddings on the queries and keys before copying into a blocked KV cache.
+
+         Args:
+             kv_cache (torch.Tensor): Pre-allocated KV cache of [num_blocks, block_size, 2, n_kv_heads, head_size]
+             qkv: Input tensor of shape [num_tokens, head_size * (n_q_heads + 2 * n_kv_heads)]
+             ragged_batch: Wrapper for the ragged batch.
+         """
+
+         q = qkv[:, :self.head_size * self.n_q_heads]
+         k = qkv[:, self.head_size * self.n_q_heads:self.head_size * (self.n_q_heads + self.n_kv_heads)]
+         v = qkv[:, self.head_size * (self.n_q_heads + self.n_kv_heads):]
+
+         self.kernel(kv_cache, q, k, v, ragged_batch.batch_metadata_buffer(), ragged_batch.inflight_seq_descriptors(),
+                     ragged_batch.tokens_to_seq(), ragged_batch.kv_ptrs())
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/ragged_ops.cpp ADDED
@@ -0,0 +1,48 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ #include <torch/extension.h>
+
+ #include "atom_builder.h"
+ #include "blocked_flash.h"
+ #include "blocked_kv_rotary.h"
+ #include "embed.h"
+ #include "logits_gather.h"
+ #include "moe_gather.h"
+ #include "moe_scatter.h"
+ #include "top_k_gating.h"
+
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
+ {
+     // atom_builder.h
+     m.def("build_atoms", &build_atoms, "Host kernel for building the atoms.");
+
+     // blocked_flash.h
+     m.def("flash_attn_by_atoms",
+           &flash_attn_by_atoms,
+           "Blocked flash attention scheduled with atoms");
+
+     // blocked_kv_rotary.h
+     m.def("kv_rotary_embeddings", &kv_rotary_embeddings, "KV rotary embedding for blocked KV");
+     m.def("kv_trained_rotary_embeddings",
+           &kv_trained_rotary_embeddings,
+           "KV rotary embeddings for blocked KV");
+     m.def("linear_kv_copy", &linear_kv_copy, "Linear copy for blocked KV");
+
+     // embed.h
+     m.def("ragged_embed", &ragged_embed, "Embedding lookup for ragged batch");
+
+     // logits_gather.h
+     m.def("gather_for_logits", &gather_for_logits, "Sparse gather from ragged batch");
+
+     // moe_gather.h
+     m.def("moe_gather", &moe_gather, "MoE gather for top-1-gating.");
+
+     // moe_scatter.h
+     m.def("moe_scatter", &moe_scatter, "MoE scatter for top-1-gating.");
+
+     // top_k_gating.h
+     m.def("top_k_gating", &top_k_gating, "Top-1 gating for MoE with ragged batch awareness.");
+ }
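These bindings are what the Python wrappers elsewhere in this commit call into. Loosely, the extension is built and loaded through the op builder, and the functions registered above then appear as module attributes (a sketch of the pattern already used by the wrapper classes, not additional API):

from deepspeed.ops.op_builder import RaggedOpsBuilder

# Builds (if needed) and loads the extension defined by this PYBIND11_MODULE.
inf_module = RaggedOpsBuilder().load()

# The registered functions are then reachable as attributes, e.g.:
# inf_module.kv_rotary_embeddings(...), inf_module.linear_kv_copy(...),
# inf_module.top_k_gating(...)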
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/__init__.py ADDED
@@ -0,0 +1,6 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from .top_k_gating import RaggedTopKGating
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (275 Bytes).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.cpp ADDED
@@ -0,0 +1,61 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ #include "top_k_gating.h"
+ #include <c10/cuda/CUDAStream.h>
+
+ #define DISPATCH_TOP_K_GATING(T_TYPE, C_TYPE) \
+     if (logits.options().dtype() == torch::T_TYPE) { \
+         launch_top_k_gating((int32_t*)expert_counts.data_ptr(), \
+                 (float*)scores.data_ptr(), \
+                 (int32_t*)assignments.data_ptr(), \
+                 (int32_t*)offsets.data_ptr(), \
+                 (const C_TYPE*)logits.data_ptr(), \
+                 batch_metadata_ptr, \
+                 n_tokens, \
+                 n_experts, \
+                 n_top_k, \
+                 at::cuda::getCurrentCUDAStream()); \
+         return; \
+     }
+
+ /*
+ Perform softmax plus atomics in order to do first pass of top_k_gating.
+ */
+ void top_k_gating(torch::Tensor& expert_counts,
+         torch::Tensor& scores,
+         torch::Tensor& assignments,
+         torch::Tensor& offsets,
+         torch::Tensor& logits,
+         torch::Tensor& batch_metadata)
+ {
+     const int32_t n_tokens = scores.size(0);
+     const int32_t n_top_k = scores.size(1);
+
+     // Should have the same buffer size for scores, offsets, and assignments
+     TORCH_CHECK(n_tokens == offsets.size(0));
+     TORCH_CHECK(n_tokens == logits.size(0));
+     TORCH_CHECK(n_tokens == assignments.size(0));
+
+     TORCH_CHECK(n_top_k == offsets.size(1));
+     TORCH_CHECK(n_top_k == assignments.size(1));
+
+     TORCH_CHECK(expert_counts.scalar_type() == torch::kInt32);
+     TORCH_CHECK(scores.scalar_type() == torch::kFloat);
+     TORCH_CHECK(assignments.scalar_type() == torch::kInt32);
+     TORCH_CHECK(offsets.scalar_type() == torch::kInt32);
+
+     const int32_t n_experts = logits.size(1);
+     const RaggedBatchDescriptor* batch_metadata_ptr =
+         reinterpret_cast<const RaggedBatchDescriptor*>(batch_metadata.data_ptr());
+
+     DISPATCH_TOP_K_GATING(kFloat, float)
+     DISPATCH_TOP_K_GATING(kHalf, __half)
+ #ifdef BF16_AVAILABLE
+     DISPATCH_TOP_K_GATING(kBFloat16, __nv_bfloat16)
+ #endif
+
+     TORCH_CHECK(false, "Unsupported dtype for logits in top_k_gating");
+ }
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.cuh ADDED
@@ -0,0 +1,25 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ #pragma once
+
+ #include "ds_kernel_utils.h"
+ #include "ragged_dtypes.h"
+
+ namespace gating {
+ constexpr int unassigned = -1;
+ }  // namespace gating
+
+ template <typename T>
+ void launch_top_k_gating(int32_t* expert_counts,
+         float* scores,
+         int32_t* assignments,
+         int32_t* offsets,
+         const T* logits,
+         const RaggedBatchDescriptor* batch_metadata,
+         const int32_t n_tokens,
+         const int32_t n_experts,
+         const int32_t n_top_k,
+         cudaStream_t stream);
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.h ADDED
@@ -0,0 +1,21 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ #pragma once
+
+ #include <c10/cuda/CUDAStream.h>
+ #include <torch/extension.h>
+ #include "ragged_dtypes.h"
+ #include "top_k_gating.cuh"
+
+ /*
+ Perform softmax plus atomics to get token mapping.
+ */
+ void top_k_gating(torch::Tensor& expert_counts,
+         torch::Tensor& scores,
+         torch::Tensor& assignments,
+         torch::Tensor& offsets,
+         torch::Tensor& logits,
+         torch::Tensor& batch_metadata);
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.py ADDED
@@ -0,0 +1,59 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import torch
+
+ from typing import Tuple
+
+ from ... import DSKernelBase
+ from ....inference_utils import DtypeEnum
+ from ....ragged import RaggedBatchWrapper
+ from deepspeed.ops.op_builder import RaggedOpsBuilder
+
+
+ class RaggedTopKGating(DSKernelBase):
+     """
+     CUDA implementation of top-1 gating. This will perform a softmax on the logits,
+     and return the scale as well as its idx within that expert's allocation.
+     """
+
+     supported_logit_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16, DtypeEnum.fp32]
+
+     def __init__(self, logit_dtype: DtypeEnum) -> None:
+
+         if not isinstance(logit_dtype, DtypeEnum):
+             logit_dtype = DtypeEnum(logit_dtype)
+
+         if logit_dtype not in RaggedTopKGating.supported_logit_dtypes:
+             raise RuntimeError(f"Unsupported logit dtype {logit_dtype}")
+
+         inf_module = RaggedOpsBuilder().load()
+         self.kernel = inf_module.top_k_gating
+
+     def __call__(self, expert_counts: torch.Tensor, scores: torch.Tensor, assignments: torch.Tensor,
+                  offsets: torch.Tensor, logits: torch.Tensor,
+                  batch: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
+         """
+         Perform the ragged top_k_gating.
+
+         Arguments:
+             expert_counts (torch.Tensor): Tensor of 0s of shape [n_experts] to be filled with
+                 number of tokens assigned to each expert. This must be filled with 0s else
+                 the copy kernel will buffer overflow. In order to minimize the zero-fill cost,
+                 it is recommended to write to 0 during the MoE output remapping.
+             scores (torch.Tensor): Preallocated output of shape [n_tokens, n_top_k] to place expert scaling
+                 value.
+             expert_assignment (torch.Tensor): Preallocated output of shape [n_tokens, n_top_k] to place
+                 which expert a token has been assigned to.
+             expert_offset (torch.Tensor): Preallocated output of shape [n_tokens, n_top_k] to place which
+                 offset within an experts group a token is.
+             logits (torch.Tensor): Raw logits of gating function.
+             batch (RaggedBatchWrapper): Batch information for ragged tensor.
+
+         Returns:
+             tuple of (expert_counts, scores, expert_assignment, expert_offset)
+         """
+         self.kernel(expert_counts, scores, assignments, offsets, logits, batch.batch_metadata_buffer())
+         return expert_counts, scores, assignments, offsets
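A rough usage sketch for the gating kernel, showing the buffer shapes and the zero-fill requirement described in the docstring (sizes are illustrative; building the RaggedBatchWrapper is elided):

import torch

n_tokens, n_experts, n_top_k = 8, 16, 2

gate = RaggedTopKGating(torch.float16)

# expert_counts must start zeroed, per the docstring above.
expert_counts = torch.zeros(n_experts, dtype=torch.int32, device="cuda")
scores = torch.empty(n_tokens, n_top_k, dtype=torch.float32, device="cuda")
assignments = torch.empty(n_tokens, n_top_k, dtype=torch.int32, device="cuda")
offsets = torch.empty(n_tokens, n_top_k, dtype=torch.int32, device="cuda")
logits = torch.randn(n_tokens, n_experts, dtype=torch.float16, device="cuda")

# `batch` must be a RaggedBatchWrapper for the current forward pass (not shown here).
# expert_counts, scores, assignments, offsets = gate(
#     expert_counts, scores, assignments, offsets, logits, batch)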
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating_cuda.cu ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "conversion_utils.h"
7
+ #include "memory_access_utils.h"
8
+ #include "reduction_utils.h"
9
+ #include "top_k_gating.cuh"
10
+ #include "top_k_utils.h"
11
+
12
+ using ROp = reduce::ROpType;
13
+
14
+ template <typename T, int TOP_K>
15
+ __global__ void top_k_gating_kernel(int32_t* expert_counts,
16
+ float* scores,
17
+ int32_t* assignments,
18
+ int32_t* offsets,
19
+ const T* logits,
20
+ const RaggedBatchDescriptor* batch_metadata,
21
+ const int32_t n_experts)
22
+ {
23
+ const int32_t token_idx = blockIdx.x;
24
+ const int32_t expert_idx = threadIdx.x;
25
+ const int32_t max_warps = 1024 / hw_warp_size;
26
+
27
+ // CG helpers
28
+ cg::thread_block tb = cg::this_thread_block();
29
+ cg::thread_block_tile<hw_warp_size> warp = cg::tiled_partition<hw_warp_size>(tb);
30
+
31
+ // Padding tokens do not require
32
+ if (token_idx >= batch_metadata->n_tokens) {
33
+ if (threadIdx.x == 0) {
34
+ #pragma unroll
35
+ for (int i = 0; i < TOP_K; i++) {
36
+ assignments[token_idx * TOP_K + i] = gating::unassigned;
37
+ offsets[token_idx * TOP_K + i] = gating::unassigned;
38
+ }
39
+ }
40
+ return;
41
+ }
42
+
43
+ const T* token_logits = logits + token_idx * n_experts;
44
+
45
+ float logit_val;
46
+ if (expert_idx < n_experts) {
47
+ logit_val = conversion::to<float>(token_logits[expert_idx]);
48
+ } else {
49
+ reduce::init<ROp::Max>(&logit_val);
50
+ }
51
+ float reduce_val = logit_val;
52
+
53
+ int32_t local_assigned_experts[TOP_K];
54
+ float local_assigned_logits[TOP_K];
55
+
56
+ // Training code tends to use ``torch.argmax`` to select the expert, which
57
+ // which has ties broken by the lower index. Since our fused comparison algorithm
58
+ // breaks ties by the higher index (since it's the lower 32-bits of the 64-bit
59
+ // comparison), we invert the expert index to break ties by the lower index.
60
+ int32_t inverted_expert = n_experts - expert_idx - 1;
61
+
62
+ // Find the top k logits
63
+ for (int i = 0; i < TOP_K; ++i) {
64
+ const reduce::IdxReduceResult res =
65
+ reduce::idx_reduce<ROp::Max, max_warps>(tb, warp, reduce_val, inverted_expert);
66
+ local_assigned_experts[i] = n_experts - res.idx - 1;
67
+ local_assigned_logits[i] = res.val;
68
+
69
+ // Set the max logit to -inf so that it is not selected again
70
+ if (threadIdx.x == n_experts - res.idx - 1) { reduce::init<ROp::Max>(&reduce_val); }
71
+ }
72
+
73
+ const float max_logit = local_assigned_logits[0];
74
+ float softmax_sum = __expf(logit_val - max_logit);
75
+ reduce::block<ROp::Add>(tb, warp, softmax_sum);
76
+
77
+ for (int i = 0; i < TOP_K; ++i) {
78
+ const float softmax = __expf(local_assigned_logits[i] - max_logit) / softmax_sum;
79
+
80
+ if (threadIdx.x == 0) {
81
+ scores[token_idx * TOP_K + i] = softmax;
82
+ assignments[token_idx * TOP_K + i] = local_assigned_experts[i];
83
+ offsets[token_idx * TOP_K + i] =
84
+ atomicAdd(expert_counts + local_assigned_experts[i], 1);
85
+ }
86
+ }
87
+ }
88
+
89
+ template <typename T>
90
+ void launch_top_k_gating(int32_t* expert_counts,
91
+ float* scores,
92
+ int32_t* assignments,
93
+ int32_t* offsets,
94
+ const T* logits,
95
+ const RaggedBatchDescriptor* batch_metadata,
96
+ const int32_t n_tokens,
97
+ const int32_t n_experts,
98
+ const int32_t n_top_k,
99
+ cudaStream_t stream)
100
+ {
101
+ const dim3 grid(n_tokens);
102
+ const dim3 block(((n_experts + hw_warp_size - 1) / hw_warp_size) * hw_warp_size);
103
+
104
+ TOP_K_SWITCH(n_top_k, [&] {
105
+ top_k_gating_kernel<T, CONST_TOP_K><<<grid, block, 0, stream>>>(
106
+ expert_counts, scores, assignments, offsets, logits, batch_metadata, n_experts);
107
+ });
108
+ }
109
+
110
+ #define INSTANTIATE_top_k_KERNEL(T) \
111
+ template void launch_top_k_gating<T>(int32_t * expert_counts, \
112
+ float* scores, \
113
+ int32_t* assignments, \
114
+ int32_t* offsets, \
115
+ const T* logits, \
116
+ const RaggedBatchDescriptor* batch_metadata, \
117
+ const int32_t n_tokens, \
118
+ const int32_t n_experts, \
119
+ const int32_t n_top_k, \
120
+ cudaStream_t stream);
121
+
122
+ INSTANTIATE_top_k_KERNEL(float) INSTANTIATE_top_k_KERNEL(__half)
123
+ #ifdef BF16_AVAILABLE
124
+ INSTANTIATE_top_k_KERNEL(__nv_bfloat16)
125
+ #endif
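As a host-side cross-check of the kernel above, the sketch below restates its per-token semantics in plain PyTorch: top-k selection with ties intended to go to the lower expert index (the kernel inverts indices to emulate ``torch.argmax``), a softmax over the full logit row evaluated at the selected experts, and an offset equal to the token's position within its expert's group. This is an illustrative reference, not the shipped implementation: it skips the padding-token path, and it assigns offsets in deterministic token order whereas the kernel uses atomicAdd.

import torch

def top_k_gating_reference(logits: torch.Tensor, top_k: int):
    # logits: [n_tokens, n_experts]
    n_tokens, n_experts = logits.shape
    probs = torch.softmax(logits.float(), dim=-1)
    # Note: torch.topk tie-breaking is backend-dependent; the kernel explicitly
    # forces ties toward the lower expert index.
    _, assignments = torch.topk(logits.float(), top_k, dim=-1)
    scores = probs.gather(-1, assignments)

    expert_counts = torch.zeros(n_experts, dtype=torch.int32)
    offsets = torch.zeros_like(assignments, dtype=torch.int32)
    for t in range(n_tokens):
        for i in range(top_k):
            e = int(assignments[t, i])
            offsets[t, i] = expert_counts[e]
            expert_counts[e] += 1
    return expert_counts, scores, assignments.to(torch.int32), offsets
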
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .policy import Llama2Policy
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__pycache__/container.cpython-310.pyc ADDED
Binary file (1.77 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/container.py ADDED
@@ -0,0 +1,78 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ # Create a container object to save model-specific tensors using the policy file above.
7
+
8
+ from ..common_parameters import *
9
+ from ..layer_container_base import LayerContainer
10
+ '''
11
+ # HF Llama model looks like this:
12
+
13
+ LlamaForCausalLM(
14
+ (model): LlamaModel(
15
+ (embed_tokens): Embedding(32000, 4096, padding_idx=0)
16
+ (layers): ModuleList(
17
+ (0-31): 32 x LlamaDecoderLayer(
18
+ (self_attn): LlamaAttention(
19
+ (q_proj): Linear(in_features=4096, out_features=4096, bias=False)
20
+ (k_proj): Linear(in_features=4096, out_features=4096, bias=False)
21
+ (v_proj): Linear(in_features=4096, out_features=4096, bias=False)
22
+ (o_proj): Linear(in_features=4096, out_features=4096, bias=False)
23
+ (rotary_emb): LlamaRotaryEmbedding()
24
+ )
25
+ (mlp): LlamaMLP(
26
+ (gate_proj): Linear(in_features=4096, out_features=11008, bias=False)
27
+ (up_proj): Linear(in_features=4096, out_features=11008, bias=False)
28
+ (down_proj): Linear(in_features=11008, out_features=4096, bias=False)
29
+ (act_fn): SiLUActivation()
30
+ )
31
+ (input_layernorm): LlamaRMSNorm()
32
+ (post_attention_layernorm): LlamaRMSNorm()
33
+ )
34
+ )
35
+ (norm): LlamaRMSNorm()
36
+ )
37
+ (lm_head): Linear(in_features=4096, out_features=32000, bias=False)
38
+ )
39
+ '''
40
+
41
+
42
+ class Llama2TransformerContainer(LayerContainer):
43
+ """
44
+ Transformer layer container for the Llama-2 model.
45
+ """
46
+ qkv_w: UnfusedQKVParameter
47
+ attn_out_w: AttentionOutputParameter
48
+ mlp_1_w: GatedMLPParameter
49
+ mlp_2_w: MLP2Parameter
50
+ attn_norm_gamma: NormParameter
51
+ mlp_norm_gamma: NormParameter
52
+
53
+ PARAM_MAPPING = {
54
+ "self_attn.q_proj.weight": "qkv_w.q_params",
55
+ "self_attn.k_proj.weight": "qkv_w.k_params",
56
+ "self_attn.v_proj.weight": "qkv_w.v_params",
57
+ "self_attn.o_proj.weight": "attn_out_w.params",
58
+ "mlp.gate_proj.weight": "mlp_1_w.gate_params",
59
+ "mlp.up_proj.weight": "mlp_1_w.up_params",
60
+ "mlp.down_proj.weight": "mlp_2_w.params",
61
+ "input_layernorm.weight": "attn_norm_gamma.params",
62
+ "post_attention_layernorm.weight": "mlp_norm_gamma.params",
63
+ }
64
+
65
+
66
+ class Llama2NonTransformerContainer(LayerContainer):
67
+ """
68
+ Non-Transformer layer container for the Llama-2 model.
69
+ """
70
+ word_emb: EmbeddingParameter
71
+ word_unembed: UnembedParameter
72
+ final_norm: NormParameter
73
+
74
+ PARAM_MAPPING = {
75
+ "model.embed_tokens.weight": "word_emb.params",
76
+ "model.norm.weight": "final_norm.params",
77
+ "lm_head.weight": "word_unembed.params",
78
+ }
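The PARAM_MAPPING dictionaries above are keyed by parameter names relative to a single decoder layer. A rough sketch of the routing this enables, with a hypothetical prefix-stripping helper that is not part of the real LayerContainer machinery:

import re

LAYER_PREFIX = re.compile(r"model\.layers\.(\d+)\.(.+)")

def route_checkpoint_name(full_name: str, param_mapping: dict):
    """Map e.g. 'model.layers.17.self_attn.q_proj.weight' -> (17, 'qkv_w.q_params')."""
    m = LAYER_PREFIX.match(full_name)
    if m is None:
        return None  # non-transformer parameter (embedding, final norm, lm_head)
    layer_idx, relative_name = int(m.group(1)), m.group(2)
    return layer_idx, param_mapping.get(relative_name)

# route_checkpoint_name("model.layers.17.self_attn.q_proj.weight",
#                       {"self_attn.q_proj.weight": "qkv_w.q_params"})
# -> (17, "qkv_w.q_params")
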
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/model.py ADDED
@@ -0,0 +1,209 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Iterable, Optional, Tuple
7
+
8
+ import torch
9
+
10
+ import deepspeed.comm as dist
11
+
12
+ from ...allocator import empty_from
13
+ from ...inference_utils import ActivationType, DtypeEnum
14
+ from .. import *
15
+ from ...modules.configs import *
16
+ from ...modules.interfaces import *
17
+ from ...ragged import RaggedBatchWrapper
18
+
19
+ from .container import Llama2NonTransformerContainer, Llama2TransformerContainer
20
+
21
+
22
+ class Llama2InferenceModel(DSTransformerModelBase):
23
+ """
24
+ Inference model implementation for ragged batching for Llama-2 models.
25
+ """
26
+
27
+ _non_transformer: Optional[Llama2NonTransformerContainer]
28
+ """
29
+ Embed + unembed container. Specializing the type annotation.
30
+ """
31
+
32
+ _transformer: Optional[Iterable[Llama2TransformerContainer]]
33
+ """
34
+ Per-layer transformer container. Specializing the type annotation.
35
+ """
36
+ """
37
+ Properties inherited from `DSInferenceModelBase`
38
+ """
39
+
40
+ @property
41
+ def max_sequence_length(self) -> int:
42
+ return self._config.max_seq_length
43
+
44
+ """
45
+ Properties inherited from `DSTransformerModelBase`
46
+ """
47
+
48
+ @property
49
+ def num_layers(self) -> int:
50
+ return self._config.num_hidden_layers
51
+
52
+ @property
53
+ def model_dim(self) -> int:
54
+ return self._config.hidden_size
55
+
56
+ @property
57
+ def vocab_size(self) -> int:
58
+ return self._config.vocab_size
59
+
60
+ @property
61
+ def head_size(self) -> int:
62
+ return self.model_dim // self.n_heads
63
+
64
+ @property
65
+ def n_heads(self) -> int:
66
+ return self._config.num_attention_heads
67
+
68
+ @property
69
+ def intermediate_dim(self) -> int:
70
+ return self._config.intermediate_size
71
+
72
+ @property
73
+ def n_heads_kv(self) -> int:
74
+ return self._config.num_key_value_heads
75
+
76
+ @property
77
+ def activation_dtype(self) -> DtypeEnum:
78
+ if self._config.torch_dtype == torch.float16:
79
+ return DtypeEnum.fp16
80
+ elif self._config.torch_dtype == torch.bfloat16:
81
+ return DtypeEnum.bf16
82
+ else:
83
+ raise NotImplementedError("Only fp16 and bf16 are supported")
84
+
85
+ @property
86
+ def mlp_activation_fn(self) -> ActivationType:
87
+ activation = self._config.hidden_act.lower()
88
+ # llama model family is special and is always gated so force gated versions of relu, gelu, silu
89
+ if activation == "gelu":
90
+ return ActivationType.GEGLU
91
+ elif activation == "relu":
92
+ return ActivationType.ReGLU
93
+ elif activation == "gegelu":
94
+ return ActivationType.GEGLU
95
+ elif activation == "silu":
96
+ return ActivationType.SiGLU
97
+ else:
98
+ raise NotImplementedError(f"Activation {activation} not supported")
99
+
100
+ @property
101
+ def norm_type(self) -> NormTypeEnum:
102
+ return NormTypeEnum.RMSNorm
103
+
104
+ @property
105
+ def positional_embedding_type(self) -> PositionalEmbeddingType:
106
+ return PositionalEmbeddingType.rotate_half
107
+
108
+ @property
109
+ def positional_embedding_config(self) -> Optional[RotateHalfConfig]:
110
+ return RotateHalfConfig(theta_base=self._config.rope_theta)
111
+
112
+ """
113
+ Forward implementations
114
+ """
115
+
116
+ def _forward_embed(self, ragged_batch: RaggedBatchWrapper) -> torch.Tensor:
117
+ """
118
+ Performs the embedding lookup prior to running the transformer of the model.
119
+
120
+ Arguments:
121
+ ragged_batch (RaggedBatchWrapper): The batch to embed.
122
+
123
+ Returns:
124
+ torch.Tensor: The embedded batch.
125
+ """
126
+ embed = self.embed(ragged_batch, self._non_transformer.word_emb)
127
+
128
+ if embed.shape[-1] != self.model_dim:
129
+ raise ValueError(f"Embedding output shape {embed.shape} does not match model_dim {self.model_dim}")
130
+
131
+ return embed
132
+
133
+ def _forward_transformer_layer(self, layer_idx: int, residual: torch.Tensor, hidden_states: torch.Tensor,
134
+ ragged_batch_info: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor]:
135
+ """
136
+ Executes one (slightly offset) layer of the transformer. This implementation does a peek-ahead
137
+ optimization to fuse the layer norm of the next layer into the current layer.
138
+
139
+ Arguments:
140
+ layer_idx (int): The index of the layer to execute.
141
+ residual (torch.Tensor): The residual tensor from the previous layer.
142
+ hidden_states (torch.Tensor): The hidden states from the previous layer. This is the
143
+ hidden states after pre normalization.
144
+ ragged_batch_info (RaggedBatchWrapper): The batch metadata.
145
+ """
146
+ # TODO(cmikeh2): Distribute ragged_batch_info to all modules
147
+
148
+ cur_params = self._transformer[layer_idx]
149
+ kv_cache = self.state_manager.get_cache(layer_idx)
150
+
151
+ hidden_states = self.qkv(hidden_states, cur_params.qkv_w, b=None)
152
+ hidden_states = self.attn(hidden_states, kv_cache, ragged_batch_info)
153
+ hidden_states = self.attn_out(hidden_states, cur_params.attn_out_w, b=None)
154
+
155
+ if self.tp_size > 1:
156
+ dist.all_reduce(hidden_states, group=self._base_mp_group)
157
+
158
+ residual, hidden_states = self.norm(residual, hidden_states, cur_params.mlp_norm_gamma, beta=None)
159
+
160
+ # Should be configurable in the future
161
+ hidden_states = self.mlp_1(hidden_states, cur_params.mlp_1_w, b=None)
162
+ hidden_states = self.mlp_2(hidden_states, cur_params.mlp_2_w, b=None)
163
+
164
+ if self.tp_size > 1:
165
+ dist.all_reduce(hidden_states, group=self._base_mp_group)
166
+
167
+ if layer_idx != self.num_layers - 1:
168
+ next_params = self._transformer[layer_idx + 1]
169
+ residual, hidden_states = self.norm(residual, hidden_states, next_params.attn_norm_gamma, beta=None)
170
+ else:
171
+ # On last layer, we just need to perform the residual add. Adding into the residual
172
+ # here is safe.
173
+ residual.add_(hidden_states)
174
+
175
+ return residual, hidden_states
176
+
177
+ def _forward_unembed(self, hidden_states: torch.Tensor, ragged_batch_info: RaggedBatchWrapper) -> torch.Tensor:
178
+ """
179
+ Performs unembedding of the hidden states to logits. This will only sample the final
180
+ token of each sequence.
181
+ """
182
+ logits = self.unembed(hidden_states,
183
+ self._non_transformer.word_unembed,
184
+ ragged_batch_info,
185
+ gamma=self._non_transformer.final_norm)
186
+
187
+ if self.tp_size > 1:
188
+ comm_buffer = empty_from(self._comm_logits, (self.tp_size, logits.shape[0], logits.shape[1]))
189
+ full_logits = empty_from(self._return_logits, (logits.shape[0], self.vocab_size))
190
+
191
+ dist.all_gather_into_tensor(comm_buffer, logits, group=self._base_mp_group)
192
+
193
+ full_logits.copy_(comm_buffer.permute(1, 0, 2).reshape(logits.shape[0], self.vocab_size))
194
+
195
+ return full_logits
196
+ else:
197
+ return logits
198
+
199
+ def forward(self, wrapped_batch: RaggedBatchWrapper) -> torch.Tensor:
200
+
201
+ residual = self._forward_embed(wrapped_batch)
202
+
203
+ residual, hidden_states = self.norm(residual, None, self._transformer[0].attn_norm_gamma, beta=None)
204
+
205
+ for layer_idx in range(self.num_layers):
206
+ residual, hidden_states = self._forward_transformer_layer(layer_idx, residual, hidden_states,
207
+ wrapped_batch)
208
+
209
+ return self._forward_unembed(residual, wrapped_batch)
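The loop in forward() above threads two tensors through every layer: `residual` accumulates the pre-norm residual stream, and `hidden_states` is always the already-normalized input to the next module, because each layer applies the *next* layer's attention norm before returning (the peek-ahead fusion noted in the docstring). A stripped-down sketch of that control flow, assuming self.norm performs a residual add followed by RMSNorm, which is inferred from how it is called and from the bare residual add on the last layer:

import torch

def fused_add_rmsnorm(residual, hidden, gamma, eps=1e-6):
    # Assumed semantics of self.norm(...): optional residual add, then RMSNorm.
    if hidden is not None:
        residual = residual + hidden
    normed = residual * torch.rsqrt(residual.pow(2).mean(-1, keepdim=True) + eps) * gamma
    return residual, normed

def toy_forward(embed_out, layers):
    # layers[i] = {"attn": fn, "mlp": fn, "attn_norm": gamma, "mlp_norm": gamma}
    residual = embed_out
    residual, hidden = fused_add_rmsnorm(residual, None, layers[0]["attn_norm"])
    for i, layer in enumerate(layers):
        hidden = layer["attn"](hidden)
        residual, hidden = fused_add_rmsnorm(residual, hidden, layer["mlp_norm"])
        hidden = layer["mlp"](hidden)
        if i != len(layers) - 1:
            residual, hidden = fused_add_rmsnorm(residual, hidden, layers[i + 1]["attn_norm"])  # peek-ahead
        else:
            residual = residual + hidden  # last layer: plain residual add, as above
    return residual
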
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/policy.py ADDED
@@ -0,0 +1,31 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Any
7
+
8
+ from ...config_v2 import RaggedInferenceEngineConfig
9
+ from ..inference_policy_base import ContainerMap, InferenceV2Policy
10
+ from .container import Llama2NonTransformerContainer, Llama2TransformerContainer
11
+ from .model import Llama2InferenceModel
12
+
13
+
14
+ class Llama2Policy(InferenceV2Policy):
15
+
16
+ def instantiate_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> Llama2InferenceModel:
17
+ return Llama2InferenceModel(config=self._model_config, engine_config=engine_config, base_mp_group=mp_group)
18
+
19
+ def build_container_map(self) -> ContainerMap:
20
+ map = ContainerMap()
21
+
22
+ transformer_containers = [Llama2TransformerContainer(self.model) for _ in range(self.model.num_layers)]
23
+
24
+ map.set_transformer_params(['model.layers'], transformer_containers)
25
+
26
+ map.set_non_transformer_params(Llama2NonTransformerContainer(self.model))
27
+
28
+ map.set_unmapped_params(
29
+ [f'model.layers.{i}.self_attn.rotary_emb.inv_freq' for i in range(self.model.num_layers)])
30
+
31
+ return map
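The ContainerMap built above partitions every checkpoint parameter into one of three buckets: per-layer transformer parameters under 'model.layers', the non-transformer parameters, and explicitly unmapped tensors such as the rotary inv_freq buffers. The illustrative check below is not the real ContainerMap API; it only spells out the partition the policy is expressing.

def classify_param(name: str, num_layers: int) -> str:
    non_transformer = {"model.embed_tokens.weight", "model.norm.weight", "lm_head.weight"}
    unmapped = {f"model.layers.{i}.self_attn.rotary_emb.inv_freq" for i in range(num_layers)}
    if name in unmapped:
        return "unmapped"          # present in the checkpoint but not loaded
    if name in non_transformer:
        return "non_transformer"   # handled by Llama2NonTransformerContainer
    if name.startswith("model.layers."):
        return "transformer"       # handled by a per-layer Llama2TransformerContainer
    raise KeyError(f"Unrecognized parameter: {name}")

# classify_param("model.layers.3.mlp.up_proj.weight", num_layers=32)   -> "transformer"
# classify_param("model.layers.3.self_attn.rotary_emb.inv_freq", 32)   -> "unmapped"
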
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .policy import MistralPolicy
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (264 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/container.cpython-310.pyc ADDED
Binary file (1.86 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/model.cpython-310.pyc ADDED
Binary file (6.86 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/policy.cpython-310.pyc ADDED
Binary file (1.56 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/container.py ADDED
@@ -0,0 +1,77 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ # Create a container object to save model-specific tensors using the policy file above.
7
+
8
+ from deepspeed.inference.v2.model_implementations.common_parameters import *
9
+ from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer
10
+ '''
11
+ # HF Mistral model (mistralai/Mistral-7B-v0.1) looks like this:
12
+ MistralForCausalLM(
13
+ (model): MistralModel(
14
+ (embed_tokens): Embedding(32000, 4096)
15
+ (layers): ModuleList(
16
+ (0-31): 32 x MistralDecoderLayer(
17
+ (self_attn): MistralAttention(
18
+ (q_proj): Linear(in_features=4096, out_features=4096, bias=False)
19
+ (k_proj): Linear(in_features=4096, out_features=1024, bias=False)
20
+ (v_proj): Linear(in_features=4096, out_features=1024, bias=False)
21
+ (o_proj): Linear(in_features=4096, out_features=4096, bias=False)
22
+ (rotary_emb): MistralRotaryEmbedding()
23
+ )
24
+ (mlp): MistralMLP(
25
+ (gate_proj): Linear(in_features=4096, out_features=14336, bias=False)
26
+ (up_proj): Linear(in_features=4096, out_features=14336, bias=False)
27
+ (down_proj): Linear(in_features=14336, out_features=4096, bias=False)
28
+ (act_fn): SiLUActivation()
29
+ )
30
+ (input_layernorm): MistralRMSNorm()
31
+ (post_attention_layernorm): MistralRMSNorm()
32
+ )
33
+ )
34
+ (norm): MistralRMSNorm()
35
+ )
36
+ (lm_head): Linear(in_features=4096, out_features=32000, bias=False)
37
+ )
38
+ '''
39
+
40
+
41
+ class MistralTransformerContainer(LayerContainer):
42
+ """
43
+ Transformer layer container for the Mistral model.
44
+ """
45
+ qkv_w: UnfusedQKVParameter
46
+ attn_out_w: AttentionOutputParameter
47
+ mlp_1_w: GatedMLPParameter
48
+ mlp_2_w: MLP2Parameter
49
+ attn_norm_gamma: NormParameter
50
+ mlp_norm_gamma: NormParameter
51
+
52
+ PARAM_MAPPING = {
53
+ "self_attn.q_proj.weight": "qkv_w.q_params",
54
+ "self_attn.k_proj.weight": "qkv_w.k_params",
55
+ "self_attn.v_proj.weight": "qkv_w.v_params",
56
+ "self_attn.o_proj.weight": "attn_out_w.params",
57
+ "mlp.gate_proj.weight": "mlp_1_w.gate_params",
58
+ "mlp.up_proj.weight": "mlp_1_w.up_params",
59
+ "mlp.down_proj.weight": "mlp_2_w.params",
60
+ "input_layernorm.weight": "attn_norm_gamma.params",
61
+ "post_attention_layernorm.weight": "mlp_norm_gamma.params",
62
+ }
63
+
64
+
65
+ class MistralNonTransformerContainer(LayerContainer):
66
+ """
67
+ Non-Transformer layer container for the Mistral model.
68
+ """
69
+ word_emb: EmbeddingParameter
70
+ word_unembed: UnembedParameter
71
+ final_norm: NormParameter
72
+
73
+ PARAM_MAPPING = {
74
+ "model.embed_tokens.weight": "word_emb.params",
75
+ "model.norm.weight": "final_norm.params",
76
+ "lm_head.weight": "word_unembed.params",
77
+ }
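One detail worth calling out from the module tree above: unlike the Llama-2-7B tree shown earlier, Mistral-7B uses grouped-query attention, which shows up in the narrower k_proj/v_proj output width. The head arithmetic, taken directly from the printed shapes (the model implementation below surfaces the result as n_heads_kv from the config's num_key_value_heads):

hidden_size = 4096
num_attention_heads = 32
head_size = hidden_size // num_attention_heads        # 128
kv_proj_out_features = 1024                           # k_proj / v_proj out_features above
num_key_value_heads = kv_proj_out_features // head_size
assert (head_size, num_key_value_heads) == (128, 8)   # 8 KV heads vs. 32 query heads
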
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/model.py ADDED
@@ -0,0 +1,207 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Iterable, Optional, Tuple
7
+
8
+ import torch
9
+
10
+ import deepspeed.comm as dist
11
+
12
+ from ...allocator import empty_from
13
+ from ...inference_utils import ActivationType, DtypeEnum
14
+ from ...model_implementations import *
15
+ from ...modules.configs import *
16
+ from ...modules.interfaces import *
17
+ from ...ragged import RaggedBatchWrapper
18
+
19
+ from .container import MistralNonTransformerContainer, MistralTransformerContainer
20
+
21
+
22
+ class MistralInferenceModel(DSTransformerModelBase):
23
+ """
24
+ Inference model implementation for ragged batching for Mistral models.
25
+ """
26
+
27
+ _non_transformer: Optional[MistralNonTransformerContainer]
28
+ """
29
+ Embed + unembed container. Specializing the type annotation.
30
+ """
31
+
32
+ _transformer: Optional[Iterable[MistralTransformerContainer]]
33
+ """
34
+ Per-layer transformer container. Specializing the type annotation.
35
+ """
36
+ """
37
+ Properties inherited from `DSInferenceModelBase`
38
+ """
39
+
40
+ @property
41
+ def max_sequence_length(self) -> int:
42
+ return self._config.max_seq_length
43
+
44
+ """
45
+ Properties ineherited from `DSTransformerModelBase`
46
+ """
47
+
48
+ @property
49
+ def num_layers(self) -> int:
50
+ return self._config.num_hidden_layers
51
+
52
+ @property
53
+ def model_dim(self) -> int:
54
+ return self._config.hidden_size
55
+
56
+ @property
57
+ def vocab_size(self) -> int:
58
+ return self._config.vocab_size
59
+
60
+ @property
61
+ def head_size(self) -> int:
62
+ return self.model_dim // self.n_heads
63
+
64
+ @property
65
+ def n_heads(self) -> int:
66
+ return self._config.num_attention_heads
67
+
68
+ @property
69
+ def intermediate_dim(self) -> int:
70
+ return self._config.intermediate_size
71
+
72
+ @property
73
+ def n_heads_kv(self) -> int:
74
+ return self._config.num_key_value_heads
75
+
76
+ @property
77
+ def activation_dtype(self) -> DtypeEnum:
78
+ if self._config.torch_dtype == torch.float16:
79
+ return DtypeEnum.fp16
80
+ elif self._config.torch_dtype == torch.bfloat16:
81
+ return DtypeEnum.bf16
82
+ else:
83
+ raise NotImplementedError("Only fp16 and bf16 are supported")
84
+
85
+ @property
86
+ def mlp_activation_fn(self) -> ActivationType:
87
+ activation = self._config.hidden_act.lower()
88
+ if activation == "gelu":
89
+ return ActivationType.GEGLU
90
+ elif activation == "relu":
91
+ return ActivationType.ReGLU
92
+ elif activation == "gegelu":
93
+ return ActivationType.GEGLU
94
+ elif activation == "silu":
95
+ return ActivationType.SiGLU
96
+ else:
97
+ raise NotImplementedError(f"Activation {activation} not supported")
98
+
99
+ @property
100
+ def norm_type(self) -> NormTypeEnum:
101
+ return NormTypeEnum.RMSNorm
102
+
103
+ @property
104
+ def positional_embedding_type(self) -> PositionalEmbeddingType:
105
+ return PositionalEmbeddingType.rotate_half
106
+
107
+ @property
108
+ def positional_embedding_config(self) -> Optional[RotateHalfConfig]:
109
+ return RotateHalfConfig(theta_base=self._config.rope_theta)
110
+
111
+ """
112
+ Forward implementations
113
+ """
114
+
115
+ def _forward_embed(self, ragged_batch: RaggedBatchWrapper) -> torch.Tensor:
116
+ """
117
+ Performs the embedding lookup prior to running the transformer of the model.
118
+
119
+ Arguments:
120
+ ragged_batch (RaggedBatchWrapper): The batch to embed.
121
+
122
+ Returns:
123
+ torch.Tensor: The embedded batch.
124
+ """
125
+ embed = self.embed(ragged_batch, self._non_transformer.word_emb)
126
+
127
+ if embed.shape[-1] != self.model_dim:
128
+ raise ValueError(f"Embedding output shape {embed.shape} does not match model_dim {self.model_dim}")
129
+
130
+ return embed
131
+
132
+ def _forward_transformer(self, layer_idx: int, residual: torch.Tensor, hidden_states: torch.Tensor,
133
+ ragged_batch_info: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor]:
134
+ """
135
+ Executes one (slightly offset) layer of the transformer. This implementation does a peek-ahead
136
+ optimization to fuse the layer norm of the next layer into the current layer.
137
+
138
+ Arguments:
139
+ layer_idx (int): The index of the layer to execute.
140
+ residual (torch.Tensor): The residual tensor from the previous layer.
141
+ hidden_states (torch.Tensor): The hidden states from the previous layer. This is the
142
+ hidden states after pre normalization.
143
+ ragged_batch_info (RaggedBatchWrapper): The batch metadata.
144
+ """
145
+ # TODO(cmikeh2): Distribute ragged_batch_info to all modules
146
+
147
+ cur_params = self._transformer[layer_idx]
148
+ kv_cache = self.state_manager.get_cache(layer_idx)
149
+
150
+ hidden_states = self.qkv(hidden_states, cur_params.qkv_w, b=None)
151
+ hidden_states = self.attn(hidden_states, kv_cache, ragged_batch_info)
152
+ hidden_states = self.attn_out(hidden_states, cur_params.attn_out_w, b=None)
153
+
154
+ if self.tp_size > 1:
155
+ dist.all_reduce(hidden_states, group=self._base_mp_group)
156
+
157
+ residual, hidden_states = self.norm(residual, hidden_states, cur_params.mlp_norm_gamma, beta=None)
158
+
159
+ # Should be configurable in the future
160
+ hidden_states = self.mlp_1(hidden_states, cur_params.mlp_1_w, b=None)
161
+ hidden_states = self.mlp_2(hidden_states, cur_params.mlp_2_w, b=None)
162
+
163
+ if self.tp_size > 1:
164
+ dist.all_reduce(hidden_states, group=self._base_mp_group)
165
+
166
+ if layer_idx != self.num_layers - 1:
167
+ next_params = self._transformer[layer_idx + 1]
168
+ residual, hidden_states = self.norm(residual, hidden_states, next_params.attn_norm_gamma, beta=None)
169
+ else:
170
+ # On last layer, we just need to perform the residual add. Adding into the residual
171
+ # here is safe.
172
+ residual.add_(hidden_states)
173
+
174
+ return residual, hidden_states
175
+
176
+ def _forward_unembed(self, hidden_states: torch.Tensor, ragged_batch_info: RaggedBatchWrapper) -> torch.Tensor:
177
+ """
178
+ Performs unembedding of the hidden states to logits. This will only sample the final
179
+ token of each sequence.
180
+ """
181
+ logits = self.unembed(hidden_states,
182
+ self._non_transformer.word_unembed,
183
+ ragged_batch_info,
184
+ gamma=self._non_transformer.final_norm)
185
+
186
+ if self.tp_size > 1:
187
+ comm_buffer = empty_from(self._comm_logits, (self.tp_size, logits.shape[0], logits.shape[1]))
188
+ full_logits = empty_from(self._return_logits, (logits.shape[0], self.vocab_size))
189
+
190
+ dist.all_gather_into_tensor(comm_buffer, logits, group=self._base_mp_group)
191
+
192
+ full_logits.copy_(comm_buffer.permute(1, 0, 2).reshape(logits.shape[0], self.vocab_size))
193
+
194
+ return full_logits
195
+ else:
196
+ return logits
197
+
198
+ def forward(self, wrapped_batch: RaggedBatchWrapper) -> torch.Tensor:
199
+
200
+ residual = self._forward_embed(wrapped_batch)
201
+
202
+ residual, hidden_states = self.norm(residual, None, self._transformer[0].attn_norm_gamma, beta=None)
203
+
204
+ for layer_idx in range(self.num_layers):
205
+ residual, hidden_states = self._forward_transformer(layer_idx, residual, hidden_states, wrapped_batch)
206
+
207
+ return self._forward_unembed(residual, wrapped_batch)
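In _forward_unembed above, each tensor-parallel rank computes logits only for its slice of the vocabulary, so the gathered buffer has shape [tp_size, n_tokens, vocab_size // tp_size]; the permute(1, 0, 2) followed by reshape stitches the slices back into full [n_tokens, vocab_size] rows. A small self-contained check of that reassembly, with the all-gather simulated by a stack (the contiguous-slice vocabulary sharding is an assumption implied by the reshape):

import torch

tp_size, n_tokens, vocab = 4, 3, 16
shard = vocab // tp_size

full = torch.arange(n_tokens * vocab, dtype=torch.float32).reshape(n_tokens, vocab)
# Rank r holds columns [r * shard, (r + 1) * shard) of the full logits.
rank_logits = [full[:, r * shard:(r + 1) * shard] for r in range(tp_size)]

comm_buffer = torch.stack(rank_logits, dim=0)                  # [tp_size, n_tokens, shard]
reassembled = comm_buffer.permute(1, 0, 2).reshape(n_tokens, vocab)
assert torch.equal(reassembled, full)
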
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/policy.py ADDED
@@ -0,0 +1,30 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Any
7
+
8
+ from ...config_v2 import RaggedInferenceEngineConfig
9
+ from ..inference_policy_base import ContainerMap, InferenceV2Policy
10
+ from .container import MistralNonTransformerContainer, MistralTransformerContainer
11
+ from .model import MistralInferenceModel
12
+
13
+
14
+ class MistralPolicy(InferenceV2Policy):
15
+
16
+ def instantiate_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> MistralInferenceModel:
17
+ return MistralInferenceModel(config=self._model_config, engine_config=engine_config, base_mp_group=mp_group)
18
+
19
+ def build_container_map(self) -> ContainerMap:
20
+ map = ContainerMap()
21
+
22
+ transformer_containers = [MistralTransformerContainer(self.model) for _ in range(self.model.num_layers)]
23
+
24
+ map.set_transformer_params(['model.layers'], transformer_containers)
25
+
26
+ map.set_non_transformer_params(MistralNonTransformerContainer(self.model))
27
+
28
+ map.set_unmapped_params([])
29
+
30
+ return map
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (256 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/container.cpython-310.pyc ADDED
Binary file (2.41 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/model.cpython-310.pyc ADDED
Binary file (6.16 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/policy.cpython-310.pyc ADDED
Binary file (1.57 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (349 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/attn.cpython-310.pyc ADDED
Binary file (1.67 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/attn_out.cpython-310.pyc ADDED
Binary file (2.94 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/embedding.cpython-310.pyc ADDED
Binary file (1.47 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/mlp.cpython-310.pyc ADDED
Binary file (2.53 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/qkv.cpython-310.pyc ADDED
Binary file (4.75 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/types.cpython-310.pyc ADDED
Binary file (506 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/unembed.cpython-310.pyc ADDED
Binary file (1.84 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/utils.cpython-310.pyc ADDED
Binary file (4.38 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/embedding.py ADDED
@@ -0,0 +1,34 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+
8
+ from .types import ShardingType
9
+ from .utils import shard_param, get_shard_endpoints
10
+
11
+
12
+ def shard_embedding_param(param: torch.Tensor, shard_rank: int, num_shards: int) -> torch.Tensor:
13
+ """
14
+ Utility method for sharding an embedding parameter.
15
+
16
+ Args:
17
+ param (torch.Tensor): The parameter to shard. Should be of shape [vocab_size, model_dim]
18
+ shard_rank (int): Which shard of the partitioned tensor to return.
19
+ num_shards (int): The total number of shards the parameter is distributed across.
20
+ """
21
+ return shard_param(param, ShardingType.INNER_DIMENSION, shard_rank, num_shards)
22
+
23
+
24
+ def sharded_embedding_dim(embedding_size: int, shard_rank: int, num_shards: int) -> int:
25
+ """
26
+ Utility method for getting the size of the embedding dimension of a sharded embedding.
27
+
28
+ Args:
29
+ embedding_size (int): The size of the embedding.
30
+ shard_rank (int): Which shard of the partitioned tensor to return.
31
+ num_shards (int): The total number of shards the parameter is distributed across.
32
+ """
33
+ start_idx, end_idx = get_shard_endpoints(embedding_size, shard_rank, num_shards)
34
+ return end_idx - start_idx
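A usage note for the helpers above: sharded_embedding_dim simply measures the width of the slice a rank receives from get_shard_endpoints. The fallback below illustrates the idea with a plain near-even contiguous split; it is not the real utility, which may additionally align shard boundaries to a hardware-friendly granularity.

def get_shard_endpoints_illustrative(dim_size: int, shard_rank: int, num_shards: int):
    # Near-even contiguous split; earlier ranks absorb the remainder.
    base, rem = divmod(dim_size, num_shards)
    start = shard_rank * base + min(shard_rank, rem)
    end = start + base + (1 if shard_rank < rem else 0)
    return start, end

# With this split, a 4096-wide embedding over 4 shards gives every rank 1024 columns:
widths = [e - s for s, e in (get_shard_endpoints_illustrative(4096, r, 4) for r in range(4))]
assert widths == [1024, 1024, 1024, 1024]
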