Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/flat_model_helpers.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/inference_model_base.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/inference_policy_base.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/inference_transformer_base.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/layer_container_base.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/parameter_base.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__init__.py +13 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/mlp_parameters.py +81 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/norm_parameters.py +22 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/qkv_parameters.py +115 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__init__.py +6 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/container.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/model.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/policy.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/container.py +129 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/model.py +213 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__pycache__/model.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__pycache__/policy.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__init__.py +6 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/container.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/model.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/policy.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/container.py +46 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/model.py +261 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/policy.py +31 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__init__.py +6 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/container.py +94 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/model.py +197 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/policy.py +30 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__init__.py +6 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__pycache__/containers.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__pycache__/model.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__pycache__/policy.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/containers.py +91 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/model.py +199 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/policy.py +32 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__init__.py +6 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/container.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/model.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/policy.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/container.py +77 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/model.py +223 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/policy.py +30 -0
- venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__init__.py +6 -0
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (638 Bytes).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/flat_model_helpers.cpython-310.pyc
ADDED
Binary file (8.46 kB).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/inference_model_base.cpython-310.pyc
ADDED
Binary file (9.41 kB).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/inference_policy_base.cpython-310.pyc
ADDED
Binary file (8.95 kB).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/inference_transformer_base.cpython-310.pyc
ADDED
Binary file (21.7 kB).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/layer_container_base.cpython-310.pyc
ADDED
Binary file (11.2 kB).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/parameter_base.cpython-310.pyc
ADDED
Binary file (9.04 kB).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__init__.py
ADDED
@@ -0,0 +1,13 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .attn_output_parameters import *
from .embedding_parameters import *
from .mlp_parameters import *
from .moe_parameters import *
from .norm_parameters import *
from .qkv_parameters import *
from .unembed_parameters import *
from .invfreq_parameters import *
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/mlp_parameters.py
ADDED
@@ -0,0 +1,81 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch

from ...model_implementations.parameter_base import ParameterBase
"""
MLP Parameter Containers
"""


class MLP1Parameter(ParameterBase):
    """
    First MLP projection weight container. This performs a straight pass-through to the
    model implementation for transformation.
    """
    params: torch.Tensor

    def finalize(self) -> torch.Tensor:
        # NOTE(cmikeh2): If we are gated but not in the format specified below, we should trigger a permutation here.
        # I am not currently aware of any models that use this format (or how we should even detect it; probably should
        # just be a different param entirely, but until then we'll just assume the format is correct).
        return self.inference_model.transform_mlp_1_param(self.params)


class GatedMLPParameter(ParameterBase):
    """
    Gated MLP projection container.
    """

    gate_params: torch.Tensor
    """
    Weight parameter for the gating matrix.
    """

    up_params: torch.Tensor
    """
    For lack of a better name, the non-gating weight parameters.
    """

    def finalize(self) -> torch.Tensor:
        """
        Our gated format (this is different from InferenceV1!) is to have the gate and activated neurons
        interleaved. So if we have 4 output neurons (two effective neurons) with 4 input neurons, the finalized
        parameter will look like:
            [g0_0, g0_1, g0_2, g0_3]
            [a0_0, a0_1, a0_2, a0_3]
            [g1_0, g1_1, g1_2, g1_3]
            [a1_0, a1_1, a1_2, a1_3]

        As a reference, in inference v1, the format is:
            [g0_0, g0_1, g0_2, g0_3]
            [g1_0, g1_1, g1_2, g1_3]
            [a0_0, a0_1, a0_2, a0_3]
            [a1_0, a1_1, a1_2, a1_3]
        """
        assert self.gate_params.shape[0] == self.up_params.shape[
            0], "Gated MLP parameters must have the same number of neurons."
        total_neurons = self.gate_params.shape[0] + self.up_params.shape[0]

        # flip the order if even with the correct tokenizer we get wrong output
        #fused_param = torch.cat([self.up_params, self.gate_params], dim=-1).reshape(total_neurons, -1)
        fused_param = torch.cat([self.gate_params, self.up_params], dim=-1).reshape(total_neurons, -1)
        return self.inference_model.transform_mlp_1_param(fused_param)


class MLP2Parameter(ParameterBase):
    """
    Second MLP projection weight container. This performs a straight pass-through to the
    model implementation for transformation.
    """

    params: torch.Tensor
    """
    Full weight parameter.
    """

    def finalize(self) -> torch.Tensor:
        return self.inference_model.transform_mlp_2_param(self.params)
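A quick way to see the interleaved layout described in GatedMLPParameter.finalize is to replay the same cat/reshape on tiny labeled tensors. This is a standalone illustration, not part of the committed file:

import torch

# Two effective neurons with four input features: gate rows g0, g1 and up rows a0, a1.
gate = torch.tensor([[1., 1., 1., 1.],      # g0
                     [2., 2., 2., 2.]])     # g1
up = torch.tensor([[10., 10., 10., 10.],    # a0
                   [20., 20., 20., 20.]])   # a1

total_neurons = gate.shape[0] + up.shape[0]
fused = torch.cat([gate, up], dim=-1).reshape(total_neurons, -1)
print(fused)
# tensor([[ 1.,  1.,  1.,  1.],    <- g0
#         [10., 10., 10., 10.],    <- a0
#         [ 2.,  2.,  2.,  2.],    <- g1
#         [20., 20., 20., 20.]])   <- a1

The rows come out interleaved (g0, a0, g1, a1), matching the InferenceV2 format the docstring describes.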
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/norm_parameters.py
ADDED
@@ -0,0 +1,22 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch

from ...model_implementations.parameter_base import ParameterBase
"""
Common Normalization Parameter Patterns
"""


class NormParameter(ParameterBase):
    """
    Simple normalization container.
    """

    params: torch.Tensor

    def finalize(self) -> torch.Tensor:
        return self.inference_model.transform_norm_param(self.params)
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/qkv_parameters.py
ADDED
@@ -0,0 +1,115 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch

from ...model_implementations.parameter_base import ParameterBase
"""
Common QKV Parameter Patterns
"""


class FusedQKVParameter(ParameterBase):
    """
    Traditional fused QKV parameters for QKV projection. This is functionally
    a direct copy.

    src_qkv_w shape: [3 * out_features, in_features]
    qkv_w shape: [3 * out_features, in_features]
    """

    params: torch.Tensor

    def finalize(self) -> torch.Tensor:
        return self.inference_model.transform_qkv_param(self.params)


class UnfusedQKVParameter(ParameterBase):
    """
    QKV parameter container for unfused QKV projection.

    src_param shapes: 3 x [out_features, in_features]
    dst_param shape: [3 x out_features, in_features]
    """

    q_params: torch.Tensor

    k_params: torch.Tensor

    v_params: torch.Tensor

    def finalize(self):
        fused_param = torch.cat([self.q_params, self.k_params, self.v_params], dim=0)
        return self.inference_model.transform_qkv_param(fused_param)


def megatron_qkv_reshape(param: torch.Tensor, head_size: int, n_heads: int) -> torch.Tensor:
    assert param.shape[0] == 3 * n_heads * head_size

    all_heads = torch.chunk(param, chunks=3 * n_heads, dim=0)
    q_heads = all_heads[::3]
    k_heads = all_heads[1::3]
    v_heads = all_heads[2::3]
    return torch.cat([q_heads, k_heads, v_heads], dim=0)


class MegatronQKVParameter(ParameterBase):
    """
    QKV parameter container for Megatron-style QKV projection. Megatron stores the parameter
    as [n_heads, 3, head_size, in_features] whereas our inference system is built around
    [3, n_heads, head_size, in_features]. This container handles the conversion.

    Note: this container expects the model implementation to implement properties for
    `head_size` and `n_heads`.

    src_qkv_w shape: [3 * out_features, in_features]
    qkv_w shape: [3 * out_features, in_features]
    """

    params: torch.Tensor

    def finalize(self) -> torch.Tensor:
        head_size = self.inference_model.head_size
        n_heads = self.inference_model.n_heads

        transposed_param = megatron_qkv_reshape(self.params, head_size, n_heads)
        return self.inference_model.transform_qkv_param(transposed_param)


def transform_gqa_megatron(src_param: torch.Tensor, head_size: int, n_q_heads: int, n_kv_heads: int) -> torch.Tensor:
    assert src_param.shape[0] == (2 * n_kv_heads + n_q_heads) * head_size

    head_ratio = n_q_heads // n_kv_heads

    # Reshape to get the groups as the leading dimension
    groups_leading_view = src_param.reshape(n_kv_heads, 2 + head_ratio, head_size, -1)
    q_heads = groups_leading_view[:, :head_ratio, :, :].reshape(-1, groups_leading_view.shape[-1])
    k_heads = groups_leading_view[:, head_ratio, :, :].reshape(-1, groups_leading_view.shape[-1])
    v_heads = groups_leading_view[:, head_ratio + 1, :, :].reshape(-1, groups_leading_view.shape[-1])
    # Squeeze will remove extra dimension for bias
    return torch.cat([q_heads, k_heads, v_heads], dim=0).squeeze()


class GQAMegatronQKVParameter(ParameterBase):
    """
    QKV parameter for Megatron-style QKV projection with GQA-style QKV projection. In this
    storage format each of the groups is stored consecutively, so there will be multiple q_heads,
    then one k head, and one v head.

    Note: this container expects the model implementation to implement properties for
    `head_size`, `n_q_heads`, and `n_kv_heads`.

    src_qkv_w shape: [(2 * n_kv_heads + n_q_heads) * head_size, in_features]
    qkv_w shape: [(2 * n_kv_heads + n_q_heads) * head_size, in_features]
    """

    params: torch.Tensor

    def finalize(self) -> torch.Tensor:
        head_size = self.inference_model.head_size
        n_q_heads = self.inference_model.n_heads_q
        n_kv_heads = self.inference_model.n_heads_kv
        transposed_param = transform_gqa_megatron(self.params, head_size, n_q_heads, n_kv_heads)
        return self.inference_model.transform_qkv_param(transposed_param)
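A small standalone check of how transform_gqa_megatron regroups the consecutively stored GQA groups; it assumes the deepspeed package added in this diff is importable and is for illustration only:

import torch
from deepspeed.inference.v2.model_implementations.common_parameters.qkv_parameters import transform_gqa_megatron

# 4 query heads, 2 KV heads, head_size 1 -> head_ratio 2, so the source rows are
# stored group-wise as [q0, q1, k0, v0, q2, q3, k1, v1].
src = torch.arange(8, dtype=torch.float32).reshape(8, 1)
out = transform_gqa_megatron(src, head_size=1, n_q_heads=4, n_kv_heads=2)
print(out)
# tensor([0., 1., 4., 5., 2., 6., 3., 7.])  -> all q heads first, then the k heads, then the v heads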
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__init__.py
ADDED
@@ -0,0 +1,6 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .policy import FalconPolicy
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (262 Bytes).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/container.cpython-310.pyc
ADDED
Binary file (2.3 kB).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/model.cpython-310.pyc
ADDED
Binary file (7.02 kB).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/policy.cpython-310.pyc
ADDED
Binary file (1.86 kB).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/container.py
ADDED
@@ -0,0 +1,129 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# Create a container object to save model-specific tensors using the policy file above.

from ..common_parameters import *
from ..layer_container_base import LayerContainer
'''
# HF Falcon 7b model looks like this:

FalconForCausalLM(
  (transformer): FalconModel(
    (word_embeddings): Embedding(65024, 4544)
    (h): ModuleList(
      (0-31): 32 x FalconDecoderLayer(
        (self_attention): FalconAttention(
          (maybe_rotary): FalconRotaryEmbedding()
          (query_key_value): FalconLinear(in_features=4544, out_features=4672, bias=False)
          (dense): FalconLinear(in_features=4544, out_features=4544, bias=False)
          (attention_dropout): Dropout(p=0.0, inplace=False)
        )
        (mlp): FalconMLP(
          (dense_h_to_4h): FalconLinear(in_features=4544, out_features=18176, bias=False)
          (act): GELU(approximate='none')
          (dense_4h_to_h): FalconLinear(in_features=18176, out_features=4544, bias=False)
        )
        (input_layernorm): LayerNorm((4544,), eps=1e-05, elementwise_affine=True)
      )
    )
    (ln_f): LayerNorm((4544,), eps=1e-05, elementwise_affine=True)
  )
  (lm_head): Linear(in_features=4544, out_features=65024, bias=False)
)
'''


class FalconTransformerContainer(LayerContainer):
    """
    Transformer layer container for the Falcon model.
    """
    qkv_w: FusedQKVParameter
    attn_out_w: AttentionOutputParameter
    mlp_1_w: MLP1Parameter
    mlp_2_w: MLP2Parameter
    ln_attn_gamma: NormParameter
    ln_attn_beta: NormParameter

    PARAM_MAPPING = {
        "self_attention.query_key_value.weight": "qkv_w.params",
        "self_attention.dense.weight": "attn_out_w.params",
        "mlp.dense_h_to_4h.weight": "mlp_1_w.params",
        "mlp.dense_4h_to_h.weight": "mlp_2_w.params",
        "input_layernorm.weight": "ln_attn_gamma.params",
        "input_layernorm.bias": "ln_attn_beta.params",
    }


class FalconNonTransformerContainer(LayerContainer):
    """
    Non-Transformer layer container for the Falcon model.
    """
    word_emb: EmbeddingParameter
    word_unembed: UnembedParameter
    final_norm_gamma: NormParameter
    final_norm_beta: NormParameter

    PARAM_MAPPING = {
        "transformer.word_embeddings.weight": "word_emb.params",
        "transformer.ln_f.weight": "final_norm_gamma.params",
        "transformer.ln_f.bias": "final_norm_beta.params",
        "lm_head.weight": "word_unembed.params",
    }


'''
# HF Falcon 40b model looks like this:

FalconForCausalLM(
  (transformer): FalconModel(
    (word_embeddings): Embedding(65024, 8192)
    (h): ModuleList(
      (0-59): 60 x FalconDecoderLayer(
        (self_attention): FalconAttention(
          (maybe_rotary): FalconRotaryEmbedding()
          (query_key_value): FalconLinear(in_features=8192, out_features=9216, bias=False)
          (dense): FalconLinear(in_features=8192, out_features=8192, bias=False)
          (attention_dropout): Dropout(p=0.0, inplace=False)
        )
        (mlp): FalconMLP(
          (dense_h_to_4h): FalconLinear(in_features=8192, out_features=32768, bias=False)
          (act): GELU(approximate='none')
          (dense_4h_to_h): FalconLinear(in_features=32768, out_features=8192, bias=False)
        )
        (ln_attn): LayerNorm((8192,), eps=1e-05, elementwise_affine=True)
        (ln_mlp): LayerNorm((8192,), eps=1e-05, elementwise_affine=True)
      )
    )
    (ln_f): LayerNorm((8192,), eps=1e-05, elementwise_affine=True)
  )
  (lm_head): Linear(in_features=8192, out_features=65024, bias=False)
)
'''


class FalconNewArchTransformerContainer(LayerContainer):
    """
    Transformer layer container for the Falcon model.
    """
    qkv_w: GQAMegatronQKVParameter
    attn_out_w: AttentionOutputParameter
    mlp_1_w: MLP1Parameter
    mlp_2_w: MLP2Parameter
    ln_attn_gamma: NormParameter
    ln_attn_beta: NormParameter
    ln_mlp_gamma: NormParameter
    ln_mlp_beta: NormParameter

    PARAM_MAPPING = {
        "self_attention.query_key_value.weight": "qkv_w.params",
        "self_attention.dense.weight": "attn_out_w.params",
        "mlp.dense_h_to_4h.weight": "mlp_1_w.params",
        "mlp.dense_4h_to_h.weight": "mlp_2_w.params",
        "ln_attn.weight": "ln_attn_gamma.params",
        "ln_attn.bias": "ln_attn_beta.params",
        "ln_mlp.weight": "ln_mlp_gamma.params",
        "ln_mlp.bias": "ln_mlp_beta.params",
    }
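The fused query_key_value shapes in the module dumps above can be sanity-checked against the head layout these containers assume. A back-of-the-envelope check, taking the head counts from the published Falcon configs (they are not part of this diff):

# Falcon-7B (multi_query): 71 query heads of size 64 share a single K and V head.
head_size, n_q, n_kv = 64, 71, 1
assert (n_q + 2 * n_kv) * head_size == 4672   # query_key_value out_features in the 7B dump

# Falcon-40B (new_decoder_architecture): 128 query heads and 8 KV heads of size 64.
head_size, n_q, n_kv = 64, 128, 8
assert (n_q + 2 * n_kv) * head_size == 9216   # query_key_value out_features in the 40B dump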
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/model.py
ADDED
@@ -0,0 +1,213 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from typing import Iterable, Optional, Tuple

import torch

import deepspeed.comm as dist

from ...allocator import empty_from
from ...inference_utils import ActivationType, DtypeEnum
from .. import *
from ...modules.configs import *
from ...modules.interfaces import *
from ...ragged import RaggedBatchWrapper

from .container import FalconNonTransformerContainer, FalconTransformerContainer


class FalconInferenceModel(DSTransformerModelBase):
    """
    Inference model implementation for ragged batching for Falcon models.
    """

    _non_transformer: Optional[FalconNonTransformerContainer]
    """
    Embed + unembed container. Specializing the type annotation.
    """

    _transformer: Optional[Iterable[FalconTransformerContainer]]
    """
    Per-layer transformer container. Specializing the type annotation.
    """
    """
    Properties inherited from `DSInferenceModelBase`
    """

    @property
    def max_sequence_length(self) -> int:
        return self._config.max_seq_length

    """
    Properties inherited from `DSTransformerModelBase`
    """

    @property
    def num_layers(self) -> int:
        return self._config.num_hidden_layers

    @property
    def model_dim(self) -> int:
        return self._config.hidden_size

    @property
    def vocab_size(self) -> int:
        return self._config.vocab_size

    @property
    def head_size(self) -> int:
        return self.model_dim // self.n_heads

    @property
    def n_heads(self) -> int:
        return self._config.num_attention_heads

    @property
    def intermediate_dim(self) -> int:
        return 4 * self._config.hidden_size

    @property
    def n_heads_kv(self) -> int:
        return self._config.num_kv_heads if (self._config.new_decoder_architecture
                                             or not self._config.multi_query) else 1

    @property
    def activation_dtype(self) -> DtypeEnum:
        if self._config.torch_dtype == torch.float16:
            return DtypeEnum.fp16
        elif self._config.torch_dtype == torch.bfloat16:
            return DtypeEnum.bf16
        else:
            raise NotImplementedError("Only fp16 and bf16 are supported")

    @property
    def mlp_activation_fn(self) -> ActivationType:
        return ActivationType.GELU

    @property
    def norm_type(self) -> NormTypeEnum:
        return NormTypeEnum.LayerNorm

    @property
    def positional_embedding_type(self) -> PositionalEmbeddingType:
        return PositionalEmbeddingType.rotate_half

    @property
    def positional_embedding_config(self) -> RotateHalfConfig:
        """
        The positional embedding configuration for the model.
        """
        return RotateHalfConfig()

    """
    Forward implementations
    """

    def _forward_embed(self, ragged_batch: RaggedBatchWrapper) -> torch.Tensor:
        """
        Performs the embedding lookup prior to running the transformer of the model.

        Arguments:
            ragged_batch (RaggedBatchWrapper): The batch to embed.

        Returns:
            torch.Tensor: The embedded batch.
        """
        embed = self.embed(ragged_batch, self._non_transformer.word_emb)

        if embed.shape[-1] != self.model_dim:
            raise ValueError(f"Embedding output shape {embed.shape} does not match model_dim {self.model_dim}")

        return embed

    def _forward_transformer_layer(self, layer_idx: int, residual: torch.Tensor, hidden_states: torch.Tensor,
                                   ragged_batch_info: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Executes one (slightly offset) layer of the transformer. This implementation does a peek-ahead
        optimization to fuse the layer norm of the next layer into the current layer.

        Arguments:
            layer_idx (int): The index of the layer to execute.
            residual (torch.Tensor): The residual tensor from the previous layer.
            hidden_states (torch.Tensor): The hidden states from the previous layer. This is the
                hidden states after pre normalization.
            ragged_batch_info (RaggedBatchWrapper): The batch metadata.
        """
        assert self.config.parallel_attn, "Only parallel attention implementation is supported"

        cur_params = self._transformer[layer_idx]
        kv_cache = self.state_manager.get_cache(layer_idx)

        attn_ln_out = hidden_states
        attn_hidden_state = self.qkv(attn_ln_out, cur_params.qkv_w, b=None)
        attn_hidden_state = self.attn(attn_hidden_state, kv_cache, ragged_batch_info)
        attention_output = self.attn_out(attn_hidden_state, cur_params.attn_out_w, b=None)

        if self.config.new_decoder_architecture:
            residual, mlp_ln_out = self.norm(residual,
                                             None,
                                             gamma=cur_params.ln_mlp_gamma,
                                             beta=cur_params.ln_mlp_beta)
        else:
            mlp_ln_out = hidden_states

        mlp_hidden_state = self.mlp_1(mlp_ln_out, cur_params.mlp_1_w, b=None)
        mlp_output = self.mlp_2(mlp_hidden_state, cur_params.mlp_2_w, b=None)

        mlp_output.add_(attention_output)

        if self.tp_size > 1:
            dist.all_reduce(mlp_output, group=self._base_mp_group)

        if layer_idx != self.num_layers - 1:
            next_params = self._transformer[layer_idx + 1]
            residual, mlp_output = self.norm(residual,
                                             mlp_output,
                                             next_params.ln_attn_gamma,
                                             beta=next_params.ln_attn_beta)
        else:
            # On last layer, we just need to perform the residual add. Adding into the residual
            # here is safe.
            residual.add_(mlp_output)

        return residual, mlp_output

    def _forward_unembed(self, hidden_states: torch.Tensor, ragged_batch_info: RaggedBatchWrapper) -> torch.Tensor:
        """
        Performs unembedding of the hidden states to logits. This will only sample the final
        token of each sequence.
        """
        logits = self.unembed(hidden_states,
                              self._non_transformer.word_unembed,
                              ragged_batch_info,
                              gamma=self._non_transformer.final_norm_gamma,
                              beta=self._non_transformer.final_norm_beta)

        if self.tp_size > 1:
            comm_buffer = empty_from(self._comm_logits, (self.tp_size, logits.shape[0], logits.shape[1]))
            full_logits = empty_from(self._return_logits, (logits.shape[0], self.vocab_size))

            dist.all_gather_into_tensor(comm_buffer, logits, group=self._base_mp_group)

            full_logits.copy_(comm_buffer.permute(1, 0, 2).reshape(logits.shape[0], self.vocab_size))

            return full_logits
        else:
            return logits

    def forward(self, wrapped_batch: RaggedBatchWrapper) -> torch.Tensor:
        residual = self._forward_embed(wrapped_batch)

        residual, hidden_states = self.norm(residual,
                                            None,
                                            gamma=self._transformer[0].ln_attn_gamma,
                                            beta=self._transformer[0].ln_attn_beta)

        for layer_idx in range(self.num_layers):
            residual, hidden_states = self._forward_transformer_layer(layer_idx, residual, hidden_states,
                                                                      wrapped_batch)

        return self._forward_unembed(residual, wrapped_batch)
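As a rough mental model of _forward_transformer_layer above, ignoring tensor parallelism, the KV cache and the fused peek-ahead norm, Falcon's parallel-attention layer reduces to the sketch below. The callables are stand-ins, not the DeepSpeed module APIs:

import torch

def falcon_layer_reference(x, ln_attn, ln_mlp, attn, mlp, new_decoder_architecture: bool):
    # Parallel attention: the attention and MLP branches both read normed input,
    # and both outputs are added back into the residual stream together.
    attn_out = attn(ln_attn(x))
    mlp_in = ln_mlp(x) if new_decoder_architecture else ln_attn(x)
    return x + attn_out + mlp(mlp_in)

This is also why the method receives hidden_states that are already normed (attn_ln_out = hidden_states): the previous layer computed the next layer's ln_attn as part of its fused norm.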
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (264 Bytes).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__pycache__/model.cpython-310.pyc
ADDED
Binary file (6.84 kB).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__pycache__/policy.cpython-310.pyc
ADDED
Binary file (1.73 kB).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__init__.py
ADDED
@@ -0,0 +1,6 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .policy import MixtralPolicy
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (264 Bytes).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/container.cpython-310.pyc
ADDED
Binary file (1.88 kB).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/model.cpython-310.pyc
ADDED
Binary file (8.84 kB).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/policy.cpython-310.pyc
ADDED
Binary file (1.56 kB).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/container.py
ADDED
@@ -0,0 +1,46 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# Create a container object to save model-specific tensors using the policy file above.

from deepspeed.inference.v2.model_implementations.common_parameters import *
from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer


class MixtralTransformerContainer(LayerContainer):

    qkv_w: UnfusedQKVParameter
    attn_out_w: AttentionOutputParameter
    moe_gate: MoEGatingWeightParameter
    moe_mlp_1: UnfusedMoEGatedMLPParameter
    moe_mlp_2: UnfusedMoEMLP2Parameter
    attn_norm_gamma: NormParameter
    mlp_norm_gamma: NormParameter

    PARAM_MAPPING = {
        "input_layernorm.weight": "attn_norm_gamma.params",
        "post_attention_layernorm.weight": "mlp_norm_gamma.params",
        "self_attn.q_proj.weight": "qkv_w.q_params",
        "self_attn.k_proj.weight": "qkv_w.k_params",
        "self_attn.v_proj.weight": "qkv_w.v_params",
        "self_attn.o_proj.weight": "attn_out_w.params",
        "block_sparse_moe.gate.weight": "moe_gate.params",
        "block_sparse_moe.experts.*.w1.weight": "moe_mlp_1.gating_experts",
        "block_sparse_moe.experts.*.w3.weight": "moe_mlp_1.up_experts",
        "block_sparse_moe.experts.*.w2.weight": "moe_mlp_2.experts",
    }


class MixtralNonTransformerContainer(LayerContainer):

    word_emb: EmbeddingParameter
    word_unembed: UnembedParameter
    final_norm: NormParameter

    PARAM_MAPPING = {
        "model.embed_tokens.weight": "word_emb.params",
        "lm_head.weight": "word_unembed.params",
        "model.norm.weight": "final_norm.params",
    }
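The `*` in keys such as block_sparse_moe.experts.*.w1.weight stands for the expert index, so one mapping entry covers every expert in a layer. A hypothetical glob match against Mixtral-style checkpoint names (the key list is invented for illustration; the real matching logic lives in layer_container_base):

from fnmatch import fnmatch

checkpoint_keys = [
    "block_sparse_moe.experts.0.w1.weight",
    "block_sparse_moe.experts.1.w1.weight",
    "block_sparse_moe.experts.7.w3.weight",
]
pattern = "block_sparse_moe.experts.*.w1.weight"
print([k for k in checkpoint_keys if fnmatch(k, pattern)])
# ['block_sparse_moe.experts.0.w1.weight', 'block_sparse_moe.experts.1.w1.weight']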
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/model.py
ADDED
@@ -0,0 +1,261 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from typing import Iterable, Optional, Tuple

import torch

import deepspeed.comm as dist

from ...allocator import empty_from
from ...config_v2 import RaggedInferenceEngineConfig
from ...inference_utils import ActivationType, DtypeEnum
from ...model_implementations import *
from ...modules.configs import *
from ...modules.interfaces import *
from ...ragged import RaggedBatchWrapper
from ..inference_model_base import (
    DSModelImplementationConfig,
    MPType,
)

from .container import MixtralNonTransformerContainer, MixtralTransformerContainer


class MixtralInferenceModel(DSMoETransformerModelBase):
    """
    Inference model implementation for Mixtral models.
    """

    _non_transformer: Optional[MixtralNonTransformerContainer]
    """
    Embed + unembed container. Specializing the type annotation.
    """

    _transformer: Optional[Iterable[MixtralTransformerContainer]]
    """
    Per-layer transformer container. Specializing the type annotation.
    """
    """
    Properties inherited from `DSInferenceModelBase`
    """

    @property
    def max_sequence_length(self) -> int:
        return self._config.max_position_embeddings

    """
    Properties inherited from `DSTransformerModelBase`
    """

    @property
    def num_layers(self) -> int:
        return self._config.num_hidden_layers

    @property
    def model_dim(self) -> int:
        return self._config.hidden_size

    @property
    def vocab_size(self) -> int:
        return self._config.vocab_size

    @property
    def head_size(self) -> int:
        return self.model_dim // self.n_heads

    @property
    def n_heads(self) -> int:
        return self._config.num_attention_heads

    @property
    def intermediate_dim(self) -> int:
        return self._config.intermediate_size

    @property
    def n_heads_kv(self) -> int:
        return self._config.num_key_value_heads

    @property
    def activation_dtype(self) -> DtypeEnum:
        if self._config.torch_dtype == torch.float16:
            return DtypeEnum.fp16
        elif self._config.torch_dtype == torch.bfloat16:
            return DtypeEnum.bf16
        else:
            raise NotImplementedError("Only fp16 and bf16 are supported")

    @property
    def mlp_activation_fn(self) -> ActivationType:
        activation = self._config.hidden_act.lower()
        if activation == "gelu":
            return ActivationType.GEGLU
        elif activation == "relu":
            return ActivationType.ReGLU
        elif activation == "gegelu":
            return ActivationType.GEGLU
        elif activation == "silu":
            return ActivationType.SiGLU
        else:
            raise NotImplementedError(f"Activation {activation} not supported")

    @property
    def norm_type(self) -> NormTypeEnum:
        return NormTypeEnum.RMSNorm

    @property
    def positional_embedding_type(self) -> PositionalEmbeddingType:
        return PositionalEmbeddingType.rotate_half

    @property
    def positional_embedding_config(self) -> Optional[RotateHalfConfig]:
        """
        The positional embedding configuration for the model.
        """
        return RotateHalfConfig(theta_base=self._config.rope_theta)

    """
    Inherited from `DSMoETransformerModelBase`
    """

    @property
    def n_experts(self) -> int:
        return self._config.num_local_experts

    @property
    def n_top_k(self) -> int:
        return self._config.num_experts_per_tok

    @property
    def normalize_expert_scores(self) -> bool:
        return True

    """
    Model implementation
    """

    def __init__(self, config: DSModelImplementationConfig, engine_config: RaggedInferenceEngineConfig,
                 base_mp_group: MPType) -> None:
        """
        Base implementation for initialization. By default, this will initialize
        the traditional components of a transformer model:
            - Embedding
            - QKV projection
            - Self attention
            - Attention output projection
            - Feed forward network
            - Normalization
            - Unembedding

        Arguments:
            config (DSModelImplementationConfig): Model-specific configuration. No assumptions
                should be made about this config that are not closely tied to the specific
                model implementation.
            engine_config (RaggedInferenceEngineConfig): Engine configuration.
            base_mp_group (MPType): Base communication group for Tensor-parallel inference.
        """
        super().__init__(config, engine_config, base_mp_group)

        self.make_norm_layer()
        self.make_qkv_layer()
        self.make_attn_layer()
        self.make_attn_out_layer()
        self.make_moe_layer()
        self.make_embedding_layer()
        self.make_unembedding_layer()
        self._kv_cache_config = None

    def _forward_embed(self, ragged_batch: RaggedBatchWrapper) -> torch.Tensor:
        """
        Performs the embedding lookup prior to running the transformer of the model.

        Arguments:
            ragged_batch (RaggedBatchWrapper): The batch to embed.

        Returns:
            torch.Tensor: The embedded batch.
        """
        embed = self.embed(ragged_batch, self._non_transformer.word_emb)

        if embed.shape[-1] != self.model_dim:
            raise ValueError(f"Embedding output shape {embed.shape} does not match model_dim {self.model_dim}")

        return embed

    def _forward_transformer(self, layer_idx: int, residual: torch.Tensor, hidden_states: torch.Tensor,
                             ragged_batch_info: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Executes one (slightly offset) layer of the transformer. This implementation does a peek-ahead
        optimization to fuse the layer norm of the next layer into the current layer.

        Arguments:
            layer_idx (int): The index of the layer to execute.
            residual (torch.Tensor): The residual tensor from the previous layer.
            hidden_states (torch.Tensor): The hidden states from the previous layer. This is the
                hidden states after pre normalization.
            ragged_batch_info (RaggedBatchWrapper): The batch metadata.
        """
        # TODO(cmikeh2): Distribute ragged_batch_info to all modules

        cur_params = self._transformer[layer_idx]
        kv_cache = self.state_manager.get_cache(layer_idx)

        hidden_states = self.qkv(hidden_states, cur_params.qkv_w)
        hidden_states = self.attn(hidden_states, kv_cache, ragged_batch_info)
        hidden_states = self.attn_out(hidden_states, cur_params.attn_out_w)

        if self.tp_size > 1:
            dist.all_reduce(hidden_states, group=self._base_mp_group)

        residual, hidden_states = self.norm(residual, hidden_states, cur_params.mlp_norm_gamma)

        hidden_states = self.moe(hidden_states, ragged_batch_info, cur_params.moe_gate, cur_params.moe_mlp_1,
                                 cur_params.moe_mlp_2)

        if self.tp_size > 1:
            dist.all_reduce(hidden_states, group=self._base_mp_group)

        if layer_idx != self.num_layers - 1:
            next_params = self._transformer[layer_idx + 1]
            residual, hidden_states = self.norm(residual, hidden_states, next_params.attn_norm_gamma)
        else:
            # On last layer, we just need to perform the residual add. Adding into the residual
            # here is safe.
            residual.add_(hidden_states)

        return residual, hidden_states

    def _forward_unembed(self, hidden_states: torch.Tensor, ragged_batch_info: RaggedBatchWrapper) -> torch.Tensor:
        """
        Performs unembedding of the hidden states to logits. This will only sample the final
        token of each sequence.
        """
        logits = self.unembed(hidden_states,
                              self._non_transformer.word_unembed,
                              ragged_batch_info,
                              gamma=self._non_transformer.final_norm)

        if self.tp_size > 1:
            comm_buffer = empty_from(self._comm_logits, (self.tp_size, logits.shape[0], logits.shape[1]))
            full_logits = empty_from(self._return_logits, (logits.shape[0], self.vocab_size))

            dist.all_gather_into_tensor(comm_buffer, logits, group=self._base_mp_group)

            full_logits.copy_(comm_buffer.permute(1, 0, 2).reshape(logits.shape[0], self.vocab_size))

            return full_logits
        else:
            return logits

    def forward(self, wrapped_batch: RaggedBatchWrapper) -> torch.Tensor:

        residual = self._forward_embed(wrapped_batch)

        residual, hidden_states = self.norm(residual, None, self._transformer[0].attn_norm_gamma, beta=None)

        for layer_idx in range(self.num_layers):
            residual, hidden_states = self._forward_transformer(layer_idx, residual, hidden_states, wrapped_batch)

        return self._forward_unembed(residual, wrapped_batch)
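The n_top_k and normalize_expert_scores properties above correspond to Mixtral's top-2 routing, where the gate scores of the selected experts are re-normalized to sum to one per token. A standalone sketch of that routing rule (not the DeepSpeed MoE kernel):

import torch

def route(router_logits: torch.Tensor, n_top_k: int = 2, normalize: bool = True):
    # router_logits: [tokens, n_experts]
    probs = torch.softmax(router_logits, dim=-1)
    scores, expert_ids = torch.topk(probs, n_top_k, dim=-1)
    if normalize:
        scores = scores / scores.sum(dim=-1, keepdim=True)
    return scores, expert_ids

logits = torch.randn(4, 8)     # 4 tokens, 8 local experts
scores, expert_ids = route(logits)
print(scores.sum(dim=-1))      # each row sums to 1.0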
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/policy.py
ADDED
@@ -0,0 +1,31 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from typing import Any

from ...config_v2 import RaggedInferenceEngineConfig
from ..inference_policy_base import ContainerMap, InferenceV2Policy
from .container import MixtralTransformerContainer, MixtralNonTransformerContainer
from .model import MixtralInferenceModel


class MixtralPolicy(InferenceV2Policy):

    def instantiate_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> MixtralInferenceModel:
        return MixtralInferenceModel(config=self._model_config, engine_config=engine_config, base_mp_group=mp_group)

    def build_container_map(self) -> ContainerMap:

        map = ContainerMap()

        transformer_containers = [MixtralTransformerContainer(self.model) for _ in range(self.model.num_layers)]

        map.set_transformer_params(['model.layers'], transformer_containers)

        map.set_non_transformer_params(MixtralNonTransformerContainer(self.model))

        map.set_unmapped_params([])

        return map
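map.set_transformer_params(['model.layers'], ...) registers the prefix under which per-layer weights live; checkpoint names are then split into the prefix, the layer index, and a local name that matches a PARAM_MAPPING key. A hypothetical split (the actual routing is implemented in inference_policy_base and may differ in detail):

full_name = "model.layers.17.self_attn.q_proj.weight"
prefix = "model.layers"
rest = full_name[len(prefix) + 1:]           # "17.self_attn.q_proj.weight"
layer_idx, local_name = rest.split(".", 1)   # "17", "self_attn.q_proj.weight"
# local_name is the form that appears as a key in MixtralTransformerContainer.PARAM_MAPPING
print(layer_idx, local_name)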
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__init__.py
ADDED
@@ -0,0 +1,6 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .policy import OPTPolicy
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/container.py
ADDED
@@ -0,0 +1,94 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# Create a container object to save model-specific tensors using the policy file above.

from ..common_parameters import *
from ..layer_container_base import LayerContainer
'''
# HF OPT model looks like this:

OPTForCausalLM(
  (model): OPTModel(
    (decoder): OPTDecoder(
      (embed_tokens): Embedding(50272, 768, padding_idx=1)
      (embed_positions): OPTLearnedPositionalEmbedding(2050, 768)
      (final_layer_norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
      (layers): ModuleList(
        (0-11): 12 x OPTDecoderLayer(
          (self_attn): OPTAttention(
            (k_proj): Linear(in_features=768, out_features=768, bias=True)
            (v_proj): Linear(in_features=768, out_features=768, bias=True)
            (q_proj): Linear(in_features=768, out_features=768, bias=True)
            (out_proj): Linear(in_features=768, out_features=768, bias=True)
          )
          (activation_fn): ReLU()
          (self_attn_layer_norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
          (fc1): Linear(in_features=768, out_features=3072, bias=True)
          (fc2): Linear(in_features=3072, out_features=768, bias=True)
          (final_layer_norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)
        )
      )
    )
  )
  (lm_head): Linear(in_features=768, out_features=50272, bias=False)
)

'''


class OPTTransformerContainer(LayerContainer):
    """
    Transformer layer container for the OPT model.
    """
    qkv_w: UnfusedQKVParameter
    qkv_b: UnfusedQKVParameter
    attn_out_w: AttentionOutputParameter
    attn_out_b: AttentionOutputParameter
    mlp_1_w: MLP1Parameter
    mlp_1_b: MLP1Parameter
    mlp_2_w: MLP2Parameter
    mlp_2_b: MLP2Parameter
    attn_norm_beta: NormParameter
    attn_norm_gamma: NormParameter
    mlp_norm_beta: NormParameter
    mlp_norm_gamma: NormParameter

    PARAM_MAPPING = {
        "self_attn.q_proj.weight": "qkv_w.q_params",
        "self_attn.q_proj.bias": "qkv_b.q_params",
        "self_attn.k_proj.weight": "qkv_w.k_params",
        "self_attn.k_proj.bias": "qkv_b.k_params",
        "self_attn.v_proj.weight": "qkv_w.v_params",
        "self_attn.v_proj.bias": "qkv_b.v_params",
        "self_attn.out_proj.weight": "attn_out_w.params",
        "self_attn.out_proj.bias": "attn_out_b.params",
        "fc1.weight": "mlp_1_w.params",
        "fc1.bias": "mlp_1_b.params",
        "fc2.weight": "mlp_2_w.params",
        "fc2.bias": "mlp_2_b.params",
        "self_attn_layer_norm.weight": "attn_norm_gamma.params",
        "self_attn_layer_norm.bias": "attn_norm_beta.params",
        "final_layer_norm.weight": "mlp_norm_gamma.params",
        "final_layer_norm.bias": "mlp_norm_beta.params",
    }


class OPTNonTransformerContainer(LayerContainer):
    """
    Non-Transformer layer container for the OPT model.
    """
    word_emb: EmbeddingParameter
    word_emb_pos: EmbeddingParameter
    word_unembed: UnembedParameter
    final_norm_w: NormParameter
    final_norm_b: NormParameter

    PARAM_MAPPING = {
        "*decoder.embed_tokens.weight": ["word_emb.params", "word_unembed.params"],
        "*decoder.embed_positions.weight": "word_emb_pos.params",
        "*decoder.final_layer_norm.weight": "final_norm_w.params",
        "*decoder.final_layer_norm.bias": "final_norm_b.params",
    }
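Note the one-to-many entry for *decoder.embed_tokens.weight: OPT ties its input embedding to the LM head, so a single checkpoint tensor has to populate both word_emb and word_unembed. A hypothetical fan-out over such a mapping (the dict and helper are invented for illustration):

mapping = {"*decoder.embed_tokens.weight": ["word_emb.params", "word_unembed.params"]}

def targets_for(key: str):
    value = mapping[key]
    # Normalize the one-to-one and one-to-many cases to a list of destinations.
    return value if isinstance(value, list) else [value]

print(targets_for("*decoder.embed_tokens.weight"))
# ['word_emb.params', 'word_unembed.params']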
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/model.py
ADDED
@@ -0,0 +1,197 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from typing import Iterable, Optional, Tuple

import torch

import deepspeed.comm as dist

from ...allocator import empty_from
from ...inference_utils import ActivationType, DtypeEnum
from ...model_implementations import *
from ...modules.configs import *
from ...ragged import RaggedBatchWrapper
from .container import OPTNonTransformerContainer, OPTTransformerContainer

from ...modules.heuristics import instantiate_embed


class OPTInferenceModel(DSTransformerModelBase):
    """
    Inference model implementation for ragged batching for OPT models.
    """

    _non_transformer: Optional[OPTNonTransformerContainer]
    """
    Embed + unembed container. Specializing the type annotation.
    """

    _transformer: Optional[Iterable[OPTTransformerContainer]]
    """
    Per-layer transformer container. Specializing the type annotation.
    """
    """
    Properties inherited from `DSInferenceModelBase`
    """

    @property
    def max_sequence_length(self) -> int:
        return self._config.max_seq_length

    """
    Properties inherited from `DSTransformerModelBase`
    """

    @property
    def num_layers(self) -> int:
        return self._config.num_hidden_layers

    @property
    def model_dim(self) -> int:
        return self._config.hidden_size

    @property
    def vocab_size(self) -> int:
        return self._config.vocab_size

    @property
    def head_size(self) -> int:
        return self.model_dim // self.n_heads

    @property
    def n_heads(self) -> int:
        return self._config.num_attention_heads

    @property
    def intermediate_dim(self) -> int:
        return self._config.ffn_dim

    @property
    def activation_dtype(self) -> DtypeEnum:
        if self._config.torch_dtype == torch.float16:
            return DtypeEnum.fp16
        elif self._config.torch_dtype == torch.bfloat16:
            return DtypeEnum.bf16
        else:
            raise NotImplementedError("Only fp16 and bf16 are supported")

    @property
    def mlp_activation_fn(self) -> ActivationType:
        return ActivationType.RELU

    @property
    def norm_type(self) -> NormTypeEnum:
        return NormTypeEnum.LayerNorm

    @property
    def positional_embedding_type(self) -> PositionalEmbeddingType:
        return PositionalEmbeddingType.none

    @property
    def positional_embedding_config(self) -> Optional[RotateHalfConfig]:
        return None

    """
    Overrides of ``DSTransformerModelBase`` methods
    """

    def make_embedding_layer(self) -> None:
        """
        Performs setup and creates embedding DSModule. Since OPT includes trained
        positional embeddings, we will override the base model implementation.
        """

        embed_config = DSEmbeddingsConfig(max_tokens=self._engine_config.state_manager.max_ragged_batch_size,
                                          residual_dtype=self.activation_dtype,
                                          embedding_dim=self.model_dim,
                                          positional_embedding=True,
                                          positional_offset=2)

        self.embed = instantiate_embed(embed_config, self._engine_config)

    """
    Forward implementations
    """

    def _forward_embed(self, ragged_batch: RaggedBatchWrapper) -> torch.Tensor:
        embed = self.embed(ragged_batch, self._non_transformer.word_emb, self._non_transformer.word_emb_pos)
        if embed.shape[-1] != self.model_dim:
            raise ValueError(f"Embedding output shape {embed.shape} does not match model_dim {self.model_dim}")

        return embed

    def _forward_transformer_layer(self, layer_idx: int, residual: torch.Tensor, hidden_states: torch.Tensor,
                                   ragged_batch_info: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor]:
        # TODO(cmikeh2): Distribute ragged_batch_info to all modules

        cur_params = self._transformer[layer_idx]
        kv_cache = self.state_manager.get_cache(layer_idx)

        hidden_states = self.qkv(hidden_states, cur_params.qkv_w, b=cur_params.qkv_b)
        hidden_states = self.attn(hidden_states, kv_cache, ragged_batch_info)
        hidden_states = self.attn_out(hidden_states, cur_params.attn_out_w, b=cur_params.attn_out_b)

        if self.tp_size > 1:
            dist.all_reduce(hidden_states, group=self._base_mp_group)

        residual, hidden_states = self.norm(residual,
                                            hidden_states,
                                            cur_params.mlp_norm_gamma,
                                            beta=cur_params.mlp_norm_beta)

        # Should be configurable in the future
        hidden_states = self.mlp_1(hidden_states, cur_params.mlp_1_w, b=cur_params.mlp_1_b)
        hidden_states = self.mlp_2(hidden_states, cur_params.mlp_2_w, b=cur_params.mlp_2_b)

        if self.tp_size > 1:
            dist.all_reduce(hidden_states, group=self._base_mp_group)

        if layer_idx != self.num_layers - 1:
            next_params = self._transformer[layer_idx + 1]
            residual, hidden_states = self.norm(residual,
                                                hidden_states,
                                                next_params.attn_norm_gamma,
                                                beta=next_params.attn_norm_beta)
        else:
            # On the last layer, we just need to perform the residual add. Adding into the residual
            # here is safe.
            residual.add_(hidden_states)

        return residual, hidden_states

    def _forward_unembed(self, hidden_states: torch.Tensor, ragged_batch_info: RaggedBatchWrapper) -> torch.Tensor:
        logits = self.unembed(hidden_states,
                              self._non_transformer.word_unembed,
                              ragged_batch_info,
                              gamma=self._non_transformer.final_norm_w,
                              beta=self._non_transformer.final_norm_b)

        if self.tp_size > 1:
            comm_buffer = empty_from(self._comm_logits, (self.tp_size, logits.shape[0], logits.shape[1]))
            full_logits = empty_from(self._return_logits, (logits.shape[0], self.vocab_size))

            dist.all_gather_into_tensor(comm_buffer, logits, group=self._base_mp_group)

            full_logits.copy_(comm_buffer.permute(1, 0, 2).reshape(logits.shape[0], self.vocab_size))

            return full_logits
        else:
            return logits

    def forward(self, wrapped_batch: RaggedBatchWrapper) -> torch.Tensor:

        residual = self._forward_embed(wrapped_batch)

        residual, hidden_states = self.norm(residual,
                                            None,
                                            self._transformer[0].attn_norm_gamma,
                                            beta=self._transformer[0].attn_norm_beta)

        for layer_idx in range(self.num_layers):
            residual, hidden_states = self._forward_transformer_layer(layer_idx, residual, hidden_states,
                                                                      wrapped_batch)

        return self._forward_unembed(residual, wrapped_batch)
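`_forward_transformer_layer` above applies the *next* layer's attention LayerNorm on its way out, so every layer receives pre-normalized hidden states; the first norm runs in `forward` before the loop, and the last layer only performs the residual add. The following standalone toy sketch reproduces that schedule with plain `torch.nn` modules standing in for the DeepSpeed kernels (illustrative only).

# Toy sketch (hypothetical standalone code, not the DeepSpeed modules) of the
# "peek-ahead" pre-norm schedule: layer i applies layer i+1's input LayerNorm
# on its way out, so each layer receives already-normalized hidden states.
import torch

num_layers, dim = 4, 8
norms = [torch.nn.LayerNorm(dim) for _ in range(num_layers)]
blocks = [torch.nn.Linear(dim, dim) for _ in range(num_layers)]  # stand-in for attn + MLP

residual = torch.randn(2, dim)        # output of the embedding lookup
hidden = norms[0](residual)           # first norm happens before the loop

for i in range(num_layers):
    out = blocks[i](hidden)           # stand-in for the transformer layer body
    if i != num_layers - 1:
        residual = residual + out     # residual add ...
        hidden = norms[i + 1](residual)   # ... fused with the next layer's norm
    else:
        residual = residual + out     # last layer: residual add only

print(residual.shape)  # torch.Size([2, 8]) -- `residual` is what gets unembedded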
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/policy.py
ADDED
@@ -0,0 +1,30 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from typing import Any

from ...config_v2 import RaggedInferenceEngineConfig
from ..inference_policy_base import ContainerMap, InferenceV2Policy
from .container import OPTNonTransformerContainer, OPTTransformerContainer
from .model import OPTInferenceModel


class OPTPolicy(InferenceV2Policy):

    def instantiate_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> OPTInferenceModel:
        return OPTInferenceModel(config=self._model_config, engine_config=engine_config, base_mp_group=mp_group)

    def build_container_map(self) -> ContainerMap:
        map = ContainerMap()

        transformer_containers = [OPTTransformerContainer(self.model) for _ in range(self.model.num_layers)]

        map.set_transformer_params(['model.decoder.layers', 'decoder.layers'], transformer_containers)

        map.set_non_transformer_params(OPTNonTransformerContainer(self.model))

        map.set_unmapped_params(['lm_head.weight'])

        return map
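`build_container_map` registers two transformer prefixes so checkpoints with and without the leading `model.` resolve to the same per-layer containers, sends everything else to the non-transformer container, and drops `lm_head.weight`, which OPT ties to the input embedding. The sketch below is a hypothetical, simplified expression of that routing rule; the `route` helper is illustrative and not the ContainerMap API.

# Hypothetical sketch (illustrative only) of the routing a ContainerMap expresses:
# strip a transformer prefix plus layer index, then hand the remainder to that
# layer's PARAM_MAPPING; other names go to the non-transformer container.
def route(name: str,
          prefixes=("model.decoder.layers", "decoder.layers"),
          unmapped=("lm_head.weight",)):
    if name in unmapped:
        return ("unmapped", None, None)
    for prefix in prefixes:
        if name.startswith(prefix + "."):
            rest = name[len(prefix) + 1:]           # e.g. "3.fc1.weight"
            layer_idx, local_name = rest.split(".", 1)
            return ("transformer", int(layer_idx), local_name)
    return ("non_transformer", None, name)


print(route("model.decoder.layers.3.fc1.weight"))   # ('transformer', 3, 'fc1.weight')
print(route("model.decoder.embed_tokens.weight"))   # ('non_transformer', None, ...)
print(route("lm_head.weight"))                      # ('unmapped', None, None)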
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__init__.py
ADDED
@@ -0,0 +1,6 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .policy import PhiPolicy
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (256 Bytes)
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__pycache__/containers.cpython-310.pyc
ADDED
Binary file (2.15 kB)
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__pycache__/model.cpython-310.pyc
ADDED
Binary file (6.83 kB)
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__pycache__/policy.cpython-310.pyc
ADDED
Binary file (1.73 kB)
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/containers.py
ADDED
@@ -0,0 +1,91 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# Create a container object to save model-specific tensors using the policy file above.

from ..common_parameters import *
from ..layer_container_base import LayerContainer
'''
# HF Phi-2 model looks like this:

PhiForCausalLM(
  (model): PhiModel(
    (embed_tokens): Embedding(51200, 2560)
    (embed_dropout): Dropout(p=0.0, inplace=False)
    (layers): ModuleList(
      (0-31): 32 x PhiDecoderLayer(
        (self_attn): PhiAttention(
          (q_proj): Linear(in_features=2560, out_features=2560, bias=True)
          (k_proj): Linear(in_features=2560, out_features=2560, bias=True)
          (v_proj): Linear(in_features=2560, out_features=2560, bias=True)
          (dense): Linear(in_features=2560, out_features=2560, bias=True)
          (rotary_emb): PhiRotaryEmbedding()
        )
        (mlp): PhiMLP(
          (activation_fn): NewGELUActivation()
          (fc1): Linear(in_features=2560, out_features=10240, bias=True)
          (fc2): Linear(in_features=10240, out_features=2560, bias=True)
        )
        (input_layernorm): LayerNorm((2560,), eps=1e-05, elementwise_affine=True)
        (resid_dropout): Dropout(p=0.1, inplace=False)
      )
    )
    (final_layernorm): LayerNorm((2560,), eps=1e-05, elementwise_affine=True)
  )
  (lm_head): Linear(in_features=2560, out_features=51200, bias=True)
)
'''


class PhiTransformerContainer(LayerContainer):
    """
    Transformer layer container for the Phi model.
    """
    qkv_w: UnfusedQKVParameter
    qkv_b: UnfusedQKVParameter
    attn_out_w: AttentionOutputParameter
    attn_out_b: AttentionOutputParameter
    mlp_1_w: MLP1Parameter
    mlp_1_b: MLP1Parameter
    mlp_2_w: MLP2Parameter
    mlp_2_b: MLP2Parameter
    ln_gamma: NormParameter
    ln_beta: NormParameter

    PARAM_MAPPING = {
        "self_attn.q_proj.weight": "qkv_w.q_params",
        "self_attn.k_proj.weight": "qkv_w.k_params",
        "self_attn.v_proj.weight": "qkv_w.v_params",
        "self_attn.q_proj.bias": "qkv_b.q_params",
        "self_attn.k_proj.bias": "qkv_b.k_params",
        "self_attn.v_proj.bias": "qkv_b.v_params",
        "self_attn.dense.weight": "attn_out_w.params",
        "self_attn.dense.bias": "attn_out_b.params",
        "mlp.fc1.weight": "mlp_1_w.params",
        "mlp.fc1.bias": "mlp_1_b.params",
        "mlp.fc2.weight": "mlp_2_w.params",
        "mlp.fc2.bias": "mlp_2_b.params",
        "input_layernorm.weight": "ln_gamma.params",
        "input_layernorm.bias": "ln_beta.params",
    }


class PhiNonTransformerContainer(LayerContainer):
    """
    Non-Transformer layer container for the Phi model.
    """
    word_emb: EmbeddingParameter
    word_unembed_w: UnembedParameter
    word_unembed_b: UnembedParameter
    final_norm_gamma: NormParameter
    final_norm_beta: NormParameter

    PARAM_MAPPING = {
        "model.embed_tokens.weight": "word_emb.params",
        "model.final_layernorm.weight": "final_norm_gamma.params",
        "model.final_layernorm.bias": "final_norm_beta.params",
        "lm_head.weight": "word_unembed_w.params",
        "lm_head.bias": "word_unembed_b.params",
    }
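Phi stores separate `q_proj`/`k_proj`/`v_proj` tensors, which is why the mapping above targets the `q_params`/`k_params`/`v_params` sub-fields of an UnfusedQKVParameter: the three projections are fused into a single QKV tensor so the attention input can be computed with one GEMM. A shape-only sketch of that fusion, using shapes taken from the Phi-2 printout above (the fusion layout is assumed for illustration, not the actual parameter class):

# Illustrative sketch (assumed layout, not UnfusedQKVParameter itself):
# Phi-2 stores separate q/k/v projections of shape [2560, 2560]; stacking them
# row-wise yields one [3 * 2560, 2560] weight so QKV runs as a single GEMM.
import torch

hidden = 2560
q_w = torch.randn(hidden, hidden)
k_w = torch.randn(hidden, hidden)
v_w = torch.randn(hidden, hidden)

qkv_w = torch.cat([q_w, k_w, v_w], dim=0)   # [7680, 2560]

x = torch.randn(4, hidden)                  # 4 tokens
qkv = x @ qkv_w.T                           # one GEMM for all three projections
q, k, v = qkv.split(hidden, dim=-1)
print(q.shape, k.shape, v.shape)            # each torch.Size([4, 2560])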
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/model.py
ADDED
@@ -0,0 +1,199 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from typing import Iterable, Optional, Tuple

import torch

import deepspeed.comm as dist

from ...allocator import empty_from
from ...inference_utils import ActivationType, DtypeEnum
from .. import *
from ...modules.configs import *
from ...modules.interfaces import *
from ...ragged import RaggedBatchWrapper

from .containers import PhiNonTransformerContainer, PhiTransformerContainer


class PhiInferenceModel(DSTransformerModelBase):
    """
    Inference model implementation for ragged batching for Phi models.
    """

    _non_transformer: Optional[PhiNonTransformerContainer]
    """
    Embed + unembed container. Specializing the type annotation.
    """

    _transformer: Optional[Iterable[PhiTransformerContainer]]
    """
    Per-layer transformer container. Specializing the type annotation.
    """
    """
    Properties inherited from `DSInferenceModelBase`
    """

    @property
    def max_sequence_length(self) -> int:
        return self._config.max_seq_length

    """
    Properties inherited from `DSTransformerModelBase`
    """

    @property
    def num_layers(self) -> int:
        return self._config.num_hidden_layers

    @property
    def model_dim(self) -> int:
        return self._config.hidden_size

    @property
    def vocab_size(self) -> int:
        return self._config.vocab_size

    @property
    def head_size(self) -> int:
        return self.model_dim // self.n_heads

    @property
    def n_heads(self) -> int:
        return self._config.num_attention_heads

    @property
    def intermediate_dim(self) -> int:
        return self._config.intermediate_size

    @property
    def n_heads_kv(self) -> int:
        return self._config.num_key_value_heads

    @property
    def activation_dtype(self) -> DtypeEnum:
        if self._config.torch_dtype == torch.float16:
            return DtypeEnum.fp16
        elif self._config.torch_dtype == torch.bfloat16:
            return DtypeEnum.bf16
        else:
            raise NotImplementedError("Only fp16 and bf16 are supported")

    @property
    def mlp_activation_fn(self) -> ActivationType:
        return ActivationType.GELU

    @property
    def norm_type(self) -> NormTypeEnum:
        return NormTypeEnum.LayerNorm

    @property
    def positional_embedding_type(self) -> PositionalEmbeddingType:
        return PositionalEmbeddingType.rotate_half

    @property
    def positional_embedding_config(self) -> Optional[RotateHalfConfig]:
        rotary_dim = int(self._config.partial_rotary_factor * self.head_size)
        return RotateHalfConfig(rotate_dim=rotary_dim, theta_base=self._config.rope_theta)

    """
    Forward implementations
    """

    def _forward_embed(self, ragged_batch: RaggedBatchWrapper) -> torch.Tensor:
        """
        Performs the embedding lookup prior to running the transformer of the model.

        Arguments:
            ragged_batch (RaggedBatchWrapper): The batch to embed.

        Returns:
            torch.Tensor: The embedded batch.
        """
        embed = self.embed(ragged_batch, self._non_transformer.word_emb)

        if embed.shape[-1] != self.model_dim:
            raise ValueError(f"Embedding output shape {embed.shape} does not match model_dim {self.model_dim}")

        return embed

    def _forward_transformer_layer(self, layer_idx: int, residual: torch.Tensor, hidden_states: torch.Tensor,
                                   ragged_batch_info: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Executes one (slightly offset) layer of the transformer. This implementation does a peek-ahead
        optimization to fuse the layer norm of the next layer into the current layer.

        Arguments:
            layer_idx (int): The index of the layer to execute.
            residual (torch.Tensor): The residual tensor from the previous layer.
            hidden_states (torch.Tensor): The hidden states from the previous layer. This is the
                hidden states after pre normalization.
            ragged_batch_info (RaggedBatchWrapper): The batch metadata.
        """
        cur_params = self._transformer[layer_idx]
        kv_cache = self.state_manager.get_cache(layer_idx)

        attn_ln_out = hidden_states
        attn_hidden_state = self.qkv(attn_ln_out, cur_params.qkv_w, b=cur_params.qkv_b)
        attn_hidden_state = self.attn(attn_hidden_state, kv_cache, ragged_batch_info)
        attention_output = self.attn_out(attn_hidden_state, cur_params.attn_out_w, b=cur_params.attn_out_b)

        mlp_ln_out = hidden_states
        mlp_hidden_state = self.mlp_1(mlp_ln_out, cur_params.mlp_1_w, b=cur_params.mlp_1_b)
        mlp_output = self.mlp_2(mlp_hidden_state, cur_params.mlp_2_w, b=cur_params.mlp_2_b)

        mlp_output.add_(attention_output)

        if self.tp_size > 1:
            dist.all_reduce(mlp_output, group=self._base_mp_group)

        if layer_idx != self.num_layers - 1:
            next_params = self._transformer[layer_idx + 1]
            residual, mlp_output = self.norm(residual, mlp_output, next_params.ln_gamma, beta=next_params.ln_beta)
        else:
            # On the last layer, we just need to perform the residual add. Adding into the residual
            # here is safe.
            residual.add_(mlp_output)

        return residual, mlp_output

    def _forward_unembed(self, hidden_states: torch.Tensor, ragged_batch_info: RaggedBatchWrapper) -> torch.Tensor:
        """
        Performs unembedding of the hidden states to logits. This will only sample the final
        token of each sequence.
        """
        logits = self.unembed(hidden_states,
                              self._non_transformer.word_unembed_w,
                              ragged_batch_info,
                              bias=self._non_transformer.word_unembed_b,
                              gamma=self._non_transformer.final_norm_gamma,
                              beta=self._non_transformer.final_norm_beta)

        if self.tp_size > 1:
            comm_buffer = empty_from(self._comm_logits, (self.tp_size, logits.shape[0], logits.shape[1]))
            full_logits = empty_from(self._return_logits, (logits.shape[0], self.vocab_size))

            dist.all_gather_into_tensor(comm_buffer, logits, group=self._base_mp_group)

            full_logits.copy_(comm_buffer.permute(1, 0, 2).reshape(logits.shape[0], self.vocab_size))

            return full_logits
        else:
            return logits

    def forward(self, wrapped_batch: RaggedBatchWrapper) -> torch.Tensor:
        residual = self._forward_embed(wrapped_batch)

        residual, hidden_states = self.norm(residual,
                                            None,
                                            gamma=self._transformer[0].ln_gamma,
                                            beta=self._transformer[0].ln_beta)

        for layer_idx in range(self.num_layers):
            residual, hidden_states = self._forward_transformer_layer(layer_idx, residual, hidden_states,
                                                                      wrapped_batch)

        return self._forward_unembed(residual, wrapped_batch)
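Unlike the sequential OPT layer earlier in this diff, the Phi layer above is a parallel block: attention and MLP both consume the same layer-normed input (`attn_ln_out` and `mlp_ln_out` alias `hidden_states`), their outputs are summed, and a single residual add follows. A toy sketch of that structure with ordinary `torch.nn` modules standing in for the DeepSpeed kernels (illustrative only):

# Toy sketch of the parallel residual structure used above: attention and MLP
# share one LayerNorm output and their results are summed into a single
# residual update per layer.
import torch

dim = 8
attn = torch.nn.Linear(dim, dim)   # stand-in for QKV -> attention -> output projection
mlp = torch.nn.Sequential(torch.nn.Linear(dim, 4 * dim), torch.nn.GELU(),
                          torch.nn.Linear(4 * dim, dim))

residual = torch.randn(2, dim)
ln_out = torch.nn.LayerNorm(dim)(residual)   # one shared pre-norm

layer_out = attn(ln_out) + mlp(ln_out)       # parallel branches, summed
residual = residual + layer_out              # single residual add per layer
print(residual.shape)                        # torch.Size([2, 8])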
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/policy.py
ADDED
@@ -0,0 +1,32 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from typing import Any

from ...config_v2 import RaggedInferenceEngineConfig
from ..inference_policy_base import ContainerMap, InferenceV2Policy
from .containers import PhiNonTransformerContainer, PhiTransformerContainer
from .model import PhiInferenceModel


class PhiPolicy(InferenceV2Policy):

    def instantiate_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> PhiInferenceModel:
        return PhiInferenceModel(config=self._model_config, engine_config=engine_config, base_mp_group=mp_group)

    def build_container_map(self) -> ContainerMap:
        map = ContainerMap()

        trans_container_cls = PhiTransformerContainer
        transformer_containers = [trans_container_cls(self.model) for _ in range(self.model.num_layers)]

        map.set_transformer_params(['model.layers'], transformer_containers)

        map.set_non_transformer_params(PhiNonTransformerContainer(self.model))

        map.set_unmapped_params(
            [f'model.layers.{i}.self_attn.rotary_emb.inv_freq' for i in range(self.model.num_layers)])

        return map
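The unmapped parameters above are the per-layer `rotary_emb.inv_freq` buffers; they can be skipped because the rotary frequencies are fully determined by the rotary dimension and theta base already captured by `positional_embedding_config` in model.py. The sketch below recomputes such a buffer with the standard rotate-half formula; the Phi-2 values used (head size 80, `partial_rotary_factor` 0.4, `rope_theta` 10000) are assumed for illustration.

# Sketch of why `rotary_emb.inv_freq` can be left unmapped: the buffer is fully
# determined by the rotary dimension and theta base, so it can be recomputed
# rather than loaded from the checkpoint. Hyperparameter values are assumed.
import torch

head_size, partial_rotary_factor, rope_theta = 80, 0.4, 10000.0
rotary_dim = int(partial_rotary_factor * head_size)          # 32

inv_freq = 1.0 / (rope_theta ** (torch.arange(0, rotary_dim, 2).float() / rotary_dim))
print(rotary_dim, inv_freq.shape)                            # 32 torch.Size([16])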
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__init__.py
ADDED
@@ -0,0 +1,6 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .policy import QwenPolicy
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (258 Bytes)
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/container.cpython-310.pyc
ADDED
Binary file (1.66 kB)
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/model.cpython-310.pyc
ADDED
Binary file (7.4 kB)
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/policy.cpython-310.pyc
ADDED
Binary file (1.57 kB)
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/container.py
ADDED
@@ -0,0 +1,77 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

# Create a container object to save model-specific tensors using the policy file above.

from ..common_parameters import *
from ..layer_container_base import LayerContainer
'''
# HF Qwen model looks like this:

QWenLMHeadModel(
  (transformer): QWenModel(
    (wte): Embedding(151936, 4096)
    (drop): Dropout(p=0.0, inplace=False)
    (rotary_emb): RotaryEmbedding()
    (h): ModuleList(
      (0-31): 32 x QWenBlock(
        (ln_1): RMSNorm()
        (attn): QWenAttention(
          (c_attn): Linear(in_features=4096, out_features=12288, bias=True)
          (c_proj): Linear(in_features=4096, out_features=4096, bias=False)
          (attn_dropout): Dropout(p=0.0, inplace=False)
        )
        (ln_2): RMSNorm()
        (mlp): QWenMLP(
          (w1): Linear(in_features=4096, out_features=11008, bias=False)
          (w2): Linear(in_features=4096, out_features=11008, bias=False)
          (c_proj): Linear(in_features=11008, out_features=4096, bias=False)
        )
      )
    )
    (ln_f): RMSNorm()
  )
  (lm_head): Linear(in_features=4096, out_features=151936, bias=False)
)
'''


class QwenTransformerContainer(LayerContainer):
    """
    Transformer layer container for the Qwen model.
    """
    qkv_w: FusedQKVParameter
    qkv_b: FusedQKVParameter
    attn_out_w: AttentionOutputParameter
    mlp_1_w: GatedMLPParameter
    mlp_2_w: MLP2Parameter
    attn_norm_gamma: NormParameter
    mlp_norm_gamma: NormParameter

    PARAM_MAPPING = {
        "attn.c_attn.weight": "qkv_w.params",
        "attn.c_attn.bias": "qkv_b.params",
        "attn.c_proj.weight": "attn_out_w.params",
        "mlp.w1.weight": "mlp_1_w.up_params",
        "mlp.w2.weight": "mlp_1_w.gate_params",
        "mlp.c_proj.weight": "mlp_2_w.params",
        "ln_1.weight": "attn_norm_gamma.params",
        "ln_2.weight": "mlp_norm_gamma.params",
    }


class QwenNonTransformerContainer(LayerContainer):
    """
    Non-Transformer layer container for the Qwen model.
    """
    word_emb: EmbeddingParameter
    word_unembed: UnembedParameter
    final_norm: NormParameter

    PARAM_MAPPING = {
        "transformer.wte.weight": "word_emb.params",
        "transformer.ln_f.weight": "final_norm.params",
        "lm_head.weight": "word_unembed.params",
    }
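Two fusions are implied by the mapping above: `attn.c_attn` already ships as a single `[3 * hidden, hidden]` weight (hence FusedQKVParameter), and `mlp.w1`/`mlp.w2` are the up- and gate-projections that GatedMLPParameter combines so the gated MLP can run as a single GEMM. A shape-only sketch under those assumptions; the concatenation order below is illustrative, not the DeepSpeed layout.

# Illustrative sketch (assumed shapes and layout, not the DeepSpeed parameter classes)
# of the two fusions the Qwen container relies on.
import torch

hidden, ffn = 4096, 11008

# attn.c_attn ships as one fused weight whose rows are [Q; K; V].
c_attn_w = torch.randn(3 * hidden, hidden)            # matches Linear(4096 -> 12288)
q_w, k_w, v_w = c_attn_w.chunk(3, dim=0)
print(q_w.shape)                                       # torch.Size([4096, 4096])

# mlp.w1 / mlp.w2 are the up- and gate-projections of a gated MLP; stacking them
# lets the engine run one GEMM and then apply the gated (SiGLU-style) activation.
up_w = torch.randn(ffn, hidden)                        # mlp.w1.weight
gate_w = torch.randn(ffn, hidden)                      # mlp.w2.weight
fused_w = torch.cat([up_w, gate_w], dim=0)             # [2 * 11008, 4096]
print(fused_w.shape)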
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/model.py
ADDED
@@ -0,0 +1,223 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from typing import Iterable, Optional, Tuple

import torch

import deepspeed.comm as dist

from ...allocator import empty_from
from ...inference_utils import ActivationType, DtypeEnum
from .. import *
from ...modules.configs import *
from ...modules.interfaces import *
from ...modules import heuristics
from ...ragged import RaggedBatchWrapper

from .container import QwenNonTransformerContainer, QwenTransformerContainer


class QwenInferenceModel(DSTransformerModelBase):
    """
    Inference model implementation for ragged batching for Qwen models.
    """

    _non_transformer: Optional[QwenNonTransformerContainer]
    """
    Embed + unembed container. Specializing the type annotation.
    """

    _transformer: Optional[Iterable[QwenTransformerContainer]]
    """
    Per-layer transformer container. Specializing the type annotation.
    """
    """
    Properties inherited from `DSInferenceModelBase`
    """

    @property
    def max_sequence_length(self) -> int:
        return self._config.max_seq_length

    """
    Properties inherited from `DSTransformerModelBase`
    """

    @property
    def num_layers(self) -> int:
        return self._config.num_hidden_layers

    @property
    def model_dim(self) -> int:
        return self._config.hidden_size

    @property
    def vocab_size(self) -> int:
        return self._config.vocab_size

    @property
    def head_size(self) -> int:
        return self.model_dim // self.n_heads

    @property
    def n_heads(self) -> int:
        return self._config.num_attention_heads

    @property
    def intermediate_dim(self) -> int:
        return self._config.intermediate_size // 2

    @property
    def n_heads_kv(self) -> int:
        return self._config.hidden_size // self._config.kv_channels

    @property
    def activation_dtype(self) -> DtypeEnum:
        autoset_precision = self._config.bf16 + self._config.fp16 == 0
        if autoset_precision:
            return DtypeEnum.fp16
        if self._config.fp16:
            return DtypeEnum.fp16
        elif self._config.bf16:
            # TODO(ZonePG): bf16 inference results may be different from huggingface bf16,
            # because in rms_norm, Qwen still uses float() instead of bf16
            return DtypeEnum.bf16
        else:
            raise NotImplementedError("Only fp16 and bf16 are supported")

    @property
    def mlp_activation_fn(self) -> ActivationType:
        return ActivationType.SiGLU

    @property
    def norm_type(self) -> NormTypeEnum:
        return NormTypeEnum.RMSNorm

    @property
    def positional_embedding_type(self) -> PositionalEmbeddingType:
        return PositionalEmbeddingType.rotate_half

    @property
    def positional_embedding_config(self) -> Optional[RotateHalfConfig]:
        return RotateHalfConfig(theta_base=self._config.rotary_emb_base)

    def make_norm_layer(self) -> None:
        """
        Instantiates the normalization layer for the model. This sets the `self.norm` attribute.

        TODO(cmikeh2): In the future we'll distinguish between the different norm objects,
        but for now we'll just use the same one for all of them.
        """
        norm_config = DSNormConfig(
            max_tokens=self._engine_config.state_manager.max_ragged_batch_size,
            type=self.norm_type,
            channels=self.model_dim,
            residual_dtype=self.activation_dtype,
            input_dtype=self.activation_dtype,
            output_dtype=self.activation_dtype,
            eps=self._config.layer_norm_epsilon,
        )

        self.norm = heuristics.instantiate_pre_norm(norm_config, self._engine_config)

    """
    Forward implementations
    """

    def _forward_embed(self, ragged_batch: RaggedBatchWrapper) -> torch.Tensor:
        """
        Performs the embedding lookup prior to running the transformer of the model.

        Arguments:
            ragged_batch (RaggedBatchWrapper): The batch to embed.

        Returns:
            torch.Tensor: The embedded batch.
        """
        embed = self.embed(ragged_batch, self._non_transformer.word_emb)

        if embed.shape[-1] != self.model_dim:
            raise ValueError(f"Embedding output shape {embed.shape} does not match model_dim {self.model_dim}")

        return embed

    def _forward_transformer_layer(self, layer_idx: int, residual: torch.Tensor, hidden_states: torch.Tensor,
                                   ragged_batch_info: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Executes one (slightly offset) layer of the transformer. This implementation does a peek-ahead
        optimization to fuse the layer norm of the next layer into the current layer.

        Arguments:
            layer_idx (int): The index of the layer to execute.
            residual (torch.Tensor): The residual tensor from the previous layer.
            hidden_states (torch.Tensor): The hidden states from the previous layer. This is the
                hidden states after pre normalization.
            ragged_batch_info (RaggedBatchWrapper): The batch metadata.
        """
        # TODO(cmikeh2): Distribute ragged_batch_info to all modules

        cur_params = self._transformer[layer_idx]
        kv_cache = self.state_manager.get_cache(layer_idx)

        hidden_states = self.qkv(hidden_states, cur_params.qkv_w, b=cur_params.qkv_b)
        hidden_states = self.attn(hidden_states, kv_cache, ragged_batch_info)
        hidden_states = self.attn_out(hidden_states, cur_params.attn_out_w, b=None)

        if self.tp_size > 1:
            dist.all_reduce(hidden_states, group=self._base_mp_group)

        residual, hidden_states = self.norm(residual, hidden_states, cur_params.mlp_norm_gamma, beta=None)

        # Should be configurable in the future
        hidden_states = self.mlp_1(hidden_states, cur_params.mlp_1_w, b=None)
        hidden_states = self.mlp_2(hidden_states, cur_params.mlp_2_w, b=None)

        if self.tp_size > 1:
            dist.all_reduce(hidden_states, group=self._base_mp_group)

        if layer_idx != self.num_layers - 1:
            next_params = self._transformer[layer_idx + 1]
            residual, hidden_states = self.norm(residual, hidden_states, next_params.attn_norm_gamma, beta=None)
        else:
            # On the last layer, we just need to perform the residual add. Adding into the residual
            # here is safe.
            residual.add_(hidden_states)

        return residual, hidden_states

    def _forward_unembed(self, hidden_states: torch.Tensor, ragged_batch_info: RaggedBatchWrapper) -> torch.Tensor:
        """
        Performs unembedding of the hidden states to logits. This will only sample the final
        token of each sequence.
        """
        logits = self.unembed(hidden_states,
                              self._non_transformer.word_unembed,
                              ragged_batch_info,
                              gamma=self._non_transformer.final_norm)

        if self.tp_size > 1:
            comm_buffer = empty_from(self._comm_logits, (self.tp_size, logits.shape[0], logits.shape[1]))
            full_logits = empty_from(self._return_logits, (logits.shape[0], self.vocab_size))

            dist.all_gather_into_tensor(comm_buffer, logits, group=self._base_mp_group)

            full_logits.copy_(comm_buffer.permute(1, 0, 2).reshape(logits.shape[0], self.vocab_size))

            return full_logits
        else:
            return logits

    def forward(self, wrapped_batch: RaggedBatchWrapper) -> torch.Tensor:

        residual = self._forward_embed(wrapped_batch)

        residual, hidden_states = self.norm(residual, None, self._transformer[0].attn_norm_gamma, beta=None)

        for layer_idx in range(self.num_layers):
            residual, hidden_states = self._forward_transformer_layer(layer_idx, residual, hidden_states,
                                                                      wrapped_batch)

        return self._forward_unembed(residual, wrapped_batch)
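When `tp_size > 1`, `_forward_unembed` above gathers vocabulary-sharded logits from every rank and restores the full vocabulary ordering with `permute(1, 0, 2).reshape(...)`. The single-process sketch below mimics that reshaping with plain tensors in place of `dist.all_gather_into_tensor`, so the index arithmetic can be checked in isolation (toy sizes assumed for illustration).

# Sketch of the tensor-parallel logits gather: each rank holds a
# [tokens, vocab / tp] shard, the gather stacks them as [tp, tokens, shard],
# and permute + reshape restores [tokens, vocab].
import torch

tp_size, tokens, vocab = 4, 3, 32
shard = vocab // tp_size

rank_logits = [torch.randn(tokens, shard) for _ in range(tp_size)]   # per-rank shards
comm_buffer = torch.stack(rank_logits, dim=0)                        # [tp, tokens, shard]

full_logits = comm_buffer.permute(1, 0, 2).reshape(tokens, vocab)    # [tokens, vocab]
assert torch.equal(full_logits[:, :shard], rank_logits[0])           # rank 0 owns the first shard
print(full_logits.shape)                                             # torch.Size([3, 32])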
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/policy.py
ADDED
@@ -0,0 +1,30 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from typing import Any

from ...config_v2 import RaggedInferenceEngineConfig
from ..inference_policy_base import ContainerMap, InferenceV2Policy
from .container import QwenNonTransformerContainer, QwenTransformerContainer
from .model import QwenInferenceModel


class QwenPolicy(InferenceV2Policy):

    def instantiate_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> QwenInferenceModel:
        return QwenInferenceModel(config=self._model_config, engine_config=engine_config, base_mp_group=mp_group)

    def build_container_map(self) -> ContainerMap:
        map = ContainerMap()

        transformer_containers = [QwenTransformerContainer(self.model) for _ in range(self.model.num_layers)]

        map.set_transformer_params(['transformer.h'], transformer_containers)

        map.set_non_transformer_params(QwenNonTransformerContainer(self.model))

        map.set_unmapped_params(['transformer.rotary_emb.inv_freq'])

        return map
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__init__.py
ADDED
@@ -0,0 +1,6 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .policy import Qwen2Policy