diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82e9f0c6cc0ea21e95b420a3e9d5971bc66625c2 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/flat_model_helpers.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/flat_model_helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da4ba6a4395f6f5b8183c865c7a1339b02ae2ffb Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/flat_model_helpers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/inference_model_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/inference_model_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5bddaa30797f8e2a300215e95be93063417535b5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/inference_model_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/inference_policy_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/inference_policy_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e424907ae623b66a593fded975b17a327a271451 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/inference_policy_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/inference_transformer_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/inference_transformer_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7cea11e421f1d2c7625223fbf03be376a105e4c Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/inference_transformer_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/layer_container_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/layer_container_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a62f1e270afad6060ee830bd5afd699bd4de158 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/layer_container_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/parameter_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/parameter_base.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..e443bc45532af0f978b77386bfc6a8e97c112151 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__pycache__/parameter_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..60963011cd660fe5e43b9a90efdacec4b16651b9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .attn_output_parameters import * +from .embedding_parameters import * +from .mlp_parameters import * +from .moe_parameters import * +from .norm_parameters import * +from .qkv_parameters import * +from .unembed_parameters import * +from .invfreq_parameters import * diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/mlp_parameters.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/mlp_parameters.py new file mode 100644 index 0000000000000000000000000000000000000000..ddb8996e03a37ef1213c64b1139c93c8a7909b62 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/mlp_parameters.py @@ -0,0 +1,81 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from ...model_implementations.parameter_base import ParameterBase +""" +MLP Parameter Containers +""" + + +class MLP1Parameter(ParameterBase): + """ + First MLP projection weight container. This performs a straight pass-through to the + model implementation for transformation. + """ + params: torch.Tensor + + def finalize(self) -> torch.Tensor: + # NOTE(cmikeh2): If we are gated but not in the format specified below, we should trigger a permutation here. + # I am not currently aware of any models that use this format (or how we should even detect it; probably should + # just be a different param entirely, but until then we'll just assume the format is correct). + return self.inference_model.transform_mlp_1_param(self.params) + + +class GatedMLPParameter(ParameterBase): + """ + Gated MLP projection container. + """ + + gate_params: torch.Tensor + """ + Weight parameter for the gating matrix. + """ + + up_params: torch.Tensor + """ + For lack of a better name, the non-gating weight parameters. + """ + + def finalize(self) -> torch.Tensor: + """ + Our gated format (this is different from InferenceV1!) is to have the gate and activated neurons + interleaved. So if we have 4 output neurons (two effective neurons) with 4 input neurons, the finalized + parameter will look like: + [g0_0, g0_1, g0_2, g0_3] + [a0_0, a0_1, a0_2, a0_3] + [g1_0, g1_1, g1_2, g1_3] + [a1_0, a1_1, a1_2, a1_3] + + As a reference, in inference v1, the format is: + [g0_0, g0_1, g0_2, g0_3] + [g1_0, g1_1, g1_2, g1_3] + [a0_0, a0_1, a0_2, a0_3] + [a1_0, a1_1, a1_2, a1_3] + """ + assert self.gate_params.shape[0] == self.up_params.shape[ + 0], "Gated MLP parameters must have the same number of neurons." 
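        # A small worked sketch (hypothetical 2-neuron, 2-feature tensors) of how the cat + reshape
        # below produces the interleaved layout described in the docstring:
        #   gate = [[g0_0, g0_1], [g1_0, g1_1]],  up = [[a0_0, a0_1], [a1_0, a1_1]]
        #   torch.cat([gate, up], dim=-1) -> [[g0_0, g0_1, a0_0, a0_1], [g1_0, g1_1, a1_0, a1_1]]
        #   .reshape(total_neurons, -1)   -> [[g0_0, g0_1], [a0_0, a0_1], [g1_0, g1_1], [a1_0, a1_1]]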
+ total_neurons = self.gate_params.shape[0] + self.up_params.shape[0] + + # Flip the order if, even with the correct tokenizer, the output is wrong + #fused_param = torch.cat([self.up_params, self.gate_params], dim=-1).reshape(total_neurons, -1) + fused_param = torch.cat([self.gate_params, self.up_params], dim=-1).reshape(total_neurons, -1) + return self.inference_model.transform_mlp_1_param(fused_param) + + +class MLP2Parameter(ParameterBase): + """ + Second MLP projection weight container. This performs a straight pass-through to the + model implementation for transformation. + """ + + params: torch.Tensor + """ + Full weight parameter. + """ + + def finalize(self) -> torch.Tensor: + return self.inference_model.transform_mlp_2_param(self.params) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/norm_parameters.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/norm_parameters.py new file mode 100644 index 0000000000000000000000000000000000000000..81ffcc3221df2dd2c05f9d8739a905a5ea2399a5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/norm_parameters.py @@ -0,0 +1,22 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from ...model_implementations.parameter_base import ParameterBase +""" +Common Normalization Parameter Patterns +""" + + +class NormParameter(ParameterBase): + """ + Simple normalization container. + """ + + params: torch.Tensor + + def finalize(self) -> torch.Tensor: + return self.inference_model.transform_norm_param(self.params) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/qkv_parameters.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/qkv_parameters.py new file mode 100644 index 0000000000000000000000000000000000000000..e240137186fe6114cb58ea66c1a25da59184d8f7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/qkv_parameters.py @@ -0,0 +1,115 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from ...model_implementations.parameter_base import ParameterBase +""" +Common QKV Parameter Patterns +""" + + +class FusedQKVParameter(ParameterBase): + """ + Traditional fused QKV parameters for QKV projection. This is functionally + a direct copy. + + src_qkv_w shape: [3 * out_features, in_features] + qkv_w shape: [3 * out_features, in_features] + """ + + params: torch.Tensor + + def finalize(self) -> torch.Tensor: + return self.inference_model.transform_qkv_param(self.params) + + +class UnfusedQKVParameter(ParameterBase): + """ + QKV parameter container for unfused QKV projection.
+ + src_param shapes: 3 x [out_features, in_features] + dst_param shape: [3 x out_features, in_features] + """ + + q_params: torch.Tensor + + k_params: torch.Tensor + + v_params: torch.Tensor + + def finalize(self): + fused_param = torch.cat([self.q_params, self.k_params, self.v_params], dim=0) + return self.inference_model.transform_qkv_param(fused_param) + + +def megatron_qkv_reshape(param: torch.Tensor, head_size: int, n_heads: int) -> torch.Tensor: + assert param.shape[0] == 3 * n_heads * head_size + + all_heads = torch.chunk(param, chunks=3 * n_heads, dim=0) + q_heads = all_heads[::3] + k_heads = all_heads[1::3] + v_heads = all_heads[2::3] + # q_heads/k_heads/v_heads are tuples of per-head slices, so unpack them before concatenating + return torch.cat([*q_heads, *k_heads, *v_heads], dim=0) + + +class MegatronQKVParameter(ParameterBase): + """ + QKV parameter container for Megatron-style QKV projection. Megatron stores the parameter + as [n_heads, 3, head_size, in_features] whereas our inference system is built around + [3, n_heads, head_size, in_features]. This container handles the conversion. + + Note: this container expects the model implementation to implement properties for + `head_size` and `n_heads`. + + src_qkv_w shape: [3 * out_features, in_features] + qkv_w shape: [3 * out_features, in_features] + """ + + params: torch.Tensor + + def finalize(self) -> torch.Tensor: + head_size = self.inference_model.head_size + n_heads = self.inference_model.n_heads + + transposed_param = megatron_qkv_reshape(self.params, head_size, n_heads) + return self.inference_model.transform_qkv_param(transposed_param) + + +def transform_gqa_megatron(src_param: torch.Tensor, head_size: int, n_q_heads: int, n_kv_heads: int) -> torch.Tensor: + assert src_param.shape[0] == (2 * n_kv_heads + n_q_heads) * head_size + + head_ratio = n_q_heads // n_kv_heads + + # Reshape to get the groups as the leading dimension + groups_leading_view = src_param.reshape(n_kv_heads, 2 + head_ratio, head_size, -1) + q_heads = groups_leading_view[:, :head_ratio, :, :].reshape(-1, groups_leading_view.shape[-1]) + k_heads = groups_leading_view[:, head_ratio, :, :].reshape(-1, groups_leading_view.shape[-1]) + v_heads = groups_leading_view[:, head_ratio + 1, :, :].reshape(-1, groups_leading_view.shape[-1]) + # Squeeze will remove extra dimension for bias + return torch.cat([q_heads, k_heads, v_heads], dim=0).squeeze() + + +class GQAMegatronQKVParameter(ParameterBase): + """ + QKV parameter container for Megatron-style QKV projection with grouped-query attention (GQA). In this + storage format each of the groups is stored consecutively, so there will be multiple q_heads, + then one k head, and one v head. + + Note: this container expects the model implementation to implement properties for + `head_size`, `n_q_heads`, and `n_kv_heads`.
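    Worked example (hypothetical sizes, for illustration only): with n_q_heads=4, n_kv_heads=2 and
    thus head_ratio=2, the source head order per group is [q, q, k, v], i.e.
    [q0, q1, k0, v0, q2, q3, k1, v1]; `transform_gqa_megatron` regroups this into the fused
    [all Q, all K, all V] order [q0, q1, q2, q3, k0, k1, v0, v1] used by the other QKV containers.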
+ + src_qkv_w shape: [(2 * n_kv_heads + n_q_heads) * head_size, in_features] + qkv_w shape: [(2 * n_kv_heads + n_q_heads) * head_size, in_features] + """ + + params: torch.Tensor + + def finalize(self) -> torch.Tensor: + head_size = self.inference_model.head_size + n_q_heads = self.inference_model.n_heads_q + n_kv_heads = self.inference_model.n_heads_kv + transposed_param = transform_gqa_megatron(self.params, head_size, n_q_heads, n_kv_heads) + return self.inference_model.transform_qkv_param(transposed_param) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..20f37538274ccda7ab68ceb7e82c87675120b7e7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .policy import FalconPolicy diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65fcb6a11d6574430df1c433e076102bec3f7fdb Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/container.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/container.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2435ceb2e5e000700b3c2a54872e4d270e9d1ef6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/container.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/model.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d3e9bbe5a4f2fb205a6cdfb67082fa3cbaa8bc6a Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/model.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/policy.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/policy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d86c5a03d13aeb312f0611b7a7632b0e35a94fe Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/__pycache__/policy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/container.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/container.py new file mode 100644 index 0000000000000000000000000000000000000000..caccfe1ecb00c37b08121decb250b2625c64eb9a --- /dev/null +++ 
b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/container.py @@ -0,0 +1,129 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# Create a container object to save model-specific tensors using the policy file above. + +from ..common_parameters import * +from ..layer_container_base import LayerContainer +''' + # HF Falcon 7b model looks like this: + +FalconForCausalLM( + (transformer): FalconModel( + (word_embeddings): Embedding(65024, 4544) + (h): ModuleList( + (0-31): 32 x FalconDecoderLayer( + (self_attention): FalconAttention( + (maybe_rotary): FalconRotaryEmbedding() + (query_key_value): FalconLinear(in_features=4544, out_features=4672, bias=False) + (dense): FalconLinear(in_features=4544, out_features=4544, bias=False) + (attention_dropout): Dropout(p=0.0, inplace=False) + ) + (mlp): FalconMLP( + (dense_h_to_4h): FalconLinear(in_features=4544, out_features=18176, bias=False) + (act): GELU(approximate='none') + (dense_4h_to_h): FalconLinear(in_features=18176, out_features=4544, bias=False) + ) + (input_layernorm): LayerNorm((4544,), eps=1e-05, elementwise_affine=True) + ) + ) + (ln_f): LayerNorm((4544,), eps=1e-05, elementwise_affine=True) + ) + (lm_head): Linear(in_features=4544, out_features=65024, bias=False) +) +''' + + +class FalconTransformerContainer(LayerContainer): + """ + Transformer layer container for the Falcon model. + """ + qkv_w: FusedQKVParameter + attn_out_w: AttentionOutputParameter + mlp_1_w: MLP1Parameter + mlp_2_w: MLP2Parameter + ln_attn_gamma: NormParameter + ln_attn_beta: NormParameter + + PARAM_MAPPING = { + "self_attention.query_key_value.weight": "qkv_w.params", + "self_attention.dense.weight": "attn_out_w.params", + "mlp.dense_h_to_4h.weight": "mlp_1_w.params", + "mlp.dense_4h_to_h.weight": "mlp_2_w.params", + "input_layernorm.weight": "ln_attn_gamma.params", + "input_layernorm.bias": "ln_attn_beta.params", + } + + +class FalconNonTransformerContainer(LayerContainer): + """ + Non-Transformer layer container for the Falcon model. 
+ """ + word_emb: EmbeddingParameter + word_unembed: UnembedParameter + final_norm_gamma: NormParameter + final_norm_beta: NormParameter + + PARAM_MAPPING = { + "transformer.word_embeddings.weight": "word_emb.params", + "transformer.ln_f.weight": "final_norm_gamma.params", + "transformer.ln_f.bias": "final_norm_beta.params", + "lm_head.weight": "word_unembed.params", + } + + +''' + # HF Falcon 40b model looks like this: + + FalconForCausalLM( + (transformer): FalconModel( + (word_embeddings): Embedding(65024, 8192) + (h): ModuleList( + (0-59): 60 x FalconDecoderLayer( + (self_attention): FalconAttention( + (maybe_rotary): FalconRotaryEmbedding() + (query_key_value): FalconLinear(in_features=8192, out_features=9216, bias=False) + (dense): FalconLinear(in_features=8192, out_features=8192, bias=False) + (attention_dropout): Dropout(p=0.0, inplace=False) + ) + (mlp): FalconMLP( + (dense_h_to_4h): FalconLinear(in_features=8192, out_features=32768, bias=False) + (act): GELU(approximate='none') + (dense_4h_to_h): FalconLinear(in_features=32768, out_features=8192, bias=False) + ) + (ln_attn): LayerNorm((8192,), eps=1e-05, elementwise_affine=True) + (ln_mlp): LayerNorm((8192,), eps=1e-05, elementwise_affine=True) + ) + ) + (ln_f): LayerNorm((8192,), eps=1e-05, elementwise_affine=True) + ) + (lm_head): Linear(in_features=8192, out_features=65024, bias=False) +) +''' + + +class FalconNewArchTransformerContainer(LayerContainer): + """ + Transformer layer container for the Falcon model. + """ + qkv_w: GQAMegatronQKVParameter + attn_out_w: AttentionOutputParameter + mlp_1_w: MLP1Parameter + mlp_2_w: MLP2Parameter + ln_attn_gamma: NormParameter + ln_attn_beta: NormParameter + ln_mlp_gamma: NormParameter + ln_mlp_beta: NormParameter + + PARAM_MAPPING = { + "self_attention.query_key_value.weight": "qkv_w.params", + "self_attention.dense.weight": "attn_out_w.params", + "mlp.dense_h_to_4h.weight": "mlp_1_w.params", + "mlp.dense_4h_to_h.weight": "mlp_2_w.params", + "ln_attn.weight": "ln_attn_gamma.params", + "ln_attn.bias": "ln_attn_beta.params", + "ln_mlp.weight": "ln_mlp_gamma.params", + "ln_mlp.bias": "ln_mlp_beta.params", + } diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/model.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/model.py new file mode 100644 index 0000000000000000000000000000000000000000..b2830c80b562546d138430da702b5c9882ab00b6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/model.py @@ -0,0 +1,213 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Iterable, Optional, Tuple + +import torch + +import deepspeed.comm as dist + +from ...allocator import empty_from +from ...inference_utils import ActivationType, DtypeEnum +from .. import * +from ...modules.configs import * +from ...modules.interfaces import * +from ...ragged import RaggedBatchWrapper + +from .container import FalconNonTransformerContainer, FalconTransformerContainer + + +class FalconInferenceModel(DSTransformerModelBase): + """ + Inference model implementation for ragged batching for Llama-2 models. + """ + + _non_transformer: Optional[FalconNonTransformerContainer] + """ + Embed + unembed container. Specializing the type annotation. + """ + + _transformer: Optional[Iterable[FalconTransformerContainer]] + """ + Per-layer transformer container. Specializing the type annotation. 
+ """ + """ + Properties inherited from `DSInferenceModelBase` + """ + + @property + def max_sequence_length(self) -> int: + return self._config.max_seq_length + + """ + Properties inherited from `DSTransformerModelBase` + """ + + @property + def num_layers(self) -> int: + return self._config.num_hidden_layers + + @property + def model_dim(self) -> int: + return self._config.hidden_size + + @property + def vocab_size(self) -> int: + return self._config.vocab_size + + @property + def head_size(self) -> int: + return self.model_dim // self.n_heads + + @property + def n_heads(self) -> int: + return self._config.num_attention_heads + + @property + def intermediate_dim(self) -> int: + return 4 * self._config.hidden_size + + @property + def n_heads_kv(self) -> int: + return self._config.num_kv_heads if (self._config.new_decoder_architecture + or not self._config.multi_query) else 1 + + @property + def activation_dtype(self) -> DtypeEnum: + if self._config.torch_dtype == torch.float16: + return DtypeEnum.fp16 + elif self._config.torch_dtype == torch.bfloat16: + return DtypeEnum.bf16 + else: + raise NotImplementedError("Only fp16 and bf16 are supported") + + @property + def mlp_activation_fn(self) -> ActivationType: + return ActivationType.GELU + + @property + def norm_type(self) -> NormTypeEnum: + return NormTypeEnum.LayerNorm + + @property + def positional_embedding_type(self) -> PositionalEmbeddingType: + return PositionalEmbeddingType.rotate_half + + @property + def positional_embedding_config(self) -> RotateHalfConfig: + """ + The positional embedding configuration for the model. + """ + return RotateHalfConfig() + + """ + Forward implementations + """ + + def _forward_embed(self, ragged_batch: RaggedBatchWrapper) -> torch.Tensor: + """ + Performs the embedding lookup prior to running the transformer of the model. + + Arguments: + ragged_batch (RaggedBatchWrapper): The batch to embed. + + Returns: + torch.Tensor: The embedded batch. + """ + embed = self.embed(ragged_batch, self._non_transformer.word_emb) + + if embed.shape[-1] != self.model_dim: + raise ValueError(f"Embedding output shape {embed.shape} does not match model_dim {self.model_dim}") + + return embed + + def _forward_transformer_layer(self, layer_idx: int, residual: torch.Tensor, hidden_states: torch.Tensor, + ragged_batch_info: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Executes one (slightly offset) layer of the transformer. This implementation does a peak-ahead + optimization to fuse the layer norm of the next layer into the current layer. + + Arguments: + layer_idx (int): The index of the layer to execute. + residual (torch.Tensor): The residual tensor from the previous layer. + hidden_states (torch.Tensor): The hidden states from the previous layer. This is the + hidden states after pre normalization. + ragged_batch_info (RaggedBatchWrapper): The batch metadata. 
+ """ + assert self.config.parallel_attn, "Only parallel attention implementation is supported" + + cur_params = self._transformer[layer_idx] + kv_cache = self.state_manager.get_cache(layer_idx) + + attn_ln_out = hidden_states + attn_hidden_state = self.qkv(attn_ln_out, cur_params.qkv_w, b=None) + attn_hidden_state = self.attn(attn_hidden_state, kv_cache, ragged_batch_info) + attention_output = self.attn_out(attn_hidden_state, cur_params.attn_out_w, b=None) + + if self.config.new_decoder_architecture: + residual, mlp_ln_out = self.norm(residual, + None, + gamma=cur_params.ln_mlp_gamma, + beta=cur_params.ln_mlp_beta) + else: + mlp_ln_out = hidden_states + + mlp_hidden_state = self.mlp_1(mlp_ln_out, cur_params.mlp_1_w, b=None) + mlp_output = self.mlp_2(mlp_hidden_state, cur_params.mlp_2_w, b=None) + + mlp_output.add_(attention_output) + + if self.tp_size > 1: + dist.all_reduce(mlp_output, group=self._base_mp_group) + + if layer_idx != self.num_layers - 1: + next_params = self._transformer[layer_idx + 1] + residual, mlp_output = self.norm(residual, + mlp_output, + next_params.ln_attn_gamma, + beta=next_params.ln_attn_beta) + else: + # On last layer, we just need to perform the residual add. Adding into the residual + # here is safe. + residual.add_(mlp_output) + + return residual, mlp_output + + def _forward_unembed(self, hidden_states: torch.Tensor, ragged_batch_info: RaggedBatchWrapper) -> torch.Tensor: + """ + Performs unembedding of the hidden states to logits. This will only sample the final + token of each sequence. + """ + logits = self.unembed(hidden_states, + self._non_transformer.word_unembed, + ragged_batch_info, + gamma=self._non_transformer.final_norm_gamma, + beta=self._non_transformer.final_norm_beta) + + if self.tp_size > 1: + comm_buffer = empty_from(self._comm_logits, (self.tp_size, logits.shape[0], logits.shape[1])) + full_logits = empty_from(self._return_logits, (logits.shape[0], self.vocab_size)) + + dist.all_gather_into_tensor(comm_buffer, logits, group=self._base_mp_group) + + full_logits.copy_(comm_buffer.permute(1, 0, 2).reshape(logits.shape[0], self.vocab_size)) + + return full_logits + else: + return logits + + def forward(self, wrapped_batch: RaggedBatchWrapper) -> torch.Tensor: + residual = self._forward_embed(wrapped_batch) + + residual, hidden_states = self.norm(residual, + None, + gamma=self._transformer[0].ln_attn_gamma, + beta=self._transformer[0].ln_attn_beta) + + for layer_idx in range(self.num_layers): + residual, hidden_states = self._forward_transformer_layer(layer_idx, residual, hidden_states, + wrapped_batch) + + return self._forward_unembed(residual, wrapped_batch) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4b9091b9808c9331467c82250b72753984233b51 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__pycache__/model.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__pycache__/model.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..b80bffe2153f48c25851f8ab001ba9515a7c5e9b Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__pycache__/model.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__pycache__/policy.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__pycache__/policy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d741310483643fa8a5451246ee14426d4cdcc41f Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__pycache__/policy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2cb1aa889291d63dc93bba27494c42e11aa9139f --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .policy import MixtralPolicy diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f362176feb4ebb120904df8cc011c9b444abbdd Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/container.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/container.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94c865de2bddbd3dc3fbefda691e2ab5ac2c7217 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/container.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/model.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4d666acec997373f9481c6107a78153362d2c14 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/model.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/policy.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/policy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae47c7d3aa18e97a0edf2cdeb1b4244115bf452c Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/__pycache__/policy.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/container.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/container.py new file mode 100644 index 0000000000000000000000000000000000000000..6ec4a0552b8f53430083e045b913ef5db412f277 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/container.py @@ -0,0 +1,46 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# Create a container object to save model-specific tensors using the policy file above. + +from deepspeed.inference.v2.model_implementations.common_parameters import * +from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer + + +class MixtralTransformerContainer(LayerContainer): + + qkv_w: UnfusedQKVParameter + attn_out_w: AttentionOutputParameter + moe_gate: MoEGatingWeightParameter + moe_mlp_1: UnfusedMoEGatedMLPParameter + moe_mlp_2: UnfusedMoEMLP2Parameter + attn_norm_gamma: NormParameter + mlp_norm_gamma: NormParameter + + PARAM_MAPPING = { + "input_layernorm.weight": "attn_norm_gamma.params", + "post_attention_layernorm.weight": "mlp_norm_gamma.params", + "self_attn.q_proj.weight": "qkv_w.q_params", + "self_attn.k_proj.weight": "qkv_w.k_params", + "self_attn.v_proj.weight": "qkv_w.v_params", + "self_attn.o_proj.weight": "attn_out_w.params", + "block_sparse_moe.gate.weight": "moe_gate.params", + "block_sparse_moe.experts.*.w1.weight": "moe_mlp_1.gating_experts", + "block_sparse_moe.experts.*.w3.weight": "moe_mlp_1.up_experts", + "block_sparse_moe.experts.*.w2.weight": "moe_mlp_2.experts", + } + + +class MixtralNonTransformerContainer(LayerContainer): + + word_emb: EmbeddingParameter + word_unembed: UnembedParameter + final_norm: NormParameter + + PARAM_MAPPING = { + "model.embed_tokens.weight": "word_emb.params", + "lm_head.weight": "word_unembed.params", + "model.norm.weight": "final_norm.params", + } diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/model.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/model.py new file mode 100644 index 0000000000000000000000000000000000000000..878cd8e31cec07f3955928e2ac49acdbe97b47b5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/model.py @@ -0,0 +1,261 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Iterable, Optional, Tuple + +import torch + +import deepspeed.comm as dist + +from ...allocator import empty_from +from ...config_v2 import RaggedInferenceEngineConfig +from ...inference_utils import ActivationType, DtypeEnum +from ...model_implementations import * +from ...modules.configs import * +from ...modules.interfaces import * +from ...ragged import RaggedBatchWrapper +from ..inference_model_base import ( + DSModelImplementationConfig, + MPType, +) + +from .container import MixtralNonTransformerContainer, MixtralTransformerContainer + + +class MixtralInferenceModel(DSMoETransformerModelBase): + """ + Inference model implementation for Mixtral models. + """ + + _non_transformer: Optional[MixtralNonTransformerContainer] + """ + Embed + unembed container. Specializing the type annotation. + """ + + _transformer: Optional[Iterable[MixtralTransformerContainer]] + """ + Per-layer transformer container. Specializing the type annotation. 
+ """ + """ + Properties ineherited from `DSInferenceModelBase` + """ + + @property + def max_sequence_length(self) -> int: + return self._config.max_position_embeddings + + """ + Properties ineherited from `DSTransformerModelBase` + """ + + @property + def num_layers(self) -> int: + return self._config.num_hidden_layers + + @property + def model_dim(self) -> int: + return self._config.hidden_size + + @property + def vocab_size(self) -> int: + return self._config.vocab_size + + @property + def head_size(self) -> int: + return self.model_dim // self.n_heads + + @property + def n_heads(self) -> int: + return self._config.num_attention_heads + + @property + def intermediate_dim(self) -> int: + return self._config.intermediate_size + + @property + def n_heads_kv(self) -> int: + return self._config.num_key_value_heads + + @property + def activation_dtype(self) -> DtypeEnum: + if self._config.torch_dtype == torch.float16: + return DtypeEnum.fp16 + elif self._config.torch_dtype == torch.bfloat16: + return DtypeEnum.bf16 + else: + raise NotImplementedError("Only fp16 and bf16 are supported") + + @property + def mlp_activation_fn(self) -> ActivationType: + activation = self._config.hidden_act.lower() + if activation == "gelu": + return ActivationType.GEGLU + elif activation == "relu": + return ActivationType.ReGLU + elif activation == "gegelu": + return ActivationType.GEGLU + elif activation == "silu": + return ActivationType.SiGLU + else: + raise NotImplementedError(f"Activation {activation} not supported") + + @property + def norm_type(self) -> NormTypeEnum: + return NormTypeEnum.RMSNorm + + @property + def positional_embedding_type(self) -> PositionalEmbeddingType: + return PositionalEmbeddingType.rotate_half + + @property + def positional_embedding_config(self) -> Optional[RotateHalfConfig]: + """ + The positional embedding configuration for the model. + """ + return RotateHalfConfig(theta_base=self._config.rope_theta) + + """ + Inherited from `DSMoETransformerModelBase` + """ + + @property + def n_experts(self) -> int: + return self._config.num_local_experts + + @property + def n_top_k(self) -> int: + return self._config.num_experts_per_tok + + @property + def normalize_expert_scores(self) -> bool: + return True + + """ + Model implementation + """ + + def __init__(self, config: DSModelImplementationConfig, engine_config: RaggedInferenceEngineConfig, + base_mp_group: MPType) -> None: + """ + Base implementation for initialization. By default, this will initialize + the traditional components of a transformer model: + - Embedding + - QKV projection + - Self attention + - Attention output projection + - Feed forward network + - Normalization + - Unembedding + + Arguments: + config (DSModelImplementationConfig): Model-specific configuration. No assumptions + should be made about this config that are not closely tied to the specific + model implementation. + engine_config (RaggedInferenceEngineConfig): Engine configuration. + base_mp_group (MPType): Base communication group for Tensor-parallel inference. + """ + super().__init__(config, engine_config, base_mp_group) + + self.make_norm_layer() + self.make_qkv_layer() + self.make_attn_layer() + self.make_attn_out_layer() + self.make_moe_layer() + self.make_embedding_layer() + self.make_unembedding_layer() + self._kv_cache_config = None + + def _forward_embed(self, ragged_batch: RaggedBatchWrapper) -> torch.Tensor: + """ + Performs the embedding lookup prior to running the transformer of the model. 
+ + Arguments: + ragged_batch (RaggedBatchWrapper): The batch to embed. + + Returns: + torch.Tensor: The embedded batch. + """ + embed = self.embed(ragged_batch, self._non_transformer.word_emb) + + if embed.shape[-1] != self.model_dim: + raise ValueError(f"Embedding output shape {embed.shape} does not match model_dim {self.model_dim}") + + return embed + + def _forward_transformer(self, layer_idx: int, residual: torch.Tensor, hidden_states: torch.Tensor, + ragged_batch_info: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Executes one (slightly offset) layer of the transformer. This implementation does a peek-ahead + optimization to fuse the layer norm of the next layer into the current layer. + + Arguments: + layer_idx (int): The index of the layer to execute. + residual (torch.Tensor): The residual tensor from the previous layer. + hidden_states (torch.Tensor): The hidden states from the previous layer. These are the + hidden states after pre-normalization. + ragged_batch_info (RaggedBatchWrapper): The batch metadata. + """ + # TODO(cmikeh2): Distribute ragged_batch_info to all modules + + cur_params = self._transformer[layer_idx] + kv_cache = self.state_manager.get_cache(layer_idx) + + hidden_states = self.qkv(hidden_states, cur_params.qkv_w) + hidden_states = self.attn(hidden_states, kv_cache, ragged_batch_info) + hidden_states = self.attn_out(hidden_states, cur_params.attn_out_w) + + if self.tp_size > 1: + dist.all_reduce(hidden_states, group=self._base_mp_group) + + residual, hidden_states = self.norm(residual, hidden_states, cur_params.mlp_norm_gamma) + + hidden_states = self.moe(hidden_states, ragged_batch_info, cur_params.moe_gate, cur_params.moe_mlp_1, + cur_params.moe_mlp_2) + + if self.tp_size > 1: + dist.all_reduce(hidden_states, group=self._base_mp_group) + + if layer_idx != self.num_layers - 1: + next_params = self._transformer[layer_idx + 1] + residual, hidden_states = self.norm(residual, hidden_states, next_params.attn_norm_gamma) + else: + # On last layer, we just need to perform the residual add. Adding into the residual + # here is safe. + residual.add_(hidden_states) + + return residual, hidden_states + + def _forward_unembed(self, hidden_states: torch.Tensor, ragged_batch_info: RaggedBatchWrapper) -> torch.Tensor: + """ + Performs unembedding of the hidden states to logits. This will only sample the final + token of each sequence.
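        A minimal standalone sketch (plain PyTorch with assumed shapes, not the fused DeepSpeed
        kernels) of the top-k gating performed by the `self.moe(...)` call in `_forward_transformer`
        above, using the `n_top_k` and `normalize_expert_scores` properties defined earlier:

            def route_tokens(hidden_states, gate_w, n_top_k):
                # hidden_states: [n_tokens, model_dim], gate_w: [n_experts, model_dim]
                scores = torch.softmax(hidden_states @ gate_w.t(), dim=-1)
                top_scores, top_experts = torch.topk(scores, n_top_k, dim=-1)
                # normalize_expert_scores=True: renormalize the selected scores to sum to 1 per token
                top_scores = top_scores / top_scores.sum(dim=-1, keepdim=True)
                return top_scores, top_experts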
+ """ + logits = self.unembed(hidden_states, + self._non_transformer.word_unembed, + ragged_batch_info, + gamma=self._non_transformer.final_norm) + + if self.tp_size > 1: + comm_buffer = empty_from(self._comm_logits, (self.tp_size, logits.shape[0], logits.shape[1])) + full_logits = empty_from(self._return_logits, (logits.shape[0], self.vocab_size)) + + dist.all_gather_into_tensor(comm_buffer, logits, group=self._base_mp_group) + + full_logits.copy_(comm_buffer.permute(1, 0, 2).reshape(logits.shape[0], self.vocab_size)) + + return full_logits + else: + return logits + + def forward(self, wrapped_batch: RaggedBatchWrapper) -> torch.Tensor: + + residual = self._forward_embed(wrapped_batch) + + residual, hidden_states = self.norm(residual, None, self._transformer[0].attn_norm_gamma, beta=None) + + for layer_idx in range(self.num_layers): + residual, hidden_states = self._forward_transformer(layer_idx, residual, hidden_states, wrapped_batch) + + return self._forward_unembed(residual, wrapped_batch) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/policy.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/policy.py new file mode 100644 index 0000000000000000000000000000000000000000..2f0087919720d040ee53eda7723268f2635db585 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mixtral/policy.py @@ -0,0 +1,31 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Any + +from ...config_v2 import RaggedInferenceEngineConfig +from ..inference_policy_base import ContainerMap, InferenceV2Policy +from .container import MixtralTransformerContainer, MixtralNonTransformerContainer +from .model import MixtralInferenceModel + + +class MixtralPolicy(InferenceV2Policy): + + def instantiate_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> MixtralInferenceModel: + return MixtralInferenceModel(config=self._model_config, engine_config=engine_config, base_mp_group=mp_group) + + def build_container_map(self) -> ContainerMap: + + map = ContainerMap() + + transformer_containers = [MixtralTransformerContainer(self.model) for _ in range(self.model.num_layers)] + + map.set_transformer_params(['model.layers'], transformer_containers) + + map.set_non_transformer_params(MixtralNonTransformerContainer(self.model)) + + map.set_unmapped_params([]) + + return map diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c0f24d5243b820748a9ddb9a1380fd51644ae917 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .policy import OPTPolicy diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/container.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/container.py new file mode 100644 index 0000000000000000000000000000000000000000..e97599ef8e50e3dc30ce5540bd4721bb055df87e --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/container.py @@ -0,0 +1,94 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# Create a container object to save model-specific tensors using the policy file above. + +from ..common_parameters import * +from ..layer_container_base import LayerContainer +''' + # HF OPT model looks like this: + +OPTForCausalLM( + (model): OPTModel( + (decoder): OPTDecoder( + (embed_tokens): Embedding(50272, 768, padding_idx=1) + (embed_positions): OPTLearnedPositionalEmbedding(2050, 768) + (final_layer_norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True) + (layers): ModuleList( + (0-11): 12 x OPTDecoderLayer( + (self_attn): OPTAttention( + (k_proj): Linear(in_features=768, out_features=768, bias=True) + (v_proj): Linear(in_features=768, out_features=768, bias=True) + (q_proj): Linear(in_features=768, out_features=768, bias=True) + (out_proj): Linear(in_features=768, out_features=768, bias=True) + ) + (activation_fn): ReLU() + (self_attn_layer_norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True) + (fc1): Linear(in_features=768, out_features=3072, bias=True) + (fc2): Linear(in_features=3072, out_features=768, bias=True) + (final_layer_norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True) + ) + ) + ) + ) + (lm_head): Linear(in_features=768, out_features=50272, bias=False) +) + +''' + + +class OPTTransformerContainer(LayerContainer): + """ + Transformer layer container for the OPT model. + """ + qkv_w: UnfusedQKVParameter + qkv_b: UnfusedQKVParameter + attn_out_w: AttentionOutputParameter + attn_out_b: AttentionOutputParameter + mlp_1_w: MLP1Parameter + mlp_1_b: MLP1Parameter + mlp_2_w: MLP2Parameter + mlp_2_b: MLP2Parameter + attn_norm_beta: NormParameter + attn_norm_gamma: NormParameter + mlp_norm_beta: NormParameter + mlp_norm_gamma: NormParameter + + PARAM_MAPPING = { + "self_attn.q_proj.weight": "qkv_w.q_params", + "self_attn.q_proj.bias": "qkv_b.q_params", + "self_attn.k_proj.weight": "qkv_w.k_params", + "self_attn.k_proj.bias": "qkv_b.k_params", + "self_attn.v_proj.weight": "qkv_w.v_params", + "self_attn.v_proj.bias": "qkv_b.v_params", + "self_attn.out_proj.weight": "attn_out_w.params", + "self_attn.out_proj.bias": "attn_out_b.params", + "fc1.weight": "mlp_1_w.params", + "fc1.bias": "mlp_1_b.params", + "fc2.weight": "mlp_2_w.params", + "fc2.bias": "mlp_2_b.params", + "self_attn_layer_norm.weight": "attn_norm_gamma.params", + "self_attn_layer_norm.bias": "attn_norm_beta.params", + "final_layer_norm.weight": "mlp_norm_gamma.params", + "final_layer_norm.bias": "mlp_norm_beta.params", + } + + +class OPTNonTransformerContainer(LayerContainer): + """ + Non-Transformer layer container for the OPT model. 
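    Note that OPT ties its input and output embeddings: the mapping below routes
    `decoder.embed_tokens.weight` to both `word_emb.params` and `word_unembed.params`, which is why
    the policy leaves `lm_head.weight` in its unmapped-parameter list.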
+ """ + word_emb: EmbeddingParameter + word_emb_pos: EmbeddingParameter + word_unembed: UnembedParameter + final_norm_w: NormParameter + final_norm_b: NormParameter + + PARAM_MAPPING = { + "*decoder.embed_tokens.weight": ["word_emb.params", "word_unembed.params"], + "*decoder.embed_positions.weight": "word_emb_pos.params", + "*decoder.final_layer_norm.weight": "final_norm_w.params", + "*decoder.final_layer_norm.bias": "final_norm_b.params", + } diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/model.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/model.py new file mode 100644 index 0000000000000000000000000000000000000000..adf011d8f1a7884f47569ee186336dcc77b355bb --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/model.py @@ -0,0 +1,197 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Iterable, Optional, Tuple + +import torch + +import deepspeed.comm as dist + +from ...allocator import empty_from +from ...inference_utils import ActivationType, DtypeEnum +from ...model_implementations import * +from ...modules.configs import * +from ...ragged import RaggedBatchWrapper +from .container import OPTNonTransformerContainer, OPTTransformerContainer + +from ...modules.heuristics import instantiate_embed + + +class OPTInferenceModel(DSTransformerModelBase): + """ + Inference model implementation for ragged batching for OPT models. + """ + + _non_transformer: Optional[OPTNonTransformerContainer] + """ + Embed + unembed container. Specializing the type annotation. + """ + + _transformer: Optional[Iterable[OPTTransformerContainer]] + """ + Per-layer transformer container. Specializing the type annotation. + """ + """ + Properties ineherited from `DSInferenceModelBase` + """ + + @property + def max_sequence_length(self) -> int: + return self._config.max_seq_length + + """ + Properties ineherited from `DSTransformerModelBase` + """ + + @property + def num_layers(self) -> int: + return self._config.num_hidden_layers + + @property + def model_dim(self) -> int: + return self._config.hidden_size + + @property + def vocab_size(self) -> int: + return self._config.vocab_size + + @property + def head_size(self) -> int: + return self.model_dim // self.n_heads + + @property + def n_heads(self) -> int: + return self._config.num_attention_heads + + @property + def intermediate_dim(self) -> int: + return self._config.ffn_dim + + @property + def activation_dtype(self) -> DtypeEnum: + if self._config.torch_dtype == torch.float16: + return DtypeEnum.fp16 + elif self._config.torch_dtype == torch.bfloat16: + return DtypeEnum.bf16 + else: + raise NotImplementedError("Only fp16 and bf16 are supported") + + @property + def mlp_activation_fn(self) -> ActivationType: + return ActivationType.RELU + + @property + def norm_type(self) -> NormTypeEnum: + return NormTypeEnum.LayerNorm + + @property + def positional_embedding_type(self) -> PositionalEmbeddingType: + return PositionalEmbeddingType.none + + @property + def positional_embedding_config(self) -> Optional[RotateHalfConfig]: + return None + + """ + Overrides of ``DSTransformerModelBase`` methods + """ + + def make_embedding_layer(self) -> None: + """ + Performs setup and creates embedding DSModule. Since OPT includes trained + positional embeddings, we will override the base model implementation. 
+ """ + + embed_config = DSEmbeddingsConfig(max_tokens=self._engine_config.state_manager.max_ragged_batch_size, + residual_dtype=self.activation_dtype, + embedding_dim=self.model_dim, + positional_embedding=True, + positional_offset=2) + + self.embed = instantiate_embed(embed_config, self._engine_config) + + """ + Forward implementations + """ + + def _forward_embed(self, ragged_batch: RaggedBatchWrapper) -> torch.Tensor: + embed = self.embed(ragged_batch, self._non_transformer.word_emb, self._non_transformer.word_emb_pos) + if embed.shape[-1] != self.model_dim: + raise ValueError(f"Embedding output shape {embed.shape} does not match model_dim {self.model_dim}") + + return embed + + def _forward_transformer_layer(self, layer_idx: int, residual: torch.Tensor, hidden_states: torch.Tensor, + ragged_batch_info: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor]: + # TODO(cmikeh2): Distribute ragged_batch_info to all modules + + cur_params = self._transformer[layer_idx] + kv_cache = self.state_manager.get_cache(layer_idx) + + hidden_states = self.qkv(hidden_states, cur_params.qkv_w, b=cur_params.qkv_b) + hidden_states = self.attn(hidden_states, kv_cache, ragged_batch_info) + hidden_states = self.attn_out(hidden_states, cur_params.attn_out_w, b=cur_params.attn_out_b) + + if self.tp_size > 1: + dist.all_reduce(hidden_states, group=self._base_mp_group) + + residual, hidden_states = self.norm(residual, + hidden_states, + cur_params.mlp_norm_gamma, + beta=cur_params.mlp_norm_beta) + + # Should be configurable in the future + hidden_states = self.mlp_1(hidden_states, cur_params.mlp_1_w, b=cur_params.mlp_1_b) + hidden_states = self.mlp_2(hidden_states, cur_params.mlp_2_w, b=cur_params.mlp_2_b) + + if self.tp_size > 1: + dist.all_reduce(hidden_states, group=self._base_mp_group) + + if layer_idx != self.num_layers - 1: + next_params = self._transformer[layer_idx + 1] + residual, hidden_states = self.norm(residual, + hidden_states, + next_params.attn_norm_gamma, + beta=next_params.attn_norm_beta) + else: + # On last layer, we just need to perform the residual add. Adding into the residual + # here is safe. 
+ residual.add_(hidden_states) + + return residual, hidden_states + + def _forward_unembed(self, hidden_states: torch.Tensor, ragged_batch_info: RaggedBatchWrapper) -> torch.Tensor: + logits = self.unembed(hidden_states, + self._non_transformer.word_unembed, + ragged_batch_info, + gamma=self._non_transformer.final_norm_w, + beta=self._non_transformer.final_norm_b) + + if self.tp_size > 1: + comm_buffer = empty_from(self._comm_logits, (self.tp_size, logits.shape[0], logits.shape[1])) + full_logits = empty_from(self._return_logits, (logits.shape[0], self.vocab_size)) + + dist.all_gather_into_tensor(comm_buffer, logits, group=self._base_mp_group) + + full_logits.copy_(comm_buffer.permute(1, 0, 2).reshape(logits.shape[0], self.vocab_size)) + + return full_logits + else: + return logits + + def forward(self, wrapped_batch: RaggedBatchWrapper) -> torch.Tensor: + + residual = self._forward_embed(wrapped_batch) + + residual, hidden_states = self.norm(residual, + None, + self._transformer[0].attn_norm_gamma, + beta=self._transformer[0].attn_norm_beta) + + for layer_idx in range(self.num_layers): + residual, hidden_states = self._forward_transformer_layer(layer_idx, residual, hidden_states, + wrapped_batch) + + return self._forward_unembed(residual, wrapped_batch) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/policy.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/policy.py new file mode 100644 index 0000000000000000000000000000000000000000..d57d5beb48d515fbd6d4c5a0741edf1a4867136f --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/policy.py @@ -0,0 +1,30 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Any + +from ...config_v2 import RaggedInferenceEngineConfig +from ..inference_policy_base import ContainerMap, InferenceV2Policy +from .container import OPTNonTransformerContainer, OPTTransformerContainer +from .model import OPTInferenceModel + + +class OPTPolicy(InferenceV2Policy): + + def instantiate_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> OPTInferenceModel: + return OPTInferenceModel(config=self._model_config, engine_config=engine_config, base_mp_group=mp_group) + + def build_container_map(self) -> ContainerMap: + map = ContainerMap() + + transformer_containers = [OPTTransformerContainer(self.model) for _ in range(self.model.num_layers)] + + map.set_transformer_params(['model.decoder.layers', 'decoder.layers'], transformer_containers) + + map.set_non_transformer_params(OPTNonTransformerContainer(self.model)) + + map.set_unmapped_params(['lm_head.weight']) + + return map diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3ab107e75a9147f30176f9e3f6bc575898d8e572 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .policy import PhiPolicy diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e0c6cbec56eea9fe208c44377b6987e06de5bbba Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__pycache__/containers.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__pycache__/containers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..09032973e2424bf7e2295999f20a4c6298c5f4fb Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__pycache__/containers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__pycache__/model.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2a3bb2ba69b960afcb68c9e4a9b5fc0abe160259 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__pycache__/model.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__pycache__/policy.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__pycache__/policy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8c0b1f57442ba65de730bee1d8bfe72ddc7c33f Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/__pycache__/policy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/containers.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/containers.py new file mode 100644 index 0000000000000000000000000000000000000000..21f07eb8c99a037243086688b551bbe3fa9ec3d6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/containers.py @@ -0,0 +1,91 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# Create a container object to save model-specific tensors using the policy file above. 
+ +from ..common_parameters import * +from ..layer_container_base import LayerContainer +''' + # HF Phi-2 model looks like this: + +PhiForCausalLM( + (model): PhiModel( + (embed_tokens): Embedding(51200, 2560) + (embed_dropout): Dropout(p=0.0, inplace=False) + (layers): ModuleList( + (0-31): 32 x PhiDecoderLayer( + (self_attn): PhiAttention( + (q_proj): Linear(in_features=2560, out_features=2560, bias=True) + (k_proj): Linear(in_features=2560, out_features=2560, bias=True) + (v_proj): Linear(in_features=2560, out_features=2560, bias=True) + (dense): Linear(in_features=2560, out_features=2560, bias=True) + (rotary_emb): PhiRotaryEmbedding() + ) + (mlp): PhiMLP( + (activation_fn): NewGELUActivation() + (fc1): Linear(in_features=2560, out_features=10240, bias=True) + (fc2): Linear(in_features=10240, out_features=2560, bias=True) + ) + (input_layernorm): LayerNorm((2560,), eps=1e-05, elementwise_affine=True) + (resid_dropout): Dropout(p=0.1, inplace=False) + ) + ) + (final_layernorm): LayerNorm((2560,), eps=1e-05, elementwise_affine=True) + ) + (lm_head): Linear(in_features=2560, out_features=51200, bias=True) +) +''' + + +class PhiTransformerContainer(LayerContainer): + """ + Transformer layer container for the Phi model. + """ + qkv_w: UnfusedQKVParameter + qkv_b: UnfusedQKVParameter + attn_out_w: AttentionOutputParameter + attn_out_b: AttentionOutputParameter + mlp_1_w: MLP1Parameter + mlp_1_b: MLP1Parameter + mlp_2_w: MLP2Parameter + mlp_2_b: MLP2Parameter + ln_gamma: NormParameter + ln_beta: NormParameter + + PARAM_MAPPING = { + "self_attn.q_proj.weight": "qkv_w.q_params", + "self_attn.k_proj.weight": "qkv_w.k_params", + "self_attn.v_proj.weight": "qkv_w.v_params", + "self_attn.q_proj.bias": "qkv_b.q_params", + "self_attn.k_proj.bias": "qkv_b.k_params", + "self_attn.v_proj.bias": "qkv_b.v_params", + "self_attn.dense.weight": "attn_out_w.params", + "self_attn.dense.bias": "attn_out_b.params", + "mlp.fc1.weight": "mlp_1_w.params", + "mlp.fc1.bias": "mlp_1_b.params", + "mlp.fc2.weight": "mlp_2_w.params", + "mlp.fc2.bias": "mlp_2_b.params", + "input_layernorm.weight": "ln_gamma.params", + "input_layernorm.bias": "ln_beta.params", + } + + +class PhiNonTransformerContainer(LayerContainer): + """ + Non-Transformer layer container for the Phi model. + """ + word_emb: EmbeddingParameter + word_unembed_w: UnembedParameter + word_unembed_b: UnembedParameter + final_norm_gamma: NormParameter + final_norm_beta: NormParameter + + PARAM_MAPPING = { + "model.embed_tokens.weight": "word_emb.params", + "model.final_layernorm.weight": "final_norm_gamma.params", + "model.final_layernorm.bias": "final_norm_beta.params", + "lm_head.weight": "word_unembed_w.params", + "lm_head.bias": "word_unembed_b.params", + } diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/model.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/model.py new file mode 100644 index 0000000000000000000000000000000000000000..2d5826810cb57bfb7fe9b7df75edf1cf947bd1f8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/model.py @@ -0,0 +1,199 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Iterable, Optional, Tuple + +import torch + +import deepspeed.comm as dist + +from ...allocator import empty_from +from ...inference_utils import ActivationType, DtypeEnum +from .. 
import * +from ...modules.configs import * +from ...modules.interfaces import * +from ...ragged import RaggedBatchWrapper + +from .containers import PhiNonTransformerContainer, PhiTransformerContainer + + +class PhiInferenceModel(DSTransformerModelBase): + """ + Inference model implementation for ragged batching for Llama-2 models. + """ + + _non_transformer: Optional[PhiNonTransformerContainer] + """ + Embed + unembed container. Specializing the type annotation. + """ + + _transformer: Optional[Iterable[PhiTransformerContainer]] + """ + Per-layer transformer container. Specializing the type annotation. + """ + """ + Properties inherited from `DSInferenceModelBase` + """ + + @property + def max_sequence_length(self) -> int: + return self._config.max_seq_length + + """ + Properties inherited from `DSTransformerModelBase` + """ + + @property + def num_layers(self) -> int: + return self._config.num_hidden_layers + + @property + def model_dim(self) -> int: + return self._config.hidden_size + + @property + def vocab_size(self) -> int: + return self._config.vocab_size + + @property + def head_size(self) -> int: + return self.model_dim // self.n_heads + + @property + def n_heads(self) -> int: + return self._config.num_attention_heads + + @property + def intermediate_dim(self) -> int: + return self._config.intermediate_size + + @property + def n_heads_kv(self) -> int: + return self._config.num_key_value_heads + + @property + def activation_dtype(self) -> DtypeEnum: + if self._config.torch_dtype == torch.float16: + return DtypeEnum.fp16 + elif self._config.torch_dtype == torch.bfloat16: + return DtypeEnum.bf16 + else: + raise NotImplementedError("Only fp16 and bf16 are supported") + + @property + def mlp_activation_fn(self) -> ActivationType: + return ActivationType.GELU + + @property + def norm_type(self) -> NormTypeEnum: + return NormTypeEnum.LayerNorm + + @property + def positional_embedding_type(self) -> PositionalEmbeddingType: + return PositionalEmbeddingType.rotate_half + + @property + def positional_embedding_config(self) -> Optional[RotateHalfConfig]: + rotary_dim = int(self._config.partial_rotary_factor * self.head_size) + return RotateHalfConfig(rotate_dim=rotary_dim, theta_base=self._config.rope_theta) + + """ + Forward implementations + """ + + def _forward_embed(self, ragged_batch: RaggedBatchWrapper) -> torch.Tensor: + """ + Performs the embedding lookup prior to running the transformer of the model. + + Arguments: + ragged_batch (RaggedBatchWrapper): The batch to embed. + + Returns: + torch.Tensor: The embedded batch. + """ + embed = self.embed(ragged_batch, self._non_transformer.word_emb) + + if embed.shape[-1] != self.model_dim: + raise ValueError(f"Embedding output shape {embed.shape} does not match model_dim {self.model_dim}") + + return embed + + def _forward_transformer_layer(self, layer_idx: int, residual: torch.Tensor, hidden_states: torch.Tensor, + ragged_batch_info: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Executes one (slightly offset) layer of the transformer. This implementation does a peak-ahead + optimization to fuse the layer norm of the next layer into the current layer. + + Arguments: + layer_idx (int): The index of the layer to execute. + residual (torch.Tensor): The residual tensor from the previous layer. + hidden_states (torch.Tensor): The hidden states from the previous layer. This is the + hidden states after pre normalization. + ragged_batch_info (RaggedBatchWrapper): The batch metadata. 
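The `positional_embedding_config` property above rotates only a fraction of each head's channels (`partial_rotary_factor`). A small arithmetic sketch of how `rotate_dim` falls out, using assumed Phi-2-like numbers rather than values read from an actual config:

# Assumed example values; in the model above they come from the HF Phi config object.
hidden_size = 2560
num_attention_heads = 32
partial_rotary_factor = 0.4

head_size = hidden_size // num_attention_heads        # 80
rotary_dim = int(partial_rotary_factor * head_size)   # 32

# Only the first `rotary_dim` channels of each head receive the rotary embedding;
# the remaining head_size - rotary_dim channels pass through unrotated.
print(head_size, rotary_dim)  # 80 32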
+ """ + cur_params = self._transformer[layer_idx] + kv_cache = self.state_manager.get_cache(layer_idx) + + attn_ln_out = hidden_states + attn_hidden_state = self.qkv(attn_ln_out, cur_params.qkv_w, b=cur_params.qkv_b) + attn_hidden_state = self.attn(attn_hidden_state, kv_cache, ragged_batch_info) + attention_output = self.attn_out(attn_hidden_state, cur_params.attn_out_w, b=cur_params.attn_out_b) + + mlp_ln_out = hidden_states + mlp_hidden_state = self.mlp_1(mlp_ln_out, cur_params.mlp_1_w, b=cur_params.mlp_1_b) + mlp_output = self.mlp_2(mlp_hidden_state, cur_params.mlp_2_w, b=cur_params.mlp_2_b) + + mlp_output.add_(attention_output) + + if self.tp_size > 1: + dist.all_reduce(mlp_output, group=self._base_mp_group) + + if layer_idx != self.num_layers - 1: + next_params = self._transformer[layer_idx + 1] + residual, mlp_output = self.norm(residual, mlp_output, next_params.ln_gamma, beta=next_params.ln_beta) + else: + # On last layer, we just need to perform the residual add. Adding into the residual + # here is safe. + residual.add_(mlp_output) + + return residual, mlp_output + + def _forward_unembed(self, hidden_states: torch.Tensor, ragged_batch_info: RaggedBatchWrapper) -> torch.Tensor: + """ + Performs unembedding of the hidden states to logits. This will only sample the final + token of each sequence. + """ + logits = self.unembed(hidden_states, + self._non_transformer.word_unembed_w, + ragged_batch_info, + bias=self._non_transformer.word_unembed_b, + gamma=self._non_transformer.final_norm_gamma, + beta=self._non_transformer.final_norm_beta) + + if self.tp_size > 1: + comm_buffer = empty_from(self._comm_logits, (self.tp_size, logits.shape[0], logits.shape[1])) + full_logits = empty_from(self._return_logits, (logits.shape[0], self.vocab_size)) + + dist.all_gather_into_tensor(comm_buffer, logits, group=self._base_mp_group) + + full_logits.copy_(comm_buffer.permute(1, 0, 2).reshape(logits.shape[0], self.vocab_size)) + + return full_logits + else: + return logits + + def forward(self, wrapped_batch: RaggedBatchWrapper) -> torch.Tensor: + residual = self._forward_embed(wrapped_batch) + + residual, hidden_states = self.norm(residual, + None, + gamma=self._transformer[0].ln_gamma, + beta=self._transformer[0].ln_beta) + + for layer_idx in range(self.num_layers): + residual, hidden_states = self._forward_transformer_layer(layer_idx, residual, hidden_states, + wrapped_batch) + + return self._forward_unembed(residual, wrapped_batch) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/policy.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/policy.py new file mode 100644 index 0000000000000000000000000000000000000000..4b081a8e61bde9304c8f1bee920bb8729f1c6aef --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/phi/policy.py @@ -0,0 +1,32 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Any + +from ...config_v2 import RaggedInferenceEngineConfig +from ..inference_policy_base import ContainerMap, InferenceV2Policy +from .containers import PhiNonTransformerContainer, PhiTransformerContainer +from .model import PhiInferenceModel + + +class PhiPolicy(InferenceV2Policy): + + def instantiate_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> PhiInferenceModel: + return PhiInferenceModel(config=self._model_config, engine_config=engine_config, base_mp_group=mp_group) + + def build_container_map(self) -> ContainerMap: + map = ContainerMap() + + trans_container_cls = PhiTransformerContainer + transformer_containers = [trans_container_cls(self.model) for _ in range(self.model.num_layers)] + + map.set_transformer_params(['model.layers'], transformer_containers) + + map.set_non_transformer_params(PhiNonTransformerContainer(self.model)) + + map.set_unmapped_params( + [f'model.layers.{i}.self_attn.rotary_emb.inv_freq' for i in range(self.model.num_layers)]) + + return map diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..18206048fa299b8ced074aba9ea7f5212d3e0e3b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .policy import QwenPolicy diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f50761871eaa65c8be756f9adbf8dde9de068950 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/container.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/container.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bd8df1681e159403eea976a880a41c44814b5982 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/container.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/model.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c62561c2075bd57151047b541df724f05c11a60 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/model.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/policy.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/policy.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..2ff3e250a553dd6fb9ab37ec2174009d2555824a Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/__pycache__/policy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/container.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/container.py new file mode 100644 index 0000000000000000000000000000000000000000..313de68555b90f1dd38935de286d93f9cbd8b382 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/container.py @@ -0,0 +1,77 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# Create a container object to save model-specific tensors using the policy file above. + +from ..common_parameters import * +from ..layer_container_base import LayerContainer +''' + # HF Qwen model looks like this: + +QWenLMHeadModel( + (transformer): QWenModel( + (wte): Embedding(151936, 4096) + (drop): Dropout(p=0.0, inplace=False) + (rotary_emb): RotaryEmbedding() + (h): ModuleList( + (0-31): 32 x QWenBlock( + (ln_1): RMSNorm() + (attn): QWenAttention( + (c_attn): Linear(in_features=4096, out_features=12288, bias=True) + (c_proj): Linear(in_features=4096, out_features=4096, bias=False) + (attn_dropout): Dropout(p=0.0, inplace=False) + ) + (ln_2): RMSNorm() + (mlp): QWenMLP( + (w1): Linear(in_features=4096, out_features=11008, bias=False) + (w2): Linear(in_features=4096, out_features=11008, bias=False) + (c_proj): Linear(in_features=11008, out_features=4096, bias=False) + ) + ) + ) + (ln_f): RMSNorm() + ) + (lm_head): Linear(in_features=4096, out_features=151936, bias=False) +) +''' + + +class QwenTransformerContainer(LayerContainer): + """ + Transformer layer container for the Qwen model. + """ + qkv_w: FusedQKVParameter + qkv_b: FusedQKVParameter + attn_out_w: AttentionOutputParameter + mlp_1_w: GatedMLPParameter + mlp_2_w: MLP2Parameter + attn_norm_gamma: NormParameter + mlp_norm_gamma: NormParameter + + PARAM_MAPPING = { + "attn.c_attn.weight": "qkv_w.params", + "attn.c_attn.bias": "qkv_b.params", + "attn.c_proj.weight": "attn_out_w.params", + "mlp.w1.weight": "mlp_1_w.up_params", + "mlp.w2.weight": "mlp_1_w.gate_params", + "mlp.c_proj.weight": "mlp_2_w.params", + "ln_1.weight": "attn_norm_gamma.params", + "ln_2.weight": "mlp_norm_gamma.params", + } + + +class QwenNonTransformerContainer(LayerContainer): + """ + Non-Transformer layer container for the Qwen model. + """ + word_emb: EmbeddingParameter + word_unembed: UnembedParameter + final_norm: NormParameter + + PARAM_MAPPING = { + "transformer.wte.weight": "word_emb.params", + "transformer.ln_f.weight": "final_norm.params", + "lm_head.weight": "word_unembed.params", + } diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/model.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/model.py new file mode 100644 index 0000000000000000000000000000000000000000..e867e4be67133cb512c737db3a3c45cb2294e404 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/model.py @@ -0,0 +1,223 @@ +# Copyright (c) Microsoft Corporation. 
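Qwen's attention above uses a single fused `c_attn` projection (out_features = 3 x 4096 = 12288 in the printed module tree), which is why the container maps it onto a `FusedQKVParameter` rather than three separate tensors. A small sketch of how such a fused weight decomposes, with toy sizes and an assumed [Q; K; V] row layout:

import torch

hidden = 8                                   # toy stand-in for 4096
fused_w = torch.randn(3 * hidden, hidden)    # assumed layout: Q, K, V stacked on dim 0

q_w, k_w, v_w = torch.chunk(fused_w, 3, dim=0)

x = torch.randn(2, hidden)
qkv = x @ fused_w.t()                        # one GEMM produces all three projections
q, k, v = torch.chunk(qkv, 3, dim=-1)

# The fused GEMM matches the three separate projections.
assert torch.allclose(q, x @ q_w.t(), atol=1e-6)
assert torch.allclose(v, x @ v_w.t(), atol=1e-6)
print(q.shape, k.shape, v.shape)             # three tensors of shape [2, 8]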
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Iterable, Optional, Tuple + +import torch + +import deepspeed.comm as dist + +from ...allocator import empty_from +from ...inference_utils import ActivationType, DtypeEnum +from .. import * +from ...modules.configs import * +from ...modules.interfaces import * +from ...modules import heuristics +from ...ragged import RaggedBatchWrapper + +from .container import QwenNonTransformerContainer, QwenTransformerContainer + + +class QwenInferenceModel(DSTransformerModelBase): + """ + Inference model implementation for ragged batching for Llama-2 models. + """ + + _non_transformer: Optional[QwenNonTransformerContainer] + """ + Embed + unembed container. Specializing the type annotation. + """ + + _transformer: Optional[Iterable[QwenTransformerContainer]] + """ + Per-layer transformer container. Specializing the type annotation. + """ + """ + Properties ineherited from `DSInferenceModelBase` + """ + + @property + def max_sequence_length(self) -> int: + return self._config.max_seq_length + + """ + Properties ineherited from `DSTransformerModelBase` + """ + + @property + def num_layers(self) -> int: + return self._config.num_hidden_layers + + @property + def model_dim(self) -> int: + return self._config.hidden_size + + @property + def vocab_size(self) -> int: + return self._config.vocab_size + + @property + def head_size(self) -> int: + return self.model_dim // self.n_heads + + @property + def n_heads(self) -> int: + return self._config.num_attention_heads + + @property + def intermediate_dim(self) -> int: + return self._config.intermediate_size // 2 + + @property + def n_heads_kv(self) -> int: + return self._config.hidden_size // self._config.kv_channels + + @property + def activation_dtype(self) -> DtypeEnum: + autoset_precision = self._config.bf16 + self._config.fp16 == 0 + if autoset_precision: + return DtypeEnum.fp16 + if self._config.fp16: + return DtypeEnum.fp16 + elif self._config.bf16: + # TODO(ZonePG): bf16 inference results may be different from huggingface bf16, + # because in rms_norm, Qwen still use float() instead of bf16 + return DtypeEnum.bf16 + else: + raise NotImplementedError("Only fp16 and bf16 are supported") + + @property + def mlp_activation_fn(self) -> ActivationType: + return ActivationType.SiGLU + + @property + def norm_type(self) -> NormTypeEnum: + return NormTypeEnum.RMSNorm + + @property + def positional_embedding_type(self) -> PositionalEmbeddingType: + return PositionalEmbeddingType.rotate_half + + @property + def positional_embedding_config(self) -> Optional[RotateHalfConfig]: + return RotateHalfConfig(theta_base=self._config.rotary_emb_base) + + def make_norm_layer(self) -> None: + """ + Instantiates the normalization layer for the model. This sets the `self.norm` attribute. + + TODO(cmikeh2): In the future we'll distinguish between the different norm objects, + but for now we'll just use the same one for all of them. 
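Two properties of `QwenInferenceModel` above lean on quirks of the Qwen HF config: `intermediate_dim` halves `intermediate_size` because the config counts both gated branches, and `n_heads_kv` is recovered from `kv_channels`. A worked example with config values assumed to be consistent with the module tree printed earlier (not read from a real checkpoint):

# Assumed Qwen-7B-style config values.
hidden_size = 4096
intermediate_size = 22016    # counts w1 and w2 together; each branch is 11008 wide
kv_channels = 128            # channels per head
num_attention_heads = 32

per_branch_intermediate = intermediate_size // 2      # 11008, matches w1/w2 above
n_heads_kv = hidden_size // kv_channels               # 32 == num_attention_heads (MHA)
head_size = hidden_size // num_attention_heads        # 128

print(per_branch_intermediate, n_heads_kv, head_size)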
+ """ + norm_config = DSNormConfig( + max_tokens=self._engine_config.state_manager.max_ragged_batch_size, + type=self.norm_type, + channels=self.model_dim, + residual_dtype=self.activation_dtype, + input_dtype=self.activation_dtype, + output_dtype=self.activation_dtype, + eps=self._config.layer_norm_epsilon, + ) + + self.norm = heuristics.instantiate_pre_norm(norm_config, self._engine_config) + + """ + Forward implementations + """ + + def _forward_embed(self, ragged_batch: RaggedBatchWrapper) -> torch.Tensor: + """ + Performs the embedding lookup prior to running the transformer of the model. + + Arguments: + ragged_batch (RaggedBatchWrapper): The batch to embed. + + Returns: + torch.Tensor: The embedded batch. + """ + embed = self.embed(ragged_batch, self._non_transformer.word_emb) + + if embed.shape[-1] != self.model_dim: + raise ValueError(f"Embedding output shape {embed.shape} does not match model_dim {self.model_dim}") + + return embed + + def _forward_transformer_layer(self, layer_idx: int, residual: torch.Tensor, hidden_states: torch.Tensor, + ragged_batch_info: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Executes one (slightly offset) layer of the transformer. This implementation does a peak-ahead + optimization to fuse the layer norm of the next layer into the current layer. + + Arguments: + layer_idx (int): The index of the layer to execute. + residual (torch.Tensor): The residual tensor from the previous layer. + hidden_states (torch.Tensor): The hidden states from the previous layer. This is the + hidden states after pre normalization. + ragged_batch_info (RaggedBatchWrapper): The batch metadata. + """ + # TODO(cmikeh2): Distribute ragged_batch_info to all modules + + cur_params = self._transformer[layer_idx] + kv_cache = self.state_manager.get_cache(layer_idx) + + hidden_states = self.qkv(hidden_states, cur_params.qkv_w, b=cur_params.qkv_b) + hidden_states = self.attn(hidden_states, kv_cache, ragged_batch_info) + hidden_states = self.attn_out(hidden_states, cur_params.attn_out_w, b=None) + + if self.tp_size > 1: + dist.all_reduce(hidden_states, group=self._base_mp_group) + + residual, hidden_states = self.norm(residual, hidden_states, cur_params.mlp_norm_gamma, beta=None) + + # Should be configurable in the future + hidden_states = self.mlp_1(hidden_states, cur_params.mlp_1_w, b=None) + hidden_states = self.mlp_2(hidden_states, cur_params.mlp_2_w, b=None) + + if self.tp_size > 1: + dist.all_reduce(hidden_states, group=self._base_mp_group) + + if layer_idx != self.num_layers - 1: + next_params = self._transformer[layer_idx + 1] + residual, hidden_states = self.norm(residual, hidden_states, next_params.attn_norm_gamma, beta=None) + else: + # On last layer, we just need to perform the residual add. Adding into the residual + # here is safe. + residual.add_(hidden_states) + + return residual, hidden_states + + def _forward_unembed(self, hidden_states: torch.Tensor, ragged_batch_info: RaggedBatchWrapper) -> torch.Tensor: + """ + Performs unembedding of the hidden states to logits. This will only sample the final + token of each sequence. 
+ """ + logits = self.unembed(hidden_states, + self._non_transformer.word_unembed, + ragged_batch_info, + gamma=self._non_transformer.final_norm) + + if self.tp_size > 1: + comm_buffer = empty_from(self._comm_logits, (self.tp_size, logits.shape[0], logits.shape[1])) + full_logits = empty_from(self._return_logits, (logits.shape[0], self.vocab_size)) + + dist.all_gather_into_tensor(comm_buffer, logits, group=self._base_mp_group) + + full_logits.copy_(comm_buffer.permute(1, 0, 2).reshape(logits.shape[0], self.vocab_size)) + + return full_logits + else: + return logits + + def forward(self, wrapped_batch: RaggedBatchWrapper) -> torch.Tensor: + + residual = self._forward_embed(wrapped_batch) + + residual, hidden_states = self.norm(residual, None, self._transformer[0].attn_norm_gamma, beta=None) + + for layer_idx in range(self.num_layers): + residual, hidden_states = self._forward_transformer_layer(layer_idx, residual, hidden_states, + wrapped_batch) + + return self._forward_unembed(residual, wrapped_batch) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/policy.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/policy.py new file mode 100644 index 0000000000000000000000000000000000000000..a9263f553621ec715cc5527e95034b842be0132b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen/policy.py @@ -0,0 +1,30 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Any + +from ...config_v2 import RaggedInferenceEngineConfig +from ..inference_policy_base import ContainerMap, InferenceV2Policy +from .container import QwenNonTransformerContainer, QwenTransformerContainer +from .model import QwenInferenceModel + + +class QwenPolicy(InferenceV2Policy): + + def instantiate_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> QwenInferenceModel: + return QwenInferenceModel(config=self._model_config, engine_config=engine_config, base_mp_group=mp_group) + + def build_container_map(self) -> ContainerMap: + map = ContainerMap() + + transformer_containers = [QwenTransformerContainer(self.model) for _ in range(self.model.num_layers)] + + map.set_transformer_params(['transformer.h'], transformer_containers) + + map.set_non_transformer_params(QwenNonTransformerContainer(self.model)) + + map.set_unmapped_params(['transformer.rotary_emb.inv_freq']) + + return map diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..80b09757c74db181e5a7729579c89d530f65f25c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .policy import Qwen2Policy diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..029c8f4b7649b4df5673e1065748d30c7e3e1405 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__pycache__/container.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__pycache__/container.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a4a6f73a552d06485a8498c41ff87613e1f9273d Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__pycache__/container.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__pycache__/model.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eaea6ef8af570ce4d1f553f0734e9b2c20f9dc90 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__pycache__/model.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__pycache__/policy.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__pycache__/policy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b13179e1b4ed7971eb76c631c4c33a6c67ea883d Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/__pycache__/policy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/container.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/container.py new file mode 100644 index 0000000000000000000000000000000000000000..6556d87d6afb2c2d36781d9af8c4a09ce035aee1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/container.py @@ -0,0 +1,82 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# Create a container object to save model-specific tensors using the policy file above. 
+ +from ..common_parameters import * +from ..layer_container_base import LayerContainer +''' + # HF Qwen2 model looks like this: + +Qwen2ForCausalLM( + (model): Qwen2Model( + (embed_tokens): Embedding(151936, 1024) + (layers): ModuleList( + (0-23): 24 x Qwen2DecoderLayer( + (self_attn): Qwen2SdpaAttention( + (q_proj): Linear(in_features=1024, out_features=1024, bias=True) + (k_proj): Linear(in_features=1024, out_features=1024, bias=True) + (v_proj): Linear(in_features=1024, out_features=1024, bias=True) + (o_proj): Linear(in_features=1024, out_features=1024, bias=False) + (rotary_emb): Qwen2RotaryEmbedding() + ) + (mlp): Qwen2MLP( + (gate_proj): Linear(in_features=1024, out_features=2816, bias=False) + (up_proj): Linear(in_features=1024, out_features=2816, bias=False) + (down_proj): Linear(in_features=2816, out_features=1024, bias=False) + (act_fn): SiLU() + ) + (input_layernorm): Qwen2RMSNorm() + (post_attention_layernorm): Qwen2RMSNorm() + ) + ) + (norm): Qwen2RMSNorm() + ) + (lm_head): Linear(in_features=1024, out_features=151936, bias=False) +) +''' + + +class Qwen2TransformerContainer(LayerContainer): + """ + Transformer layer container for the Qwen2 model. + """ + qkv_w: UnfusedQKVParameter + qkv_b: UnfusedQKVParameter + attn_out_w: AttentionOutputParameter + mlp_1_w: GatedMLPParameter + mlp_2_w: MLP2Parameter + attn_norm_gamma: NormParameter + mlp_norm_gamma: NormParameter + + PARAM_MAPPING = { + "self_attn.q_proj.weight": "qkv_w.q_params", + "self_attn.k_proj.weight": "qkv_w.k_params", + "self_attn.v_proj.weight": "qkv_w.v_params", + "self_attn.q_proj.bias": "qkv_b.q_params", + "self_attn.k_proj.bias": "qkv_b.k_params", + "self_attn.v_proj.bias": "qkv_b.v_params", + "self_attn.o_proj.weight": "attn_out_w.params", + "mlp.gate_proj.weight": "mlp_1_w.gate_params", + "mlp.up_proj.weight": "mlp_1_w.up_params", + "mlp.down_proj.weight": "mlp_2_w.params", + "input_layernorm.weight": "attn_norm_gamma.params", + "post_attention_layernorm.weight": "mlp_norm_gamma.params", + } + + +class Qwen2NonTransformerContainer(LayerContainer): + """ + Non-Transformer layer container for the Qwen2 model. + """ + word_emb: EmbeddingParameter + word_unembed: UnembedParameter + final_norm: NormParameter + + PARAM_MAPPING = { + "model.embed_tokens.weight": "word_emb.params", + "model.norm.weight": "final_norm.params", + "lm_head.weight": "word_unembed.params", + } diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/model.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/model.py new file mode 100644 index 0000000000000000000000000000000000000000..d535462a954d4156ff32f2d3667a6a17713b4871 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/model.py @@ -0,0 +1,221 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Iterable, Optional, Tuple + +import torch + +import deepspeed.comm as dist + +from ...allocator import empty_from +from ...inference_utils import ActivationType, DtypeEnum +from .. import * +from ...modules.configs import * +from ...modules.interfaces import * +from ...modules import heuristics +from ...ragged import RaggedBatchWrapper + +from .container import Qwen2NonTransformerContainer, Qwen2TransformerContainer + + +class Qwen2InferenceModel(DSTransformerModelBase): + """ + Inference model implementation for ragged batching for Llama-2 models. 
+ """ + + _non_transformer: Optional[Qwen2NonTransformerContainer] + """ + Embed + unembed container. Specializing the type annotation. + """ + + _transformer: Optional[Iterable[Qwen2TransformerContainer]] + """ + Per-layer transformer container. Specializing the type annotation. + """ + """ + Properties ineherited from `DSInferenceModelBase` + """ + + @property + def max_sequence_length(self) -> int: + return self._config.max_seq_length + + """ + Properties ineherited from `DSTransformerModelBase` + """ + + @property + def num_layers(self) -> int: + return self._config.num_hidden_layers + + @property + def model_dim(self) -> int: + return self._config.hidden_size + + @property + def vocab_size(self) -> int: + return self._config.vocab_size + + @property + def head_size(self) -> int: + return self.model_dim // self.n_heads + + @property + def n_heads(self) -> int: + return self._config.num_attention_heads + + @property + def intermediate_dim(self) -> int: + return self._config.intermediate_size + + @property + def n_heads_kv(self) -> int: + return self._config.num_key_value_heads + + @property + def activation_dtype(self) -> DtypeEnum: + # TODO(ZonePG): bf16 inference results may be different from huggingface bf16, + # because in rms_norm, Qwen still use float() instead of bf16 + # if self._config.torch_dtype == torch.float16: + # return DtypeEnum.fp16 + # elif self._config.torch_dtype == torch.bfloat16: + # return DtypeEnum.bf16 + # else: + # raise NotImplementedError("Only fp16 and bf16 are supported") + return DtypeEnum.fp16 + + @property + def mlp_activation_fn(self) -> ActivationType: + return ActivationType.SiGLU + + @property + def norm_type(self) -> NormTypeEnum: + return NormTypeEnum.RMSNorm + + @property + def positional_embedding_type(self) -> PositionalEmbeddingType: + return PositionalEmbeddingType.rotate_half + + @property + def positional_embedding_config(self) -> Optional[RotateHalfConfig]: + return RotateHalfConfig(theta_base=self._config.rope_theta) + + def make_norm_layer(self) -> None: + """ + Instantiates the normalization layer for the model. This sets the `self.norm` attribute. + + TODO(cmikeh2): In the future we'll distinguish between the different norm objects, + but for now we'll just use the same one for all of them. + """ + norm_config = DSNormConfig( + max_tokens=self._engine_config.state_manager.max_ragged_batch_size, + type=self.norm_type, + channels=self.model_dim, + residual_dtype=self.activation_dtype, + input_dtype=self.activation_dtype, + output_dtype=self.activation_dtype, + eps=self._config.rms_norm_eps, + ) + + self.norm = heuristics.instantiate_pre_norm(norm_config, self._engine_config) + + """ + Forward implementations + """ + + def _forward_embed(self, ragged_batch: RaggedBatchWrapper) -> torch.Tensor: + """ + Performs the embedding lookup prior to running the transformer of the model. + + Arguments: + ragged_batch (RaggedBatchWrapper): The batch to embed. + + Returns: + torch.Tensor: The embedded batch. + """ + embed = self.embed(ragged_batch, self._non_transformer.word_emb) + + if embed.shape[-1] != self.model_dim: + raise ValueError(f"Embedding output shape {embed.shape} does not match model_dim {self.model_dim}") + + return embed + + def _forward_transformer_layer(self, layer_idx: int, residual: torch.Tensor, hidden_states: torch.Tensor, + ragged_batch_info: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Executes one (slightly offset) layer of the transformer. 
This implementation does a peak-ahead + optimization to fuse the layer norm of the next layer into the current layer. + + Arguments: + layer_idx (int): The index of the layer to execute. + residual (torch.Tensor): The residual tensor from the previous layer. + hidden_states (torch.Tensor): The hidden states from the previous layer. This is the + hidden states after pre normalization. + ragged_batch_info (RaggedBatchWrapper): The batch metadata. + """ + # TODO(cmikeh2): Distribute ragged_batch_info to all modules + + cur_params = self._transformer[layer_idx] + kv_cache = self.state_manager.get_cache(layer_idx) + + hidden_states = self.qkv(hidden_states, cur_params.qkv_w, b=cur_params.qkv_b) + hidden_states = self.attn(hidden_states, kv_cache, ragged_batch_info) + hidden_states = self.attn_out(hidden_states, cur_params.attn_out_w, b=None) + + if self.tp_size > 1: + dist.all_reduce(hidden_states, group=self._base_mp_group) + + residual, hidden_states = self.norm(residual, hidden_states, cur_params.mlp_norm_gamma, beta=None) + + # Should be configurable in the future + hidden_states = self.mlp_1(hidden_states, cur_params.mlp_1_w, b=None) + hidden_states = self.mlp_2(hidden_states, cur_params.mlp_2_w, b=None) + + if self.tp_size > 1: + dist.all_reduce(hidden_states, group=self._base_mp_group) + + if layer_idx != self.num_layers - 1: + next_params = self._transformer[layer_idx + 1] + residual, hidden_states = self.norm(residual, hidden_states, next_params.attn_norm_gamma, beta=None) + else: + # On last layer, we just need to perform the residual add. Adding into the residual + # here is safe. + residual.add_(hidden_states) + + return residual, hidden_states + + def _forward_unembed(self, hidden_states: torch.Tensor, ragged_batch_info: RaggedBatchWrapper) -> torch.Tensor: + """ + Performs unembedding of the hidden states to logits. This will only sample the final + token of each sequence. + """ + logits = self.unembed(hidden_states, + self._non_transformer.word_unembed, + ragged_batch_info, + gamma=self._non_transformer.final_norm) + + if self.tp_size > 1: + comm_buffer = empty_from(self._comm_logits, (self.tp_size, logits.shape[0], logits.shape[1])) + full_logits = empty_from(self._return_logits, (logits.shape[0], self.vocab_size)) + + dist.all_gather_into_tensor(comm_buffer, logits, group=self._base_mp_group) + + full_logits.copy_(comm_buffer.permute(1, 0, 2).reshape(logits.shape[0], self.vocab_size)) + + return full_logits + else: + return logits + + def forward(self, wrapped_batch: RaggedBatchWrapper) -> torch.Tensor: + + residual = self._forward_embed(wrapped_batch) + + residual, hidden_states = self.norm(residual, None, self._transformer[0].attn_norm_gamma, beta=None) + + for layer_idx in range(self.num_layers): + residual, hidden_states = self._forward_transformer_layer(layer_idx, residual, hidden_states, + wrapped_batch) + + return self._forward_unembed(residual, wrapped_batch) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/policy.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/policy.py new file mode 100644 index 0000000000000000000000000000000000000000..9c5db2ba0065e0b545464c3ffe89095cb0e03148 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/qwen_v2/policy.py @@ -0,0 +1,31 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Any + +from ...config_v2 import RaggedInferenceEngineConfig +from ..inference_policy_base import ContainerMap, InferenceV2Policy +from .container import Qwen2NonTransformerContainer, Qwen2TransformerContainer +from .model import Qwen2InferenceModel + + +class Qwen2Policy(InferenceV2Policy): + + def instantiate_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> Qwen2InferenceModel: + return Qwen2InferenceModel(config=self._model_config, engine_config=engine_config, base_mp_group=mp_group) + + def build_container_map(self) -> ContainerMap: + map = ContainerMap() + + transformer_containers = [Qwen2TransformerContainer(self.model) for _ in range(self.model.num_layers)] + + map.set_transformer_params(['model.layers'], transformer_containers) + + map.set_non_transformer_params(Qwen2NonTransformerContainer(self.model)) + + map.set_unmapped_params( + [f'model.layers.{i}.self_attn.rotary_emb.inv_freq' for i in range(self.model.num_layers)]) + + return map diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..63421bc1c622822f3975680bdbbcffe66b261c4d --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .attn import * +from .attn_out import * +from .embedding import * +from .mlp import * +from .qkv import * +from .types import * +from .unembed import * diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/attn.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/attn.py new file mode 100644 index 0000000000000000000000000000000000000000..de8d6f6ac4c56dc591ea1dce59ef3b7e4f8b2b8b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/attn.py @@ -0,0 +1,63 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Optional, Tuple + + +def get_local_heads(shard_rank: int, + num_shards: int, + n_heads_q: int, + n_heads_kv: Optional[int] = None) -> Tuple[int, int]: + """ + Helper to determine the number of local heads of a given shard. + + Args: + shard_rank (int): The rank of the shard. + num_shards (int): The total number of shards that attention is distributed over. + n_heads_q (int): The number of query heads. + n_heads_kv (int): The number of key/value heads. If not passed, it is assumed that + the number of query and key/value heads are the same. 
+ """ + if n_heads_q < num_shards: + raise ValueError("There must be at least as many attention heads as there are shards.") + + if n_heads_kv is None or n_heads_kv == n_heads_q: + # MHA attention + base_heads = n_heads_q // num_shards + extra_heads = n_heads_q % num_shards + + if shard_rank < extra_heads: + return (base_heads + 1), (base_heads + 1) + else: + return base_heads, base_heads + else: + # GQA attention + if n_heads_q % n_heads_kv != 0: + raise ValueError("Must be an even ratio between query and key/value heads.") + + if n_heads_kv < num_shards and num_shards % n_heads_kv != 0: + raise ValueError( + "If splitting a group across multiple shards, we must be able to distribute the groups evenly.") + + if n_heads_kv >= num_shards and n_heads_kv % num_shards != 0: + raise ValueError("If parallelizing groups, must be able to evenly distribute them.") + + q_ratio = n_heads_q // n_heads_kv + + if n_heads_kv >= num_shards: + local_kv_heads = n_heads_kv // num_shards + local_q_heads = local_kv_heads * q_ratio + return local_q_heads, local_kv_heads + else: + group_sharding_size = num_shards // n_heads_kv + group_rank_idx = shard_rank % group_sharding_size + + base_heads = q_ratio // group_sharding_size + extra_heads = q_ratio % group_sharding_size + + if group_rank_idx < extra_heads: + return (base_heads + 1), 1 + else: + return base_heads, 1 diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/attn_out.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/attn_out.py new file mode 100644 index 0000000000000000000000000000000000000000..ce7c105531eaba2c3948b385282b2bd1c3358b5f --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/attn_out.py @@ -0,0 +1,111 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Optional + +import torch + +from .types import ShardingType +from .utils import shard_param, get_shard_endpoints + + +def shard_attn_out_param(param: torch.Tensor, + shard_rank: int, + num_shards: int, + head_size: int, + n_heads_q: Optional[int] = None, + n_heads_kv: Optional[int] = None) -> Optional[torch.Tensor]: + """ + Utility method for sharding an attention output parameter. + """ + if len(param.shape) == 1: + # We will do the bias addition on the 0th rank only rather than scale the parameter and + # implicitly reconstruct this in the distributed reduce. + return param if shard_rank == 0 else None + + assert n_heads_kv is None or (n_heads_q is not None + and n_heads_kv is not None), "n_heads_kv should not be passed without n_heads_q" + + mha_sharding = n_heads_kv is None or n_heads_q == n_heads_kv + + if mha_sharding: + return shard_param(param, ShardingType.INNER_DIMENSION, shard_rank, num_shards, granularity=head_size) + else: + assert param.shape[0] == head_size * n_heads_q, "GQA param shape is not correct" + + # 32 KV heads, 16 shards for example + even_kv_sharding = n_heads_kv % num_shards == 0 + + # 8 KV heads, 16 shards for example + even_kv_distribution = num_shards % n_heads_kv == 0 + + assert even_kv_sharding or even_kv_distribution, "No partitioning algorithm for this yet." + + if even_kv_sharding: + # Same as original sharding scenario + return shard_param(param, ShardingType.INNER_DIMENSION, shard_rank, num_shards, granularity=head_size) + else: + # We will first do a sharding on the KV and Q to map to the one KV shard per group of Q. 
+ q_sharding_degree = num_shards // n_heads_kv + + kv_head = shard_rank // q_sharding_degree + + q_sharding_rank = shard_rank % q_sharding_degree + q_factor = n_heads_q // n_heads_kv + + q_chunk = param[..., q_factor * kv_head * head_size:q_factor * (kv_head + 1) * head_size] + + return shard_param(q_chunk, + ShardingType.INNER_DIMENSION, + q_sharding_rank, + q_sharding_degree, + granularity=head_size) + + +def attn_out_in_features(out_features: int, + shard_rank: int, + num_shards: int, + head_size: int, + n_heads_q: Optional[int] = None, + n_heads_kv: Optional[int] = None) -> int: + """ + Helper to calculate the expected output projection dimension of a QKV projection matrix. + + Args: + in_features (int): The model dimension. + shard_rank (int): Which rank to return the corresponding size for. + num_shards (int): The total number of shards the parameter is distributed across. + head_size (int): The size of each attention head. + n_heads_q (int): The number of query heads on the model. This only needs to be passed if the number + of query and key/value heads are different. If passed without n_heads_kv, default + MHA partitioning will be used. + n_heads_kv (int): The number of key and value heads on the model. This only needs to be passed + if the number of query and key/value heads are different. This argument cannot be passed without + also passing n_heads_q (we want to explicitly opt into GQA sharding). + """ + assert n_heads_kv is None or (n_heads_q is not None + and n_heads_kv is not None), "n_heads_kv should not be passed without n_heads_q" + + mha_sharding = n_heads_kv is None or n_heads_q == n_heads_kv + + if mha_sharding: + endpoints = get_shard_endpoints(out_features, shard_rank, num_shards, granularity=head_size) + return endpoints[1] - endpoints[0] + else: + if n_heads_kv >= num_shards: + assert n_heads_kv % num_shards == 0, "No partitioning algorithm for this yet." + n_local_groups = n_heads_kv // num_shards + group_size = n_heads_q // n_heads_kv + + return n_local_groups * head_size * group_size + else: + assert num_shards % n_heads_kv == 0, "No partitioning algorithm for this yet." + q_split_degree = num_shards // n_heads_kv + q_split_rank = shard_rank % q_split_degree + split_granularity = (n_heads_q // n_heads_kv) * head_size + + q_endpoints = get_shard_endpoints(split_granularity, q_split_rank, q_split_degree, granularity=head_size) + + return q_endpoints[1] - q_endpoints[0] diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/mlp.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/mlp.py new file mode 100644 index 0000000000000000000000000000000000000000..8abd0ff8622df3bd302f13292b48e9be9cae9782 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/mlp.py @@ -0,0 +1,75 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Optional + +import torch + +from .types import ShardingType, DEFAULT_SHARD_GRANULARITY +from .utils import shard_param, get_shard_endpoints + + +def shard_mlp_1_param(param: torch.Tensor, + shard_rank: int, + num_shards: int, + gated: bool = False, + is_moe: bool = False) -> torch.Tensor: + """ + Utility method for sharding an MLP 1 parameter. Both biases and weights are supported, as well + as for fused weights for MoE. + + Args: + param (torch.Tensor): The parameter to shard. 
+ shard_rank (int): Which shard of the partitioned tensor to return. + num_shards (int): The total number of shards the parameter is distributed across. + gated (bool): Whether or not the parameter is from a gated MLP. + """ + bias_dims = 2 if is_moe else 1 + + if gated: + return shard_param(param, + ShardingType.OUTER_DIMENSION, + shard_rank, + num_shards, + granularity=DEFAULT_SHARD_GRANULARITY * 2, + bias_dims=bias_dims) + else: + return shard_param(param, ShardingType.OUTER_DIMENSION, shard_rank, num_shards, bias_dims=bias_dims) + + +def shard_mlp_2_param(param: torch.Tensor, + shard_rank: int, + num_shards: int, + is_moe: bool = False) -> Optional[torch.Tensor]: + """ + Utility method for sharding an MLP 2 parameter. + + Args: + param (torch.Tensor): The parameter to shard. + shard_rank (int): Which shard of the partitioned tensor to return. + num_shards (int): The total number of shards the parameter is distributed across. + is_moe (bool): Whether or not the parameter is from a MoE model. + """ + bias_dim_size = 2 if is_moe else 1 + + if len(param.shape) == bias_dim_size: + # We will do the bias addition on the 0th rank only rather than scale the parameter and + # implicitly reconstruct this in the distributed reduce. + return param if shard_rank == 0 else None + + return shard_param(param, ShardingType.INNER_DIMENSION, shard_rank, num_shards) + + +def sharded_intermediate_dim(intermediate_size: int, num_shards: int, shard_rank: int) -> int: + """ + Utility method for getting the size of the intermediate dimension of a sharded MLP. + + Args: + intermediate_size (int): The size of the intermediate dimension. + num_shards (int): The total number of shards the parameter is distributed across. + shard_rank (int): Which shard of the partitioned tensor to return. + """ + endpoints = get_shard_endpoints(intermediate_size, shard_rank, num_shards) + return endpoints[1] - endpoints[0] diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/qkv.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/qkv.py new file mode 100644 index 0000000000000000000000000000000000000000..2b6d7f40836e8ac72d479b2c2792551e38e3d379 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/qkv.py @@ -0,0 +1,166 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Optional + +import torch + +from .types import ShardingType +from .utils import shard_param, get_shard_endpoints + + +def shard_qkv_param(param: torch.Tensor, + shard_rank: int, + num_shards: int, + head_size: int, + n_heads_q: Optional[int] = None, + n_heads_kv: Optional[int] = None) -> Optional[torch.Tensor]: + """ + Utility method for sharding a QKV parameter. Both biases and weights are supported. It is assumed + that the layout of the parameter is such that all Q heads, all K heads, and all V heads + are contiguous with respect to each other. + + Args: + param (torch.Tensor): The parameter to shard. + shard_rank (int): Which shard of the partitioned tensor to return. + num_shards (int): The total number of shards the parameter is distributed across. + head_size (int): The size of each head. + n_heads_q (int): The number of query heads. This only needs to be passed if the number + of query and key/value heads are different. If passed without n_heads_kv, default + MHA partitioning will be used. + n_heads_kv (int): The number of key/value heads. 
This only needs to be passed if the number + of query and key/value heads are different. This argument should not be passed without + n_heads_q (we want to explicitly opt into GQA sharding). + """ + if n_heads_kv is not None and n_heads_q is None: + raise ValueError("n_heads_kv should not be passed without n_heads_q") + + if n_heads_q is None: + # Guaranteed to be in MHA + if param.shape[0] // 3 % head_size != 0: + raise ValueError("MHA param shape is not correct") + n_heads_q = param.shape[0] // head_size // 3 + mha_sharding = True + else: + mha_sharding = n_heads_q == n_heads_kv + + if n_heads_q < num_shards: + raise ValueError("There must be at least as many query heads as there are shards.") + + if mha_sharding: + return shard_param(param, + ShardingType.OUTER_DIMENSION, + shard_rank, + num_shards, + num_concatenated_matrices=3, + granularity=head_size) + else: + if n_heads_q % n_heads_kv != 0: + raise ValueError("Must be an even ratio between query and key/value heads.") + + if param.shape[0] != head_size * (n_heads_q + 2 * n_heads_kv): + raise ValueError("GQA param shape is not correct") + + # 32 KV heads, 16 shards for example + if n_heads_kv >= num_shards and n_heads_kv % num_shards != 0: + raise ValueError("Currently do not support uneven partitioning of KV heads for GQA.") + + # 8 KV heads, 16 shards for example + if n_heads_kv < num_shards and num_shards % n_heads_kv != 0: + raise ValueError("Currently do not support distributing KV heads across different numbers of shards.") + else: + even_kv_sharding = n_heads_kv >= num_shards + + if param is None: + return None + + q_param = param[:head_size * n_heads_q] + kv_param = param[head_size * n_heads_q:] + + if even_kv_sharding: + # This is equivalent to the original sharding algorithm since n_heads_q = C * n_heads_kv. + # If n_heads_kv % num_shards == 0, then n_heads_q % num_shards == 0. + q_param = shard_param(q_param, ShardingType.OUTER_DIMENSION, shard_rank, num_shards, granularity=head_size) + kv_param = shard_param(kv_param, + ShardingType.OUTER_DIMENSION, + shard_rank, + num_shards, + num_concatenated_matrices=2, + granularity=head_size) + return torch.cat([q_param, kv_param], dim=0) + else: + # We will first do a sharding on the KV and Q to map to the one KV shard per group of Q. + q_sharding_degree = num_shards // n_heads_kv + + kv_head = shard_rank // q_sharding_degree + k_param = kv_param[kv_head * head_size:(kv_head + 1) * head_size] + v_param = kv_param[(n_heads_kv + kv_head) * head_size:(n_heads_kv + kv_head + 1) * head_size] + + q_sharding_rank = shard_rank % q_sharding_degree + q_factor = n_heads_q // n_heads_kv + + q_chunk = q_param[q_factor * kv_head * head_size:q_factor * (kv_head + 1) * head_size] + + q_param = shard_param(q_chunk, + ShardingType.OUTER_DIMENSION, + q_sharding_rank, + q_sharding_degree, + granularity=head_size) + + return torch.cat([q_param, k_param, v_param], dim=0) + + +def qkv_out_features(in_features: int, + shard_rank: int, + num_shards: int, + head_size: int, + n_heads_q: Optional[int] = None, + n_heads_kv: Optional[int] = None) -> int: + """ + Helper to calculate the expected output projection dimension of a QKV projection matrix. + + Args: + in_features (int): The model dimension. + shard_rank (int): Which rank to return the corresponding size for. + num_shards (int): The total number of shards the parameter is distributed across. + head_size (int): The size of each head. + n_heads_q (int): The number of query heads. 
This only needs to be passed if the number + of query and key/value heads are different. If passed without n_heads_kv, default + MHA partitioning will be used. + n_heads_kv (int): The number of key/value heads. This only needs to be passed if the number + of query and key/value heads are different. This argument cannot be passed without also + passing n_heads_q (we want to explicitly opt into GQA sharding). + """ + if n_heads_kv is not None and n_heads_q is None: + raise ValueError("n_heads_kv should not be passed without n_heads_q") + + mha_sharding = n_heads_kv is None or n_heads_q == n_heads_kv + + if n_heads_q is not None and in_features != head_size * n_heads_q: + raise ValueError("in_features is not consistent with n_heads_q and head_size") + + if mha_sharding: + endpoints = get_shard_endpoints(in_features, shard_rank, num_shards, granularity=head_size) + return (endpoints[1] - endpoints[0]) * 3 + else: + if n_heads_kv >= num_shards: + if n_heads_kv % num_shards != 0: + raise ValueError("The KV heads must be evenly distributed across the shards.") + + n_local_groups = n_heads_kv // num_shards + group_size = n_heads_q // n_heads_kv + + return n_local_groups * head_size * (2 + group_size) + else: + if num_shards % n_heads_kv != 0: + raise ValueError("A shared KV head must always partition across the same number of shards.") + + q_split_degree = num_shards // n_heads_kv + q_split_rank = shard_rank % q_split_degree + split_granularity = (n_heads_q // n_heads_kv) * head_size + + q_endpoints = get_shard_endpoints(split_granularity, q_split_rank, q_split_degree, granularity=head_size) + + return (q_endpoints[1] - q_endpoints[0]) + 2 * head_size diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/types.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/types.py new file mode 100644 index 0000000000000000000000000000000000000000..01dce0db523a9e904052c8336fd14278c5162b2c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/types.py @@ -0,0 +1,18 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from enum import Enum + +DEFAULT_SHARD_GRANULARITY = 32 + + +class ShardingType(Enum): + # Inner dimension sharding corresponds to splitting the Tensor along the K-dimension + # of a matrix multiplication. This would be used for attention_output or MLP2. + INNER_DIMENSION = 1 + + # Outer dimension sharding corresponds to splitting the Tensor along the N-dimension + # of a matrix multiplication. This would be used for the QKV and MLP1 projections. + OUTER_DIMENSION = 0 diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/unembed.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/unembed.py new file mode 100644 index 0000000000000000000000000000000000000000..6cc771969ad9e195f04216e1374ea0ced6fb7065 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/unembed.py @@ -0,0 +1,41 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from .types import ShardingType +from .utils import shard_param, get_shard_endpoints + + +def shard_unembed_param(param: torch.Tensor, shard_rank: int, num_shards: int) -> torch.Tensor: + """ + Utility method for sharding an unembed parameter. 
We shard unembeddings on the vocab dimension + with the expectation of an all-gather to produce the full results. + + TODO(cmikeh2): Really ideal would be if MII could have access to the comm and we would do + an A2A and sharded sampling. + + Args: + param (torch.Tensor): The parameter to shard. Should be of shape [vocab_size, model_dim] + shard_rank (int): Which shard of the partitioned tensor to return. + num_shards (int): The total number of shards the parameter is distributed across. + + Returns: + torch.Tensor: The sharded parameter of shape [sharded_vocab_size, model_dim] + """ + return shard_param(param, ShardingType.OUTER_DIMENSION, shard_rank, num_shards, granularity=1) + + +def sharded_unembed_dim(vocab_size: int, shard_rank: int, num_shards: int) -> int: + """ + Utility method for determining the sharded vocab size of a sharded unembed parameter. + + Args: + vocab_size (int): The size of the vocabulary. + shard_rank (int): Which shard of the partitioned tensor to return. + num_shards (int): The total number of shards the parameter is distributed across. + """ + start_idx, end_idx = get_shard_endpoints(vocab_size, shard_rank, num_shards, granularity=1) + return end_idx - start_idx diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/utils.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..fd0eb51873f83834f99bf14b084cf62efbde05f3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/utils.py @@ -0,0 +1,104 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Optional, Tuple + +import torch + +from .types import ShardingType, DEFAULT_SHARD_GRANULARITY + + +def get_shard_endpoints(dim_size: int, + shard_rank: int, + num_shards: int, + granularity: int = DEFAULT_SHARD_GRANULARITY) -> Tuple[int, int]: + """ + Given a dimension to shard with size dim_size, return the start and end indices of the slice + that belong to the given rank. + + The typical use of this is as an internal helper function, so see if there is a higher level + API that better suits the application. + + Args: + dim_size (int): The size of the dimension to shard. + shard_rank (int): The rank of the shard to return. + num_shards (int): Total number of shards the dimension will be distributed across. + granularity (int): The minimum alignment of the shard endpoints. This is used to support + non-even head counts as well as align dimensions to cleaner GEMM boundaries. + """ + assert dim_size % granularity == 0, "Dimension size must be divisible by granularity" + + total_chunks = dim_size // granularity + base_chunks_per_rank = total_chunks // num_shards + remainder_chunks = total_chunks % num_shards + + start_chunk_id = shard_rank * base_chunks_per_rank + min(shard_rank, remainder_chunks) + end_chunk_id = start_chunk_id + base_chunks_per_rank + (1 if shard_rank < remainder_chunks else 0) + + return start_chunk_id * granularity, end_chunk_id * granularity + + +def shard_param(param: Optional[torch.Tensor], + shard_mode: ShardingType, + shard_rank: int, + num_shards: int, + num_concatenated_matrices: int = 1, + granularity: int = 32, + bias_dims: int = 1) -> torch.Tensor: + """ + Utility for sharding a parameter. This will return the slice of the parameter that should + exist on the given shard_rank given the sharding configuration. 
The workflow here is + to find the minimum bounded Tensor to shard, get the slicing endpoints, and then concatenate + as needed. + + The typical use of this is as an internal helper function, so see if there is a higher level + API that better suits the application. + + Args: + param (torch.Tensor): The parameter to shard. + shard_mode (ShardingType): The type of sharding to apply. See ShardingType for more context. + shard_rank (int): The rank of the shard to return. + num_shards (int): Total number of shards the parameter will be distrbuted across. + num_concatenated_matrices (int): The number of matrices that have been concatenated together in the original + parameter. An example of this is a fused QKV projection matrix, where the `num_concatenated_matrices` + argument would be 3. + granularity (int): The minimum alignment of the shard endpoints. For attention projection matrices, this + should be set to the head size to support non-even sharding. + bias_dims (int): The number of dimensions that are considered bias dimensions. This is used to support + sharding of MoE and non-MoE biases on the same codepath. + """ + assert shard_rank < num_shards, "Shard rank must be less than num_shards" + + # Easier to hide this inside of the sharding logic than to add checks in every model + # implementation. + if param is None: + return None + + if num_shards == 1: + # Trivial case of no sharding. + return param + + if shard_mode == ShardingType.OUTER_DIMENSION: + + def get_matrices(dim_idx: int) -> torch.Tensor: + dim_size = param.size(dim_idx) // num_concatenated_matrices + start_channel_id, end_channel_id = get_shard_endpoints(dim_size, shard_rank, num_shards, granularity) + return torch.chunk(param, num_concatenated_matrices, dim=dim_idx), start_channel_id, end_channel_id + + if param.ndim == bias_dims: + # Special case for bias parameters. + matrices, start_channel_id, end_channel_id = get_matrices(dim_idx=-1) + return torch.cat([mat[..., start_channel_id:end_channel_id] for mat in matrices], dim=-1) + else: + # General case for weight parameters. This assumes MoE parameters are stored in the format of + # [num_experts, out_features, in_features] + matrices, start_channel_id, end_channel_id = get_matrices(dim_idx=-2) + return torch.cat([mat[..., start_channel_id:end_channel_id, :] for mat in matrices], dim=-2) + + elif shard_mode == ShardingType.INNER_DIMENSION: + dim_size = param.size(-1) // num_concatenated_matrices + start_channel_id, end_channel_id = get_shard_endpoints(dim_size, shard_rank, num_shards, granularity) + matrices = torch.chunk(param, num_concatenated_matrices, dim=-1) + return torch.cat([mat[..., start_channel_id:end_channel_id] for mat in matrices], dim=-1) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/linear/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/linear/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0501af54c4e6dfb89ee0db49d56e46809ae7945c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/linear/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .blas_fp_linear import BlasFPLinear +from .quantized_linear import QuantizedWf6Af16Linear, fp_quantize diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/linear/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/linear/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..892abcaec7075f7013c8e154446253a936708d74 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/linear/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/linear/__pycache__/blas_fp_linear.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/linear/__pycache__/blas_fp_linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..782ebf560942428611d97e573ba880c47889ca3b Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/linear/__pycache__/blas_fp_linear.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/linear/__pycache__/quantized_linear.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/linear/__pycache__/quantized_linear.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bdee28b66d91a7988c7e3bd8e9a3a56f8d8b5de3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/linear/__pycache__/quantized_linear.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/linear/blas_fp_linear.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/linear/blas_fp_linear.py new file mode 100644 index 0000000000000000000000000000000000000000..c58dab0b826b48ed5ac4b58217936a2f9de4ae29 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/linear/blas_fp_linear.py @@ -0,0 +1,103 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Any, Dict, Optional + +import torch + +from deepspeed.accelerator import get_accelerator +from ....allocator import empty_from +from ....inference_utils import is_gated +from ....kernels.core_ops import ( + BlasLibLinear, + CUDABiasActivation, + CUDAGatedActivation, +) + +from ...interfaces import DSLinearBase, DSLinearRegistry +from ...configs import DSLinearConfig +from ....inference_parameter import InferenceParameter + + +@DSLinearRegistry.register_module +class BlasFPLinear(DSLinearBase): + """ + Linear DSModule based on BLAS library and standalone bias + activation kernel implementation. 
+ """ + + @staticmethod + def name(): + return 'blas_fp_linear' + + @staticmethod + def supports_config(config: DSLinearConfig) -> bool: + if config.input_dtype != config.output_dtype: + return False + + if config.input_dtype != torch.float16 and config.input_dtype != torch.bfloat16: + return False + + if is_gated(config.activation): + try: + _ = CUDAGatedActivation(config.out_channels, config.output_dtype, config.activation) + except ValueError: + return False + else: + try: + _ = CUDABiasActivation(config.out_channels, config.output_dtype, config.activation) + except ValueError: + return False + + return True + + def __init__(self, config: DSLinearConfig, implementation_config: Dict[str, Any]) -> None: + super().__init__(config, implementation_config) + + self._linear_impl = BlasLibLinear(self._config.input_dtype) + + if is_gated(config.activation): + self._is_gated = True + self._act_fn = CUDAGatedActivation(config.out_channels, config.output_dtype, config.activation) + self._double_buffer = torch.empty((config.max_tokens, config.out_channels * 2), + dtype=config.output_dtype, + device=get_accelerator().current_device()) + else: + self._is_gated = False + self._act_fn = CUDABiasActivation(config.out_channels, config.output_dtype, config.activation) + + self._output = torch.empty((config.max_tokens, config.out_channels), + dtype=config.output_dtype, + device=get_accelerator().current_device()) + + def transform_param(self, param: torch.Tensor) -> InferenceParameter: + """ + Converts param to same data type as input and output. + + Parameters: + param (torch.Tensor): Weight or bias tensor. + """ + param = param.to(self._config.output_dtype) + return InferenceParameter.initialize(param) + + def forward(self, hidden_states: torch.Tensor, w: torch.Tensor, b: Optional[torch.Tensor] = None) -> torch.Tensor: + + output = empty_from(self._output, (hidden_states.shape[0], self._config.out_channels)) + + if self._is_gated: + staging_output = empty_from(self._double_buffer, (hidden_states.shape[0], self._config.out_channels * 2)) + self._linear_impl(staging_output, hidden_states, w) + self._act_fn(output, staging_output, b) + else: + self._linear_impl(output, hidden_states, w) + self._act_fn(output, b) + + return output + + @property + def output(self) -> torch.Tensor: + """ + Return the padded, pre-allocated output Tensor. + """ + return self._output diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/linear/quantized_linear.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/linear/quantized_linear.py new file mode 100644 index 0000000000000000000000000000000000000000..933cf55b2391b0132f392e770d0b747f357f8ca0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/linear/quantized_linear.py @@ -0,0 +1,205 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Any, Dict, Optional + +import torch + +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import InferenceCoreBuilder +from ....allocator import empty_from +from ....inference_utils import is_gated +from ....kernels.core_ops import ( + CUDAWf6Af16Linear, + CUDABiasActivation, + CUDAGatedActivation, +) + +from ...interfaces import DSLinearBase, DSLinearRegistry +from ...configs import DSLinearConfig +from ....inference_parameter import InferenceParameter + + +def fp_quantize(input: torch.FloatTensor, + num_bits: int = 6, + exp_bits: int = 3, + min_value: torch.FloatTensor = None, + max_value: torch.FloatTensor = None, + group_size: int = -1): + """ + Args: + input (`torch.FloatTensor`) + The input which needs to be quantized + num_bits (int, >=4) + Number of bits to use for quantization + exp_bits: + fp exp_bits + min_value/max_value (torch.FloatTensor) + Used for static activation quantization + group_size (int) N + The quantization block size; each group of N numbers has its own scaling + factor and offset. -1 means use the last dim as the group_size + Returns: + quantized_fake_fp6 + The quantized weights, stored in fp16 format and containing fp6 values. + scales + Quantization scales + """ + + try: + from qtorch.quant import float_quantize + except ImportError: + raise ImportError("Please install qtorch to use this function") + + assert (min_value is None and max_value is None) or (min_value is not None and max_value is not None) + + assert input.dtype == torch.float16 + + orig_device = input.device + input = input.to(torch.float32).to(get_accelerator().current_device()) + if num_bits == 6 and exp_bits == 3: # this is default + q_range = 28 + else: + raise NotImplementedError + + man_bits = num_bits - exp_bits - 1 + input_shape = input.shape + + if group_size == -1: + group_size = input_shape[-1] + else: + # Only support per-channel quantization + raise NotImplementedError + num_groups = input.numel() // group_size + input = input.reshape(num_groups, -1) + + if min_value is None: + max_input = torch.amax(torch.abs(input), dim=-1).view(num_groups, -1) + else: + max_input = torch.max(min_value.abs(), max_value) # .view(-1) + scales = max_input / q_range # q_range + 1 + scales[scales == 0] = 1 # avoid zero scales + scaled_input = input / scales + + quantized_fake_fp6 = float_quantize(scaled_input, exp_bits, man_bits, rounding="nearest") + + quantized_fake_fp6 = quantized_fake_fp6.reshape(input_shape).contiguous().to(torch.float16).to(orig_device) + scales = scales.to(torch.float16).to(orig_device) + # Now the dequantized value is quantized_fake_fp6 * scales + + return quantized_fake_fp6, scales + + +@DSLinearRegistry.register_module +class QuantizedWf6Af16Linear(DSLinearBase): + """ + Linear DSModule for FP6 weight-only quantization kernel, where weight is FP6 + and activation is FP16. + """ + + @staticmethod + def name(): + return 'quantized_wf6af16_linear' + + @staticmethod + def supports_config(config: DSLinearConfig) -> bool: + if config.input_dtype != config.output_dtype: + return False + + # As for fp6 data items, they are packed and stored in a set of fp16 + # tensors. E.g., 8 fp6 data items are stored in 3 fp16 tensors.
+ if config.input_dtype != torch.float16: + return False + + if is_gated(config.activation): + try: + _ = CUDAGatedActivation(config.out_channels, config.output_dtype, config.activation) + except ValueError: + return False + else: + try: + _ = CUDABiasActivation(config.out_channels, config.output_dtype, config.activation) + except ValueError: + return False + + return True + + def __init__(self, config: DSLinearConfig, implementation_config: Dict[str, Any]) -> None: + super().__init__(config, implementation_config) + + self._linear_impl = CUDAWf6Af16Linear() + + if is_gated(config.activation): + # In the FP6 kernel implementation, the MatMul is W * A, where W is + # the weight and A is activation. M is the output channel size. + self.out_channels = self._config.out_channels * 2 + self.in_channels = self._config.in_channels + self._is_gated = True + self._act_fn = CUDAGatedActivation(config.out_channels, config.output_dtype, config.activation) + self._double_buffer = torch.empty((config.max_tokens, config.out_channels * 2), + dtype=config.output_dtype, + device=get_accelerator().current_device()) + else: + self.out_channels = self._config.out_channels + self.in_channels = self._config.in_channels + self._is_gated = False + self._act_fn = CUDABiasActivation(config.out_channels, config.output_dtype, config.activation) + + self._output = torch.empty((config.max_tokens, config.out_channels), + dtype=config.output_dtype, + device=get_accelerator().current_device()) + + self.inf_module = InferenceCoreBuilder().load() + self.inf_module.create_handle() + self.preprocess_weight = self.inf_module.preprocess_weight + + self.quantizer = fp_quantize + + def transform_param(self, param: torch.Tensor) -> InferenceParameter: + """ + Converts param to same data type as input and output. + + Parameters: + param (torch.Tensor): Weight or bias tensor. + """ + # It expects that the quantization scales are store in the attribute `scales`. + + if param.ndim == 1: # bias, do nothing + return InferenceParameter.initialize(param) + + quantized_fake_fp6, scales = self.quantizer(param, num_bits=6, exp_bits=3) + + # This is for debugging, will delete before release. + assert (quantized_fake_fp6.dtype == torch.float16) + assert quantized_fake_fp6.shape[0] == self.out_channels + assert scales.numel() == self.out_channels + + weights_2bit, weights_4bit = self.preprocess_weight(quantized_fake_fp6) + + return InferenceParameter.initialize(weights_2bit, weights_4bit=weights_4bit, scales=scales) + + def forward(self, hidden_states: torch.Tensor, w: torch.Tensor, b: Optional[torch.Tensor] = None) -> torch.Tensor: + weights_2bit = w + weights_4bit = w.weights_4bit + scales = w.scales + output = empty_from(self._output, (hidden_states.shape[0], self._config.out_channels)) + if self._is_gated: + staging_output = empty_from(self._double_buffer, (hidden_states.shape[0], self.out_channels)) + self._linear_impl(staging_output, hidden_states, weights_2bit, weights_4bit, scales, self.out_channels, + hidden_states.shape[0], self.in_channels) + self._act_fn(output, staging_output, b) + else: + self._linear_impl(output, hidden_states, weights_2bit, weights_4bit, scales, self.out_channels, + hidden_states.shape[0], self.in_channels) + self._act_fn(output, b) + + return output + + @property + def output(self) -> torch.Tensor: + """ + Return the padded, pre-allocated output Tensor. 
+ """ + return self._output diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/unembed/__pycache__/ragged_unembed.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/unembed/__pycache__/ragged_unembed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c3b2a316abd1f99edc07aef2efa2caa64ef7f82 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/unembed/__pycache__/ragged_unembed.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/moe_base.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/moe_base.py new file mode 100644 index 0000000000000000000000000000000000000000..78bdc0700f63b9aa532fc62b66b3e9c19261061c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/moe_base.py @@ -0,0 +1,91 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from abc import abstractmethod +from typing import Any, Dict, Optional, Type + +import torch + +from deepspeed.runtime.config_utils import DeepSpeedConfigModel +from ..ds_module import DSModuleBase +from ..module_registry import DSModuleRegistryBase +from ..configs import DSMoEConfig +from ...inference_parameter import InferenceParameter + + +class DSMoEBase(DSModuleBase): + """ + Base mixin for MoE modules. The interface represented by this module is: + + expert_assignments = gate(hidden_states) + intermediate = ragged_linear(hidden_states, expert_assignments) + output = ragged_linear(intermediate, expert_assignments) + """ + + @staticmethod + def config_class() -> Type[DeepSpeedConfigModel]: + return DSMoEConfig + + def __init__(self, config: DSMoEConfig, implementation_config: Dict[str, Any]) -> None: + super().__init__(config, implementation_config) + + @abstractmethod + def transform_gate_param(self, param: torch.Tensor) -> InferenceParameter: + """ + Perform any necessary transformations of the gate parameter. + + Args: + param (torch.Tensor): gate_w (shape: [num_experts, model_dim]) + """ + ... + + @abstractmethod + def transform_moe_mlp_1_param(self, param: torch.Tensor) -> InferenceParameter: + """ + Perform any necessary transformations of the parameter. The specific component + being transformed should be inferred from the shape of the parameter. + + Args: + param (torch.Tensor): One of either mlp_1_w, mlp_1_b + """ + ... + + @abstractmethod + def transform_moe_mlp_2_param(self, param: torch.Tensor) -> InferenceParameter: + """ + Perform any necessary transformations of the parameter. The specific component being + transformed should be inferred from the shape of the parameter. This interface is + separate from transform_moe_mlp_1_param because the two components may have identical + shapes. + + Args: + param (torch.Tensor): One of either mlp_2_w or mlp_2_b + """ + ... + + def forward(self, + hidden_states: torch.Tensor, + gate_w: torch.Tensor, + mlp_1_w: torch.Tensor, + mlp_2_w: torch.Tensor, + mlp_1_b: Optional[torch.Tensor] = None, + mlp_2_b: Optional[torch.Tensor] = None) -> torch.Tensor: + raise NotImplementedError() + + @property + @abstractmethod + def output(self) -> torch.Tensor: + """ + Returns the pre-allocated, padded output Tensor. + """ + ...
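For orientation, the sketch below spells out the data flow described in the DSMoEBase docstring as a naive dense PyTorch reference. It assumes top-1 routing via an argmax over the gate scores, a GELU activation, and expert weights laid out as [num_experts, out_features, in_features] (the MoE layout assumed by shard_param earlier in this diff); the helper name reference_moe_forward is illustrative only and not part of the DeepSpeed package, which realizes the same flow with fused ragged kernels.

import torch
import torch.nn.functional as F


def reference_moe_forward(hidden_states: torch.Tensor,
                          gate_w: torch.Tensor,
                          mlp_1_w: torch.Tensor,
                          mlp_2_w: torch.Tensor,
                          mlp_1_b: torch.Tensor = None,
                          mlp_2_b: torch.Tensor = None) -> torch.Tensor:
    # gate_w: [num_experts, model_dim]; hidden_states: [n_tokens, model_dim]
    scores = hidden_states @ gate_w.t()              # [n_tokens, num_experts]
    expert_assignments = scores.argmax(dim=-1)       # top-1 routing (assumption)

    output = torch.empty_like(hidden_states)
    for expert_id in range(gate_w.shape[0]):
        token_idx = (expert_assignments == expert_id).nonzero(as_tuple=True)[0]
        if token_idx.numel() == 0:
            continue
        x = hidden_states[token_idx]                 # gather this expert's tokens
        h = x @ mlp_1_w[expert_id].t()               # mlp_1_w: [num_experts, intermediate_dim, model_dim]
        if mlp_1_b is not None:
            h = h + mlp_1_b[expert_id]
        h = F.gelu(h)                                # activation choice is an assumption
        y = h @ mlp_2_w[expert_id].t()               # mlp_2_w: [num_experts, model_dim, intermediate_dim]
        if mlp_2_b is not None:
            y = y + mlp_2_b[expert_id]
        output[token_idx] = y                        # scatter back into token order
    return output

A concrete DSMoEBase subclass would register itself with DSMoERegistry (defined just below) and realize this flow using the parameters produced by its transform_* hooks.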
+ + +class DSMoERegistry(DSModuleRegistryBase): + registry: Dict = {} + + @staticmethod + def associated_class() -> Type[DSModuleBase]: + return DSMoEBase diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/post_norm_base.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/post_norm_base.py new file mode 100644 index 0000000000000000000000000000000000000000..cc80e5c94bf73e7e2173906bd220fc39ff7df610 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/post_norm_base.py @@ -0,0 +1,69 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from abc import abstractmethod +from typing import Any, Dict, Optional, Tuple, Type + +import torch + +from deepspeed.runtime.config_utils import DeepSpeedConfigModel +from ..ds_module import DSModuleBase +from ..configs.norm_config import DSNormConfig +from ..module_registry import DSModuleRegistryBase +from ...inference_parameter import InferenceParameter + + +class DSPostNormBase(DSModuleBase): + """ + Base MixIn for all Post-Normalization modules. The interface represented by this + module is: + + residual, hidden_out = norm(residual + hidden_in) + + If residual and hidden_out are the same data type, then they may alias each other. + Furthermore, residual should be updated in-place. + """ + + @staticmethod + def config_class() -> Type[DeepSpeedConfigModel]: + return DSNormConfig + + def __init__(self, config: DSNormConfig, implementation_config: Dict[str, Any]) -> None: + super().__init__(config, implementation_config) + + @abstractmethod + def transform_param(self, param: torch.Tensor) -> InferenceParameter: + """ + Transform a gamma/beta parameter. It is assumed that both transformations are + the same. + + Parameters: + param (torch.Tensor): Gamma or beta parameter. + """ + ... + + def forward(self, + residual: torch.Tensor, + hidden_states: torch.Tensor, + gamma: torch.Tensor, + beta: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Parameters: + residual (torch.Tensor): Residual tensor. + hidden_states (torch.Tensor): Hidden states tensor. + + Returns: + (torch.Tensor, torch.Tensor): Tuple of residual and hidden states. + Hidden states may alias with residual. + """ + raise NotImplementedError() + + +class DSPostNormRegistry(DSModuleRegistryBase): + registry: Dict = {} + + @staticmethod + def associated_class() -> Type[DSModuleBase]: + return DSPostNormBase diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/pre_norm_base.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/pre_norm_base.py new file mode 100644 index 0000000000000000000000000000000000000000..84f51cff6947ecf8ee8563c289aec39324f37e7b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/pre_norm_base.py @@ -0,0 +1,73 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from abc import abstractmethod +from typing import Any, Dict, Optional, Tuple, Type + +import torch + +from deepspeed.runtime.config_utils import DeepSpeedConfigModel +from ..ds_module import DSModuleBase +from ..configs.norm_config import DSNormConfig +from ..module_registry import DSModuleRegistryBase +from ...inference_parameter import InferenceParameter + + +class DSPreNormBase(DSModuleBase): + """ + Base mixin for all Pre-Normalization modules. 
The interface represented by this module + is: + + if hidden_in is not None: + residual_out = residual + hidden_in + else: + residual_out = residual + + hidden_out = normalize(residual_out) + return residual_out, hidden_out + + Residual should be updated in-place. + """ + + @staticmethod + def config_class() -> Type[DeepSpeedConfigModel]: + return DSNormConfig + + def __init__(self, config: DSNormConfig, implementation_config: Dict[str, Any]): + super().__init__(config, implementation_config) + + @abstractmethod + def transform_param(self, param: torch.Tensor) -> InferenceParameter: + """ + Transform a gamma/beta parameter. It is assumed that both transformations are + the same. + + Parameters: + param (torch.Tensor): Gamma or beta parameter. + """ + ... + + def forward(self, + residual: torch.Tensor, + hidden_states: Optional[torch.Tensor], + gamma: torch.Tensor, + beta: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Parameters: + residual (torch.Tensor): Residual tensor. + hidden_states (torch.Tensor): Hidden states tensor. + + Returns: + (torch.Tensor, torch.Tensor): Tuple of residual and hidden states. + """ + raise NotImplementedError() + + +class DSPreNormRegistry(DSModuleRegistryBase): + registry: Dict = {} + + @staticmethod + def associated_class() -> Type[DSModuleBase]: + return DSPreNormBase diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/api-v1-jdq-1590.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/api-v1-jdq-1590.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..d738f893891ff3d747ee71e709301a481d09430e --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/api-v1-jdq-1590.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44b9b0d290a1e339695a431438f84080071c5635161c3977dd17f4c27b00a34a +size 1507
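Returning to the DSPreNormBase contract quoted above (residual_out = residual + hidden_in when hidden_in is present, then hidden_out = normalize(residual_out), with the residual updated in place), the minimal eager-mode sketch below illustrates that interface. It assumes LayerNorm as the normalization purely for illustration; the concrete DeepSpeed implementations pick the norm from DSNormConfig and use fused kernels, and the function name reference_pre_norm is illustrative, not part of the package.

from typing import Optional, Tuple

import torch
import torch.nn.functional as F


def reference_pre_norm(residual: torch.Tensor,
                       hidden_states: Optional[torch.Tensor],
                       gamma: torch.Tensor,
                       beta: Optional[torch.Tensor] = None,
                       eps: float = 1e-5) -> Tuple[torch.Tensor, torch.Tensor]:
    # residual_out = residual + hidden_in (or just residual when hidden_in is None),
    # updated in place as the interface requires.
    if hidden_states is not None:
        residual.add_(hidden_states)
    # hidden_out = normalize(residual_out); LayerNorm is assumed here for illustration.
    hidden_out = F.layer_norm(residual, residual.shape[-1:], weight=gamma, bias=beta, eps=eps)
    return residual, hidden_out


# Example usage with arbitrary sizes:
# residual, hidden, gamma = torch.randn(4, 512), torch.randn(4, 512), torch.ones(512)
# residual, hidden_out = reference_pre_norm(residual, hidden, gamma)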