diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..14b0654a8c36c63e8666e9392e83c9a82478cce8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__init__.py @@ -0,0 +1,19 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .inference_model_base import DSInferenceModelBase +from .inference_transformer_base import DSTransformerModelBase, DSMoETransformerModelBase +from .inference_policy_base import InferenceV2Policy, ContainerMap +from .sharding import * + +# Model Implementations +from .llama_v2 import * +from .opt import * +from .mistral import * +from .mixtral import * +from .falcon import * +from .phi import * +from .qwen import * +from .qwen_v2 import *
diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/attn_output_parameters.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/attn_output_parameters.py new file mode 100644 index 0000000000000000000000000000000000000000..f220cf7a7125d030b05829d0615210c79e9d562d --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/attn_output_parameters.py @@ -0,0 +1,29 @@ +# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from ...model_implementations.parameter_base import ParameterBase +""" +Common Attention Output Parameter Patterns +""" + + +class AttentionOutputParameter(ParameterBase): + """ + Attention output parameter container. + + Note: The differentiation for something like GQA for this matrix is primarily + encompassed in the sharding logic, which is currently expected to be performed by + the model implementation. + """ + + params: torch.Tensor + """ + Unsharded attention output parameter of shape [model_dim, model_dim] + """ + + def finalize(self) -> torch.Tensor: + return self.inference_model.transform_attn_out_param(self.params) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/embedding_parameters.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/embedding_parameters.py new file mode 100644 index 0000000000000000000000000000000000000000..2ed34b5fd259a77e858073a32deef1f805dd1325 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/embedding_parameters.py @@ -0,0 +1,26 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from ...model_implementations.parameter_base import ParameterBase +""" +Embedding containers. +""" + + +class EmbeddingParameter(ParameterBase): + """ + Embedding container. This should be safe to use for all types of embeddings (i.e. word, position, + and token type). + """ + + params: torch.Tensor + """ + Vocabulary parameter of shape [vocab_size, model_dim]. + """ + + def finalize(self) -> torch.Tensor: + return self.inference_model.transform_embedding_param(self.params) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/invfreq_parameters.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/invfreq_parameters.py new file mode 100644 index 0000000000000000000000000000000000000000..163f9de81d98bc30e8fb7bbf3b0b724dd74cd0c3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/invfreq_parameters.py @@ -0,0 +1,19 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from ...model_implementations.parameter_base import ParameterBase +""" +Common InvFreq Parameter Patterns +""" + + +class InvFreqParameter(ParameterBase): + + params: torch.Tensor + + def finalize(self) -> torch.Tensor: + return self.params.to(self.inference_model.activation_dtype.value) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/moe_parameters.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/moe_parameters.py new file mode 100644 index 0000000000000000000000000000000000000000..8ababf567ba9a499624c5924dd32564ad82922fb --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/moe_parameters.py @@ -0,0 +1,78 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from ...model_implementations.parameter_base import ParameterBase, ParamList +""" +Moe Parameters + +These parameters are compatible with any model inheriting from ``DSMoETransformerModelBase``. +""" + + +class MoEGatingWeightParameter(ParameterBase): + """ + Gating weight matrix. + """ + + params: torch.Tensor + """ + Projection matrix from the input activations to the gate logits. + """ + + def finalize(self) -> torch.Tensor: + return self.inference_model.transform_moe_gate_param(self.params) + + +class UnfusedMoEMLP1Parameter(ParameterBase): + """ + This container should be used when the experts are held in separate parameters + and need to be joined into a single group. + """ + + experts: ParamList("n_experts") # noqa: F821 + + def finalize(self) -> torch.Tensor: + stacked_experts = torch.stack([p for p in self.experts], dim=0) + return self.inference_model.transform_moe_mlp_1_param(stacked_experts) + + +class UnfusedMoEMLP2Parameter(ParameterBase): + """ + This container should be used when the experts are held in separate parameters + and need to be joined into a single group. + """ + + experts: ParamList("n_experts") # noqa: F821 + + def finalize(self) -> torch.Tensor: + stacked_experts = torch.stack([p for p in self.experts], dim=0) + return self.inference_model.transform_moe_mlp_2_param(stacked_experts) + + +class UnfusedMoEGatedMLPParameter(ParameterBase): + """ + MoE Parameter for a gated activation function in which the gating matrix is not + fused in the same parameter as the non-gating matrix. + + This is a stacked version of the ``GatedMLPParameter``. Please see that class for more + documentation on the layout of the parameters. + """ + + gating_experts: ParamList("n_experts") # noqa: F821 + + up_experts: ParamList("n_experts") # noqa: F821 + + def finalize(self) -> torch.Tensor: + transposed_experts = [] + for gate, up in zip(self.gating_experts, self.up_experts): + assert gate.shape[0] == up.shape[0], "Gated MLP parameters must have the same number of neurons." + total_neurons = gate.shape[0] + up.shape[0] + fused_expert = torch.cat([gate, up], dim=-1).reshape(total_neurons, -1) + transposed_experts.append(fused_expert) + + stacked_experts = torch.stack(transposed_experts, dim=0) + return self.inference_model.transform_moe_mlp_1_param(stacked_experts) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/unembed_parameters.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/unembed_parameters.py new file mode 100644 index 0000000000000000000000000000000000000000..9f67c0ce3c27d2fc07fc735acba8a64bda8bcbf1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/unembed_parameters.py @@ -0,0 +1,26 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from ...model_implementations.parameter_base import ParameterBase +""" +Unembedding containers. +""" + + +class UnembedParameter(ParameterBase): + """ + Unembedding parameter. This will likely be mapped to the same original weight in the model as the + embedding, but we have a different preferred sharding approach. + """ + + params: torch.Tensor + """ + Unembedding parameter of shape [vocab_size, model_dim]. 
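[Editor's note — not part of the diff] The unfused MoE containers above join per-expert checkpoint tensors into a single stacked tensor before handing it to the model's transform hook. The sketch below only illustrates that shape bookkeeping; the dimensions (n_experts=4, intermediate=8, model_dim=16) are made up and no DeepSpeed classes are involved.

```python
import torch

n_experts, intermediate, model_dim = 4, 8, 16  # illustrative sizes only

# Per-expert weights as a checkpoint exposes them: [out_neurons, in_neurons].
gate_w = [torch.randn(intermediate, model_dim) for _ in range(n_experts)]
up_w = [torch.randn(intermediate, model_dim) for _ in range(n_experts)]

# UnfusedMoEMLP1Parameter-style stacking: one tensor of shape [n_experts, out, in].
stacked = torch.stack(up_w, dim=0)
assert stacked.shape == (n_experts, intermediate, model_dim)

# UnfusedMoEGatedMLPParameter-style fusion: gate and up are concatenated and
# reshaped so each expert contributes a [2 * intermediate, model_dim] block.
fused = [torch.cat([g, u], dim=-1).reshape(2 * intermediate, model_dim) for g, u in zip(gate_w, up_w)]
fused_stacked = torch.stack(fused, dim=0)
assert fused_stacked.shape == (n_experts, 2 * intermediate, model_dim)
```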
+ """ + + def finalize(self) -> torch.Tensor: + return self.inference_model.transform_unembed_param(self.params) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/policy.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/policy.py new file mode 100644 index 0000000000000000000000000000000000000000..c6612090a0df41228244c193b492493824a86394 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/policy.py @@ -0,0 +1,33 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Any + +from ...config_v2 import RaggedInferenceEngineConfig +from ..inference_policy_base import ContainerMap, InferenceV2Policy +from .container import FalconNonTransformerContainer, FalconTransformerContainer +from .container import FalconNewArchTransformerContainer +from .model import FalconInferenceModel + + +class FalconPolicy(InferenceV2Policy): + + def instantiate_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> FalconInferenceModel: + return FalconInferenceModel(config=self._model_config, engine_config=engine_config, base_mp_group=mp_group) + + def build_container_map(self) -> ContainerMap: + map = ContainerMap() + + trans_container_cls = FalconNewArchTransformerContainer if self._model_config.new_decoder_architecture else FalconTransformerContainer + transformer_containers = [trans_container_cls(self.model) for _ in range(self.model.num_layers)] + + map.set_transformer_params(['transformer.h'], transformer_containers) + + map.set_non_transformer_params(FalconNonTransformerContainer(self.model)) + + map.set_unmapped_params( + [f'model.layers.{i}.self_attn.rotary_emb.inv_freq' for i in range(self.model.num_layers)]) + + return map diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/flat_model_helpers.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/flat_model_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..ebdb59bca9204d8895d7a6536ccb46842aed8cc0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/flat_model_helpers.py @@ -0,0 +1,282 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Dict, Iterable, Tuple, Optional +from os import path + +import torch + +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import RaggedUtilsBuilder +from deepspeed.runtime.config_utils import DeepSpeedConfigModel +from .layer_container_base import LayerContainer +from ..inference_parameter import InferenceParameter, STR_TO_DTYPE +from ..inference_utils import elem_size + + +def pad_to_aligned_offset(offset: int, alignment: int = 256) -> int: + """ + Pad the provided offset to a well-aligned value. + """ + return ((offset + alignment - 1) // alignment) * alignment + + +class TensorMetadata(DeepSpeedConfigModel): + """ + A class to represent a tensor specification. + """ + dtype: Optional[str] + shape: Optional[Tuple[int, ...]] + strides: Optional[Tuple[int, ...]] + offset: int + + +class ParameterMetadata(DeepSpeedConfigModel): + """ + A class to represent a parameter specification. 
+ """ + core_param: TensorMetadata = None + aux_params: Dict[str, TensorMetadata] = {} + + +class LayerMetadata(DeepSpeedConfigModel): + """ + A class to represent a layer specification. + """ + params: Dict[str, ParameterMetadata] = {} + + +class ModelMetadata(DeepSpeedConfigModel): + """ + A class to represent a model specification. + """ + policy: str = "" + layers: Dict[str, LayerMetadata] = {} + + +def make_param_filename(base: str, rank: int, n_ranks: int) -> str: + """ + Make a filename for a parameter file. + + Arguments: + rank: Rank of the file. + n_ranks: Total number of ranks. + + Returns: + str: Filename. + """ + return path.join(base, f"params_rank_{rank}_of_{n_ranks}.pt") + + +def make_metadata_filename(base: str, rank: int, n_ranks: int) -> str: + """ + Make a filename for a metadata file. + + Arguments: + rank: Rank of the file. + n_ranks: Total number of ranks. + + Returns: + str: Filename. + """ + return path.join(base, f"metadata_rank_{rank}_of_{n_ranks}.json") + + +def make_model_config_filename(base: str) -> str: + """ + Make a filename for a model config file. + + Arguments: + base: Base directory. + + Returns: + str: Filename. + """ + return path.join(base, "ds_model_config.json") + + +def flatten_inference_model( + transformer_containers: Iterable[LayerContainer], + non_transformer_container: LayerContainer, + policy_name: str, +) -> Tuple[torch.Tensor, ModelMetadata]: + """ + Flatten the underlying parameters into + + Arguments: + transformer_containers: Iterable of layer containers corresponding to the transformer + parameters. + non_transformer_container: Layer container corresponding to the non-transformer parameters. + policy_name: The name of the policy class (typically accessed with `type(policy).__name__`). + + Returns: + Iterable[Any]: Flattened list of parameters. + """ + alloc_fn = RaggedUtilsBuilder().load().allocate_view_on + + total_size = 0 + metadata = ModelMetadata(policy=policy_name) + + def process_layer(layer_container: LayerContainer, l_name: str, cur_offset: int) -> int: + """ + Iterate over the parameters of a single container and collect metadata for the final + flattened buffer. + + Arguments: + layer_container: The layer container to process. + l_name: The name of the layer container to key the metadata. + cur_offset: The current offset into the flattened buffer. + + Captured Variables: + metadata: The metadata object to populate. + + Returns: + int: The updated offset into the flattened buffer. 
+ """ + try: + _ = layer_container.is_populated + except ValueError as e: + raise ValueError(f"Layer container {l_name} is not populated.") from e + + layer_metadata = LayerMetadata() + + for p_name in layer_container.annotation_attrs: + param = getattr(layer_container, p_name) + param_metadata = ParameterMetadata() + + if param is None: + param_metadata.core_param = TensorMetadata(offset=-1) + layer_metadata.params[p_name] = param_metadata + continue + + param_metadata.core_param = TensorMetadata(dtype=str(param.dtype), + shape=param.shape, + strides=param.stride(), + offset=cur_offset) + + cur_offset += pad_to_aligned_offset(elem_size(param.dtype) * param.numel()) + + for t_name, tensor in param.aux_attrs.items(): + param_metadata.aux_params[t_name] = TensorMetadata(dtype=str(tensor.dtype), + shape=tensor.shape, + strides=tensor.stride(), + offset=cur_offset) + + cur_offset += pad_to_aligned_offset(elem_size(tensor.dtype) * tensor.numel()) + + layer_metadata.params[p_name] = param_metadata + + metadata.layers[l_name] = layer_metadata + return cur_offset + + for i, layer in enumerate(transformer_containers): + l_name = f"transformer_layer_{i}" + total_size = process_layer(layer, l_name, total_size) + + l_name = "non_transformer" + total_size = process_layer(non_transformer_container, l_name, total_size) + + buffer = torch.empty(total_size, dtype=torch.uint8, device=get_accelerator().current_device()) + + def copy_layer(layer_container: LayerContainer, l_name: str) -> None: + """ + Local method for copying from the layer container to the flattened buffer. + + Arguments: + layer_container: The layer container to copy from. + l_name: The name of the layer container to key the metadata. + + Captured Variables: + buffer: The flattened buffer to copy into. + metadata: The metadata object to populate. + """ + l_metadata = metadata.layers[l_name] + for p_name in layer_container.annotation_attrs: + p_metadata = l_metadata.params[p_name] + param = getattr(layer_container, p_name) + + if param is None: + continue + + core_param = alloc_fn(param, buffer, p_metadata.core_param.offset) + core_param.copy_(param) + + aux_params = {} + + for t_name, tensor in param.aux_attrs.items(): + t_view = alloc_fn(tensor, buffer, p_metadata.aux_params[t_name].offset) + aux_params[t_name] = t_view + t_view.copy_(tensor) + + setattr(layer_container, p_name, InferenceParameter.initialize(core_param, **aux_params)) + + for i, layer in enumerate(transformer_containers): + l_name = f"transformer_layer_{i}" + copy_layer(layer, l_name) + + l_name = "non_transformer" + copy_layer(non_transformer_container, l_name) + + return buffer, metadata + + +def restore_inference_model(buffer: torch.Tensor, metadata: ModelMetadata, + transformer_containers: Iterable[LayerContainer], + non_transformer_container: LayerContainer) -> None: + """ + Restore the model from the buffer and metadata. + + Arguments: + buffer: Buffer containing the model parameters. + metadata: Metadata for the model. + transformer_containers: Iterable of transformer layer containers. + non_transformer_container: Non-transformer layer container. + """ + alloc_fn = RaggedUtilsBuilder().load().allocate_view_like + + def restore_layer(layer_container: LayerContainer, l_name: str) -> None: + """ + Local method for restoring a layer container from a flattened buffer. This + only constructs views for the parameters onto the buffer. No data movement + is performed. + + Arguments: + layer_container: The layer container to restore. 
+ l_name: The name of the layer container to key the metadata. + + Captured Variables: + buffer: The flattened buffer to reconstruct views on top of. + metadata: The metadata object describing the each parameter in the model. + """ + l_metadata = metadata.layers[l_name] + + for p_name in layer_container.annotation_attrs: + p_metadata = l_metadata.params[p_name] + + if p_metadata.core_param.offset == -1: + layer_container.direct_injection(p_name, None) + continue + + dummy_tensor = torch.empty([], dtype=STR_TO_DTYPE[p_metadata.core_param.dtype]) + core_param = alloc_fn(p_metadata.core_param.shape, p_metadata.core_param.strides, dummy_tensor, buffer, + p_metadata.core_param.offset) + + aux_params = {} + + for t_name, t_metadata in p_metadata.aux_params.items(): + dummy_tensor = torch.empty([], dtype=STR_TO_DTYPE[t_metadata.dtype]) + t_view = alloc_fn(t_metadata.shape, t_metadata.strides, dummy_tensor, buffer, t_metadata.offset) + + aux_params[t_name] = t_view + + restored_param = InferenceParameter.initialize(core_param, **aux_params) + layer_container.direct_injection(p_name, restored_param) + + for i, layer in enumerate(transformer_containers): + l_name = f"transformer_layer_{i}" + restore_layer(layer, l_name) + + l_name = "non_transformer" + restore_layer(non_transformer_container, l_name) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/inference_model_base.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/inference_model_base.py new file mode 100644 index 0000000000000000000000000000000000000000..894a4137407e9b0c7c38f18d504cebf2977649bd --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/inference_model_base.py @@ -0,0 +1,272 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from abc import ABC, abstractmethod +from typing import Iterable, Optional, Tuple, Type + +import torch + +import deepspeed.comm as dist +from ..ragged import DSStateManager, RaggedBatchWrapper +from ..ragged.manager_configs import KVCacheConfig +from ..ragged import DSSequenceDescriptor +from ..model_implementations.layer_container_base import LayerContainer +from ..config_v2 import RaggedInferenceEngineConfig +from .flat_model_helpers import ModelMetadata + +try: + from functools import cached_property +except ImportError: + + def cached_property(func): + return property(func) + + +""" +This abstract class defines the interfaces that a model implementation should implement +in order to include anything that may be called by the engine. Most models should be able +to inherit from `DSInferenceTransformerModelBase` to reduce implementation work so it is recommended +to begin there. +""" +""" +Placeholder for typing the model config, which can vary based on model implementation/ +""" +DSModelImplementationConfig = Type['DSModelImplementationConfig'] +""" +Placeholder for typing the distributed comm object. + +TODO(cmikeh2): Replace when we have a more defined API for the inference communication system. +""" +MPType = Type["MPType"] + + +class DSInferenceModelBase(torch.nn.Module, ABC): + """ + Implementation of a model for inference composable with ragged batching. + """ + + _config: DSModelImplementationConfig + """ + Model-specific configuration. No abstraction surrounds this yet. + """ + + _engine_config: RaggedInferenceEngineConfig + """ + Engine configuration. 
+ """ + + _base_mp_group: MPType + """ + Base communication group for Tensor-parallel inference. + """ + + _non_transformer: Optional[LayerContainer] + """ + Abstract container for storing both embedding (pre-transformer) and unembedding (post-transformer) + parameters. This attribute should be None at model instantiation until the Policy sets + the model parameters. These parameters are grouped together since many model implementations + will tie the embedding and unembedding parameters together. + """ + + _transformer: Optional[Iterable[LayerContainer]] + """ + List of abstract containers (1 per layer) for storing transformer (transformer) + parameters. This attribute should be None at model instantiation until the Policy + sets the model parameters. + """ + + state_manager: Optional[DSStateManager] + """ + Since the state manager is lazy initialized, by the engine, it is not guaranteed to be present + until full initialization. + """ + + def __init__(self, config: DSModelImplementationConfig, engine_config: RaggedInferenceEngineConfig, + base_mp_group: MPType) -> None: + """ + Minimal initialization of the model. + + Arguments: + config (DSModelImplementationConfig): Model-specific configuration. No assumptions + should be made about this config that are not closely tied to the specific + model implementation. + engine_config (RaggedInferenceEngineConfig): Engine configuration. + base_mp_group (MPType): Base communication group for Tensor-parallel inference. + """ + super().__init__() + self._config = config + self._engine_config = engine_config + self._base_mp_group = base_mp_group + + # Set to None until the Policy sets the model parameters + self._non_transformer = None + self._transformer = None + self._flattened_param_buffer = None + self._flattened_param_metadata = None + + @property + def config(self) -> DSModelImplementationConfig: + """ + The model config. + """ + return self._config + + def set_parameters(self, transformer: Iterable[LayerContainer], non_transformer: LayerContainer, + flattened_param_buffer: torch.Tensor, flattened_param_metadata: ModelMetadata): + """ + Set the model parameters for the embedding, transformer, and unembedding containers. + """ + self._transformer = transformer + self._non_transformer = non_transformer + self._flattened_param_buffer = flattened_param_buffer + self._flattened_param_metadata = flattened_param_metadata + + def set_state_manager(self, state_manager: DSStateManager): + """ + Sets the state manager attribute. This is called by the inference engine after + the model is fully initialized. + """ + self.state_manager = state_manager + + @cached_property + def tp_rank(self) -> int: + """ + The rank of the current process. + + # TODO(cmikeh2): Kind of a hack right now, but this is too verbose to use at + the frequency we need. + """ + return dist.get_rank(group=self._base_mp_group) + + @cached_property + def tp_size(self) -> int: + """ + The total number of processes. + + # TODO(cmikeh2): Kind of a hack right now, but this is too verbose to use at + the frequency we need. + """ + return dist.get_world_size(group=self._base_mp_group) + + @property + def model_config(self): + """ + The model config. + """ + return self._config + + @property + def engine_config(self): + """ + The engine config. + """ + return self._engine_config + + @property + def flattened_params(self) -> Optional[torch.Tensor]: + """ + The flattened parameter buffer. 
+ """ + return self._flattened_param_buffer + + @property + def flattened_param_metadata(self) -> Optional[ModelMetadata]: + """ + The flattened parameter metadata. + """ + return self._flattened_param_metadata + + @abstractmethod + def get_kv_requirements(self, sequence: DSSequenceDescriptor, max_new_tokens: int, + max_new_blocks: Tuple[int, ...]) -> Tuple[int, torch.Tensor]: + """ + Given a sequence and the number of new tokens in the sequence, determine the + number of new KV blocks needed to support the sequence. This method is + used to help the engine provide schedulability APIs and can be used as a helper + for ``maybe_allocate_kv``. + + Args: + sequence (DSSequenceDescriptor): The sequence for which to allocate KV-storage. + max_new_tokens (int): Maximum number of tokens to hypothetically schedule. + max_new_blocks (int): Maximum number of blocks to hypothetically allocate. + + Returns: + Tuple[int, torch.Tensor]: The tuple of number of tokens scheduled and number + of blocks allocated (per KV cache). In general, only one of these numbers will + match the corresponding input argument, but this is not guaranteed. + """ + raise NotImplementedError() + + @abstractmethod + def get_remaining_block_capacity(self, sequence: DSSequenceDescriptor) -> int: + raise NotImplementedError() + + @abstractmethod + def maybe_allocate_kv(self, sequence: DSSequenceDescriptor, n_new_tokens: int) -> None: + """ + Given a sequence and the number of new tokens in the sequence, determine + whether or not additional KV-storage is needed and allocate it if so. + + Args: + sequence (DSSequenceDescriptor): The sequence for which to allocate KV-storage. + n_new_tokens (int): The number of new tokens in the sequence. + """ + raise NotImplementedError() + + @abstractmethod + def kv_cache_config(self) -> Tuple[KVCacheConfig, ...]: + """ + Return the KV-cache configuration for this model. This should be a tuple of one or more + KVCacheConfig objects (one for each distinct cache group). + """ + raise NotImplementedError() + + @property + @abstractmethod + def max_sequence_length(self) -> int: + """ + The maximum sequence length supported by the model. + """ + ... + + def maybe_free_kv(self, sequence: DSSequenceDescriptor) -> None: + """ + After completing a forward pass, determine whether or not the there are any KV blocks + that maybe freed since they are no longer in use. + + Consider the following example: + + We have a block size of 4 and a local window size of 8. At the beginning of the forward + pass there 10 tokens had been seen and the new forward has a size of 4. This would lend + itself to the following cache structure prior to the forward: + [[0, 1, 2*, 3*] [4*, 5*, 6*, 7*] [8*, 9*, x, x] [x x x x]] + Where x's denote empty cache locations and * denote values that are needed for attention + of the next open slot. After the forward, the cache would look like the following: + [[0, 1, 2, 3] [4, 5, 6*, 7*] [8*, 9*, 10*, 11*] [12* 13* x x]] + In this case, the first block is no longer needed since it is not needed for any future + local attention windows. This function would be responsible for freeing that block. + + Default behavior assumes no local patterns that require freeing and in general should + be sufficient. + """ + pass + + @abstractmethod + def prepare_batch(self, wrapped_batch: RaggedBatchWrapper) -> None: + """ + This will be called before each forward with the intent of building forward-specific metadata + about a batch. 
The intent here is to build data structures like attention atoms without necessarily + needing to implement graphable kernels to do so. + + Abstract so as to force model implementations to opt out of doing anything here explicitly. + """ + raise NotImplementedError() + + def forward(wrapped_batch: RaggedBatchWrapper) -> torch.Tensor: + """ + Complete a forward pass of the model. This interface should be graphable, so it + should not rely on the ability to use python control flow. + """ + raise NotImplementedError() diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/inference_policy_base.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/inference_policy_base.py new file mode 100644 index 0000000000000000000000000000000000000000..d5a326c03599ec3495bc7d2a17268a83a0f53132 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/inference_policy_base.py @@ -0,0 +1,220 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import json +from abc import ABC, ABCMeta, abstractmethod +from typing import Any, Iterable, List, Optional, Union + +import torch + +from ..config_v2 import RaggedInferenceEngineConfig +from ..checkpoint import CheckpointEngineBase +from ..logging import inference_logger +from .layer_container_base import LayerContainer +from .inference_model_base import DSInferenceModelBase +from .flat_model_helpers import ( + flatten_inference_model, + make_param_filename, + make_metadata_filename, + ModelMetadata, + restore_inference_model, +) + +POLICIES = {} + + +class ContainerMap: + + def __init__(self) -> None: + self._prefix_map = {} + self._transformer_params = None + self._non_transformer_params = None + + @property + def transformer_params(self) -> Iterable[LayerContainer]: + return self._transformer_params + + @property + def non_transformer_params(self) -> LayerContainer: + return self._non_transformer_params + + def set_transformer_params(self, prefixes: Union[str, Iterable[str]], containers: List[LayerContainer]) -> None: + if not isinstance(containers, list): + raise ValueError( + f"The transformer containers should be a list, of one container per layer, but got {type(containers)} instead." + ) + + self._transformer_prefixes = prefixes if isinstance(prefixes, list) else [prefixes] + self._transformer_params = containers + + def set_non_transformer_params(self, container: LayerContainer) -> None: + self._non_transformer_params = container + + def set_unmapped_params(self, prefixes: Union[str, Iterable[str]]) -> None: + self._unmapped_prefixes = prefixes + + def map_param(self, name, parameter) -> None: + for unmapped_prefix in self._unmapped_prefixes: + if name.startswith(unmapped_prefix): + inference_logger().debug(f"Ignoring: {name} for {unmapped_prefix}") + return + + for transformer_prefix in self._transformer_prefixes: + if name.startswith(transformer_prefix): + popped_name = name[len(transformer_prefix) + 1:] + layer_idx = popped_name.split(".")[0] + assert layer_idx.isdigit( + ), f"expected name to start w. 
list index but got {layer_idx} instead, name={name}" + layer_idx = int(layer_idx) + inference_logger().debug( + f"Setting: {'.'.join(popped_name.split('.')[1:])} for layer-idx={layer_idx} to {parameter.shape}") + self._transformer_params[layer_idx].set_dependency(".".join(popped_name.split(".")[1:]), parameter) + return + + try: + inference_logger().debug(f"Setting: {name} to {parameter.shape}") + self._non_transformer_params.set_dependency(name, parameter) + except ValueError: + # Catch the ValueError here from the non_transformer_params because we are knowingly + # calling it with something that may not match. This should allow us to raise a slightly more + # informative error message. + raise ValueError(f"Cannot find container for {name}, please double check the Containers/ContainerMap") + + def validate(self) -> None: + if not self._non_transformer_params.is_initialized: + raise RuntimeError("Non-transformer parameters not fully initialized after checkpoint load.") + + for layer_idx, container in enumerate(self._transformer_params): + if not container.is_initialized: + raise RuntimeError( + f"Transformer container at index {layer_idx} not fully initialized after checkpoint load.") + + +class PolicyMeta(ABCMeta): + + def __new__(cls, name, bases, dct): + new_obj = super().__new__(cls, name, bases, dct) + if name != "InferenceV2Policy": + POLICIES[name] = new_obj + return new_obj + + +class InferenceV2Policy(ABC, metaclass=PolicyMeta): + """ + The InferenceV2Policy is the base class for all inference policies. An inference policy + is responsible for instantiating the inference model and mapping the parameters from the + checkpoint engine to the model itself. + """ + + def __init__( + self, + model_config: Any, + checkpoint_engine: Optional[CheckpointEngineBase] = None, + inf_checkpoint_path: Optional[str] = None, + ) -> None: + """ + Create the Policy with sufficient context to build the model. There are two supported + model creation mechanisms. + + The first is the generalized ``checkpoint_engine`` which + will iterate over the parameters of the model and provide them to the policy. These in + turn will be sharded/transformed by the model implementation. + + The second is used to re-create a previously serialized DeepSpeed inference model. These + checkpoints should not be used across different model backend configurations. + + TODO(cmikeh2): Enforce this in code + """ + if checkpoint_engine is None and inf_checkpoint_path is None: + raise ValueError("Either checkpoint_engine or ds_checkpoint_path must be provided.") + + if checkpoint_engine is not None and inf_checkpoint_path is not None: + raise ValueError("Only one of checkpoint_engine or ds_checkpoint_path can be provided.") + + self._checkpoint_engine = checkpoint_engine + self._inf_checkpoint_path = inf_checkpoint_path + self._model_config = model_config + + def build_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> DSInferenceModelBase: + """ + Completely instantiate the inference model. This will both create the ops needed to run the + model, as well as load the model parameters via the checkpoint engine. For more context + on each of these components please see ``instantiate_model`` and ``populate_model_parameters``. + + Arguments: + engine_config: The config that has been used to instantiate the engine. This is used + to communicate to the model implementation the limits on batches (sequences/tokens) + and bound the size of intermediate buffers. 
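[Editor's note — not part of the diff] ContainerMap.map_param above routes each checkpoint tensor by string prefix: the layer index is peeled off the name and the remainder becomes the dependency key on that layer's container. A short trace with a hypothetical Llama-style name and prefix (neither is taken from a specific policy):

```python
name = "model.layers.7.self_attn.o_proj.weight"   # hypothetical checkpoint key
transformer_prefix = "model.layers"               # hypothetical transformer prefix

popped_name = name[len(transformer_prefix) + 1:]   # "7.self_attn.o_proj.weight"
layer_idx = int(popped_name.split(".")[0])         # 7 -> index into the container list
dependency = ".".join(popped_name.split(".")[1:])  # "self_attn.o_proj.weight"
print(layer_idx, dependency)
# map_param would then call:
#   self._transformer_params[7].set_dependency("self_attn.o_proj.weight", parameter)
```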
+ mp_group: Object to enable communication between tensor parallel ranks. + + Returns: + DSInferenceModelBase: An implementation of the inference model abstraction that will be + run by the engine. + """ + self.model = self.instantiate_model(engine_config, mp_group) + self.populate_model_parameters() + return self.model + + @abstractmethod + def instantiate_model(self, engine_config: RaggedInferenceEngineConfig) -> DSInferenceModelBase: + """ + Instantiate the inference model. Depending on the engine/model config, this could be where + different model implementations could be selected. + + Arguments: + engine_config: The config that has been used to instantiate the engine. This is used + to communicate to the model implementation the limits on batches (sequences/tokens) + and bound the size of intermediate buffers. + + Returns: + DSInferenceModelBase: An implementation of the inference model abstraction that will be + run by the engine. + """ + ... + + @abstractmethod + def build_container_map(self) -> ContainerMap: + """ + Build a dictionary representing the structure of the string prefixes leading + to the parameters to be mapped to the container. + + Returns: + ContainerMap: An instantiated mapping describing how checkpoint prefixes map + to ``LayerContainer`` instances. + """ + raise NotImplementedError() + + def populate_model_parameters(self) -> None: + """ + This model will iterate over the parameters (as provided by the checkpoint engine) and + use the container map built by ``build_container_map`` to populate the model + """ + + container_map = self.build_container_map() + + if self._checkpoint_engine is not None: + for name, parameter in self._checkpoint_engine.parameters(): + container_map.map_param(name, parameter) + + buffer, metadata = flatten_inference_model(container_map.transformer_params, + container_map.non_transformer_params, self.__class__.__name__) + else: + + buffer_path = make_param_filename(self._inf_checkpoint_path, self.model.tp_rank, self.model.tp_size) + metadata_path = make_metadata_filename(self._inf_checkpoint_path, self.model.tp_rank, self.model.tp_size) + + buffer = torch.load(buffer_path) + metadata = json.load(open(metadata_path, "r")) + metadata = ModelMetadata.parse_raw(metadata) + + restore_inference_model(buffer, metadata, container_map.transformer_params, + container_map.non_transformer_params) + + container_map.validate() + + self.model.set_parameters(transformer=container_map.transformer_params, + non_transformer=container_map.non_transformer_params, + flattened_param_buffer=buffer, + flattened_param_metadata=metadata) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/inference_transformer_base.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/inference_transformer_base.py new file mode 100644 index 0000000000000000000000000000000000000000..fae67dc8fc2ad807df5ed4b337bd709cab20c9b5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/inference_transformer_base.py @@ -0,0 +1,617 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from abc import abstractmethod +from typing import Optional + +import torch + +from deepspeed.accelerator import get_accelerator +from ..config_v2 import RaggedInferenceEngineConfig +from ..inference_utils import ActivationType, ceil_div, is_gated +from ..model_implementations import * +from ..model_implementations.sharding import * +from ..modules.configs import ( + DSEmbeddingsConfig, + DSLinearConfig, + DSMoEConfig, + DSNormConfig, + DSSelfAttentionConfig, + DSUnembedConfig, + NormTypeEnum, + PositionalEmbeddingType, + RotateHalfConfig, +) +from ..modules import heuristics +from ..ragged import ( + DSSequenceDescriptor, + KVCacheConfig, + RaggedBatchWrapper, +) +from .inference_model_base import ( + DSInferenceModelBase, + DSModelImplementationConfig, + MPType, +) +from ..inference_parameter import InferenceParameter + +try: + from functools import cached_property +except ImportError: + + def cached_property(func): + return property(func) + + +class DSTransformerModelBase(DSInferenceModelBase): + """ + Dimensioning properties + """ + + @property + @abstractmethod + def num_layers(self) -> int: + """ + Number of the layers in the model + """ + ... + + @property + @abstractmethod + def model_dim(self) -> int: + """ + Size of embedding projection and residuals. + """ + ... + + @property + @abstractmethod + def vocab_size(self) -> int: + """ + Size of the vocabulary (including padding). + """ + ... + + @property + @abstractmethod + def head_size(self) -> int: + """ + Size of each attention head. + """ + ... + + @property + @abstractmethod + def n_heads(self) -> int: + """ + The number of query heads on the model. This should not take into account + any dimension reductions from model sharding. + """ + ... + + @property + def n_heads_q(self) -> int: + """ + Alias to n_heads. + """ + return self.n_heads + + @property + def n_heads_kv(self) -> int: + """ + The number of key and value heads on the model. For GQA or MQA, overload this attribute. + Otherwise it adopts MHA formulations and uses n_heads. This should not take into account + any dimension reductions from model sharding. + """ + return self.n_heads + + @property + @abstractmethod + def intermediate_dim(self) -> int: + """ + The size of the (unsharded) intermediate projection dim. For a gated activation function + this is the size of the input to the second MLP layer. This should not take into account + any dimension reductions from model sharding. + """ + ... + + @property + @abstractmethod + def positional_embedding_type(self) -> PositionalEmbeddingType: + """ + The type of positional embedding used by the model. + """ + ... + + """ + Architectural properties + """ + + @property + @abstractmethod + def activation_dtype(self) -> torch.dtype: + """ + The activation dtype of the model. + """ + ... + + @property + @abstractmethod + def mlp_activation_fn(self) -> ActivationType: + """ + The activation function used in the MLP. + """ + ... + + @property + @abstractmethod + def norm_type(self) -> NormTypeEnum: + """ + The type of normalization used in the model. + """ + ... + + @property + @abstractmethod + def positional_embedding_config(self) -> Optional[RotateHalfConfig]: + """ + The positional embedding configuration for the model. + """ + ... + + """ + Derived helpers + """ + + @cached_property + def n_heads_q_local(self) -> int: + """ + Number of local heads post sharding. 
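[Editor's note — not part of the diff] n_heads_q_local and n_heads_kv_local below come from the sharding helpers, which are not shown in this diff. The arithmetic sketch here only illustrates the common even-split case for grouped-query attention; the real get_local_heads may handle uneven or replicated KV heads differently.

```python
n_heads_q, n_heads_kv, tp_size = 32, 8, 4   # hypothetical model / parallelism degree

q_local = n_heads_q // tp_size              # 8 query heads per rank
kv_local = max(1, n_heads_kv // tp_size)    # 2 KV heads per rank
groups_per_rank = q_local // kv_local       # 4 query heads share each local KV head
print(q_local, kv_local, groups_per_rank)
```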
+ """ + return get_local_heads(self.tp_rank, self.tp_size, self.n_heads_q, self.n_heads_kv)[0] + + @cached_property + def n_heads_kv_local(self) -> int: + """ + Number of local heads post sharding. + """ + return get_local_heads(self.tp_rank, self.tp_size, self.n_heads_q, self.n_heads_kv)[1] + + @property + def gated_mlp(self) -> bool: + """ + Return a boolean to determine whether the model uses a gated activation function. + """ + return is_gated(self.mlp_activation_fn) + + """ + Method implementations + """ + + def __init__(self, config: DSModelImplementationConfig, engine_config: RaggedInferenceEngineConfig, + base_mp_group: MPType) -> None: + """ + Base implementation for initialization. By default, this will initialize + the traditional components of a transformer model: + - Embedding + - QKV projection + - Self attention + - Attention output projection + - Feed forward network + - Normalization + - Unembedding + + Arguments: + config (DSModelImplementationConfig): Model-specific configuration. No assumptions + should be made about this config that are not closely tied to the specific + model implementation. + engine_config (RaggedInferenceEngineConfig): Engine configuration. + base_mp_group (MPType): Base communication group for Tensor-parallel inference. + """ + super().__init__(config, engine_config, base_mp_group) + + self.make_norm_layer() + self.make_qkv_layer() + self.make_attn_layer() + self.make_attn_out_layer() + self.make_mlp_1_layer() + self.make_mlp_2_layer() + self.make_embedding_layer() + self.make_unembedding_layer() + self._kv_cache_config = None + + ######### Embedding ######### + def make_embedding_layer(self) -> None: + """ + Performs setup and creates embedding DSModule. This will set the `self.embed` attribute. + """ + + embed_config = DSEmbeddingsConfig( + max_tokens=self._engine_config.state_manager.max_ragged_batch_size, + residual_dtype=self.activation_dtype, + embedding_dim=self.model_dim, + ) + + self.embed = heuristics.instantiate_embed(embed_config, self._engine_config) + + def transform_embedding_param(self, param: torch.Tensor) -> InferenceParameter: + """ + Performs embedding sharding along the channels dimension. + """ + # Until we can do non-contiguous all-gather, we won't shard the embedding parameters. + param = param.to(self.activation_dtype.value) + return InferenceParameter.initialize(param) + + ######### Unembedding ######### + def make_unembedding_layer(self) -> None: + """ + Performs setup and creates an unembedding layer. This implementation assumes + normalization prior to the LM head projection. If this does not match the model's + implementation, override this method. This will set the ``self.unembed`` attribute. 
+ """ + unembed_dim = sharded_unembed_dim(self.vocab_size, self.tp_rank, self.tp_size) + + unembed_config = DSUnembedConfig( + max_tokens=self._engine_config.state_manager.max_ragged_batch_size, + max_sequences=self._engine_config.state_manager.max_ragged_sequence_count, + dtype=self.activation_dtype, + model_dim=self.model_dim, + vocab_size=unembed_dim, + norm_type=self.norm_type, + ) + + self.unembed = heuristics.instantiate_unembed(unembed_config, self._engine_config) + + if self.tp_size > 1: + self._comm_logits = torch.empty(self.tp_size, + self._engine_config.state_manager.max_ragged_sequence_count, + unembed_dim, + device=get_accelerator().current_device(), + dtype=self.activation_dtype.value) + self._return_logits = torch.empty(self._engine_config.state_manager.max_ragged_sequence_count, + self.vocab_size, + device=get_accelerator().current_device(), + dtype=self.activation_dtype.value) + + def transform_unembed_param(self, param: torch.Tensor) -> InferenceParameter: + """ + Performs sharding along the vocab dimension. + """ + param = shard_unembed_param(param, self.tp_rank, self.tp_size).to(self.activation_dtype.value) + return InferenceParameter.initialize(param) + + ######### QKV ######### + def make_qkv_layer(self) -> None: + """ + Instantiates the linear projection layer for the QKV linear layer. This sets the + `self.qkv` attribute. + """ + out_features = qkv_out_features(self.model_dim, self.tp_rank, self.tp_size, self.head_size, self.n_heads_q, + self.n_heads_kv) + + linear_config = DSLinearConfig( + max_tokens=self._engine_config.state_manager.max_ragged_batch_size, + in_channels=self.model_dim, + out_channels=out_features, + input_dtype=self.activation_dtype, + output_dtype=self.activation_dtype, + ) + + self.qkv = heuristics.instantiate_linear(linear_config, self._engine_config) + + def transform_qkv_param(self, param: torch.Tensor) -> InferenceParameter: + """ + Passes a QKV parameter to the underlying implementation for any necessary + transformations. + + Args: + param (torch.Tensor): The parameter to transform. This may be either a bias or weight and should have + the shape (out_neurons, in_neurons) + """ + param = shard_qkv_param(param, self.tp_rank, self.tp_size, self.head_size, self.n_heads_q, self.n_heads_kv) + return self.qkv.transform_param(param) + + ######### Attention ######### + def make_attn_layer(self) -> None: + """ + Builds the attention layer for the model. This sets the `self.attn` attribute. + """ + softmax_scale = 1.0 / (self.head_size**0.5) + + attn_config = DSSelfAttentionConfig(max_tokens=self._engine_config.state_manager.max_ragged_batch_size, + n_heads_q=self.n_heads_q_local, + n_heads_kv=self.n_heads_kv_local, + head_size=self.head_size, + max_sequences=self._engine_config.state_manager.max_ragged_sequence_count, + scale_factor=softmax_scale, + input_dtype=self.activation_dtype, + output_dtype=self.activation_dtype, + positional_embedding_type=self.positional_embedding_type, + positional_embedding_config=self.positional_embedding_config) + + self.attn = heuristics.instantiate_attention(attn_config, self._engine_config) + + def get_kv_requirements(self, sequence: DSSequenceDescriptor, max_new_tokens: int, + max_new_blocks: int) -> Tuple[int, int]: + """ + See ``DSInferenceModelBase.get_kv_requirements`` for documentation. + + This method assumes an autoregressive dense attention pattern. Override this method + if this does not match the model's attention pattern. 
+ """ + total_tokens = sequence.seen_tokens + max_new_tokens + req_blocks = ceil_div(total_tokens, self.attn.kv_block_size) + block_lim = req_blocks - sequence.cur_allocated_blocks + + if block_lim <= max_new_blocks: + return max_new_tokens, block_lim + + token_capacity = (max_new_blocks + + sequence.cur_allocated_blocks) * self.attn.kv_block_size - sequence.seen_tokens + + return token_capacity, max_new_blocks + + def get_remaining_block_capacity(self, sequence: DSSequenceDescriptor) -> int: + return sequence.seen_tokens % self.attn.kv_block_size + + def maybe_allocate_kv(self, sequence: DSSequenceDescriptor, n_new_tokens: int) -> None: + """ + See ``DSInferenceModelBase.maybe_allocate_kv`` for documentation. + + This method assumes an autoregressive dense attention pattern. Override this method + if this does not match the model's attention pattern. + """ + free_block = self.state_manager.free_blocks[0] + _, n_needed_blocks = self.get_kv_requirements(sequence, n_new_tokens, free_block) + + if n_needed_blocks > 0: + new_blocks = self.state_manager.allocate_blocks(n_needed_blocks) + sequence.extend_kv_cache(new_blocks) + + def kv_cache_config(self) -> Tuple[KVCacheConfig, ...]: + """ + See ``DSInferenceModelBase.kv_cache_config`` for documentation. + + This method assumes an autoregressive dense attention pattern. Override this method + if this does not match the model's attention pattern. + """ + if self._kv_cache_config is None: + cache_shape = (self.num_layers, self.n_heads_kv_local, self.head_size) + max_blocks = ceil_div(self.max_sequence_length, self.attn.kv_block_size) + self._kv_cache_config = KVCacheConfig(block_size=self.attn.kv_block_size, + cache_shape=cache_shape, + cache_dtype=self.activation_dtype, + max_blocks_per_allocation_group=max_blocks) + return (self._kv_cache_config, ) + + def prepare_batch(self, wrapped_batch: RaggedBatchWrapper) -> None: + """ + See ``DSInferenceModelBase.prepare_batch`` for documentation. + + This method assumes an autoregressive dense attention pattern. Override this method + if this does not match the model's attention pattern. + """ + self.attn.build_atoms(wrapped_batch) + + ######### Attention output ######### + def make_attn_out_layer(self) -> None: + """ + Instantiates the linear projection layer for the attention output linear layer. This sets the + `self.attn_out` attribute. + """ + in_features = attn_out_in_features(self.model_dim, self.tp_rank, self.tp_size, self.head_size, self.n_heads_q, + self.n_heads_kv) + + linear_config = DSLinearConfig( + max_tokens=self._engine_config.state_manager.max_ragged_batch_size, + in_channels=in_features, + out_channels=self.model_dim, + input_dtype=self.activation_dtype, + output_dtype=self.activation_dtype, + ) + + self.attn_out = heuristics.instantiate_linear(linear_config, self._engine_config) + + def transform_attn_out_param(self, param: torch.Tensor) -> Optional[InferenceParameter]: + """ + Shards an attention output projection parameter and passes it to the underlying + implementation for any necessary transformations. This will return `None` for bias parameters + if they are not on TP rank 0. + + Args: + param (torch.Tensor): The parameter to transform. This may be either a bias or weight and should have + the shape (out_neurons, in_neurons). 
+ """ + param = shard_attn_out_param(param, self.tp_rank, self.tp_size, self.head_size, self.n_heads_q, + self.n_heads_kv) + + if param is not None: + param = self.attn_out.transform_param(param) + + return param + + ######### MLP ######### + def make_mlp_1_layer(self) -> None: + """ + Instantiates the linear projection layer for the first MLP in the feedforward network. + This sets the `self.mlp_1` attribute. + """ + shard_size = sharded_intermediate_dim(self.intermediate_dim, self.tp_size, self.tp_rank) + + linear_config = DSLinearConfig( + max_tokens=self._engine_config.state_manager.max_ragged_batch_size, + in_channels=self.model_dim, + out_channels=shard_size, + activation=self.mlp_activation_fn, + input_dtype=self.activation_dtype, + output_dtype=self.activation_dtype, + ) + + self.mlp_1 = heuristics.instantiate_linear(linear_config, self._engine_config) + + def transform_mlp_1_param(self, param: torch.Tensor) -> InferenceParameter: + """ + Shards the first MLP parameter and passes it to the underlying implementation + for any necessary transformations. + + Args: + param (torch.Tensor): The parameter to transform. This may be either a bias or weight and should have + the shape (out_neurons, in_neurons). + """ + param = shard_mlp_1_param(param, self.tp_rank, self.tp_size, gated=self.gated_mlp) + + return self.mlp_1.transform_param(param) + + def make_mlp_2_layer(self) -> None: + """ + Instantiates the linear projection layer for the second MLP in the feedforward network. + This sets the `self.mlp_2` attribute. + """ + shard_size = sharded_intermediate_dim(self.intermediate_dim, self.tp_size, self.tp_rank) + + linear_config = DSLinearConfig( + max_tokens=self._engine_config.state_manager.max_ragged_batch_size, + in_channels=shard_size, + out_channels=self.model_dim, + input_dtype=self.activation_dtype, + output_dtype=self.activation_dtype, + ) + + self.mlp_2 = heuristics.instantiate_linear(linear_config, self._engine_config) + + def transform_mlp_2_param(self, param: torch.Tensor) -> Optional[InferenceParameter]: + """ + Shards the second MLP parameter and passes it to the underlying implementation + for any necessary transformations. This will return `None` for bias parameters + if they are not on TP rank 0. + + Args: + param (torch.Tensor): The parameter to transform. This may be either a bias or weight and should have + the shape (out_neurons, in_neurons). + """ + param = shard_mlp_2_param(param, self.tp_rank, self.tp_size) + + if param is not None: + param = self.mlp_2.transform_param(param) + + return param + + ######### Norm ######### + def make_norm_layer(self) -> None: + """ + Instantiates the normalization layer for the model. This sets the `self.norm` attribute. + + TODO(cmikeh2): In the future we'll distinguish between the different norm objects, + but for now we'll just use the same one for all of them. + """ + norm_config = DSNormConfig( + max_tokens=self._engine_config.state_manager.max_ragged_batch_size, + type=self.norm_type, + channels=self.model_dim, + residual_dtype=self.activation_dtype, + input_dtype=self.activation_dtype, + output_dtype=self.activation_dtype, + ) + + self.norm = heuristics.instantiate_pre_norm(norm_config, self._engine_config) + + def transform_norm_param(self, param: torch.Tensor) -> InferenceParameter: + """ + Passes a normalization parameter to the underlying implementation for any + necessary transformations. 
+ + TODO(cmikeh2): In the future we'll distinguish between the different norm objects, + but for now we'll just use the same one for all of them. + + Args: + param (torch.Tensor): The parameter to transform. This may be either a bias or weight and should have + shape (model_dim,) + """ + return self.norm.transform_param(param) + + +class DSMoETransformerModelBase(DSTransformerModelBase): + + @property + def n_experts(self) -> int: + """ + Return the number of experts in the model. + """ + raise NotImplementedError("Attempted to access an unimplemented number of experts") + + @property + def n_top_k(self) -> int: + """ + Number of experts per token. + """ + raise NotImplementedError("Attempted to access an unimplemented number of experts per token") + + @property + def normalize_expert_scores(self) -> bool: + """ + Whether to normalize expert scores. If true, sum(expert_scores) = 1. + """ + raise NotImplementedError("Attempted to access an unimplemented normalization flag") + + def make_moe_layer(self) -> None: + """ + Instantiates the MoE layer for the model. This sets the `self.moe` attribute. + """ + sharded_dim = sharded_intermediate_dim(self.intermediate_dim, self.tp_size, self.tp_rank) + + moe_config = DSMoEConfig( + max_tokens=self._engine_config.state_manager.max_ragged_batch_size, + model_dim=self.model_dim, + intermediate_features=sharded_dim, + activation=self.mlp_activation_fn, + n_experts=self.n_experts, + top_k=self.n_top_k, + input_dtype=self.activation_dtype, + output_dtype=self.activation_dtype, + normalize_scores=self.normalize_expert_scores, + ) + + self.moe = heuristics.instantiate_moe(moe_config, self._engine_config) + + def transform_moe_gate_param(self, param: torch.Tensor) -> InferenceParameter: + """ + Passes a MoE gate parameter to the underlying implementation for any necessary transformations. + + TODO(cmikeh2): This will need to be updated/overridden for expert parallelism. + """ + return self.moe.transform_gate_param(param) + + def transform_moe_mlp_1_param(self, param: torch.Tensor) -> InferenceParameter: + """ + Shards the first MoE param and passes it to the underlying implementation. Since it's possible for an architecture + to have both MoE and non-MoE layers, this can't be overloaded on the MLP1 transform. Furthermore, since both + the MoE DSModule owns both MLP1 and MLP2, under certain sharding conditions it's not possible for the model implementation + to infer from the shape whether to perform a different transformation based on MLP1 or MLP2. This (and the below) + separations are intended to solve both these issues. + + Args: + param (torch.Tensor): The parameter to transform. This should have shape (n_experts, out_neurons, in_neurons). + """ + param = shard_mlp_1_param(param, self.tp_rank, self.tp_size, gated=self.gated_mlp, is_moe=True) + + return self.moe.transform_moe_mlp_1_param(param) + + def transform_moe_mlp_2_param(self, param: torch.Tensor) -> Optional[torch.Tensor]: + """ + Shards the second MoE param and passes it to the underlying implementation. See the above for context on why this API + exists. + + This will return `None` for expert bias params not on TP rank 0. NOTE(cmikeh2): Does it make sense to round-robin assign? + My intuition is that this will make debugging much more difficult for minimal memory reduction. + + Args: + param (torch.Tensor): The parameter to transform. This should have shape (n_experts, out_neurons, in_neurons). 
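        Example (illustrative sketch; the expert count and dimensions are assumptions for a
        hypothetical model, not values from any shipped config):

        ```python
        # 8 experts, model_dim = 4096, intermediate_dim = 14336.
        w2 = torch.empty(8, 4096, 14336, dtype=torch.bfloat16)  # (n_experts, out_neurons, in_neurons)
        maybe_param = self.transform_moe_mlp_2_param(w2)
        # As with the dense MLP2 transform, expert bias parameters are only returned
        # on tp_rank 0; every other rank receives None.
        ```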
+ """ + param = shard_mlp_2_param(param, self.tp_rank, self.tp_size, is_moe=True) + + if param is not None: + param = self.moe.transform_moe_mlp_2_param(param) + + return param diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/layer_container_base.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/layer_container_base.py new file mode 100644 index 0000000000000000000000000000000000000000..f26c8755666501c494878474dd73d3f6af23e66b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/layer_container_base.py @@ -0,0 +1,355 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import re +from typing import Type + +import torch + +from deepspeed.accelerator import get_accelerator +from .parameter_base import ParameterBase, ParametrizedList +from ..inference_parameter import InferenceParameter + +# Currently have dependency loops for the type hints. +InferenceModel = Type["InferenceModel"] +LayerContainer = Type["LayerContainer"] + +MAPPING_KEY = "PARAM_MAPPING" +PLIST_HELPERS = "_ds_plist_strip_vals" + + +def make_finalization_callback(all_names: str): + """ + Helper method for building the finalization callback for a LayerContainer. This + is not client code and should not be used or called directly. + """ + + def finalization_callback(self, param: ParameterBase, finalized_param: torch.Tensor) -> None: + """ + Callback for when a parameter is finalized. + """ + self._finalized_params += 1 + + for name in all_names: + if getattr(self, name) is param: + setattr(self, name, finalized_param) + + return finalization_callback + + +class LayerMetaclass(type): + """ + MetaClass for the LayerContainer base class. This class will parse the annotations + of the class that correspond to `ParameterBase` and create None initializers for each + as well as a finalization callback that for when each `ParameterBase` is finalized + and should be replaced with a Tensor. + """ + + def __new__(cls, clsname, bases, attrs): + + annotations = attrs.get("__annotations__", {}) + + for base in bases: + # We'll pick up all annotations on any base classes. This will allow us to + # to use inheritance to share common parameter groups in base classes. + if hasattr(base, "__annotations__"): + annotations.update(base.__annotations__) + + if hasattr(base, MAPPING_KEY): + if MAPPING_KEY not in attrs: + # This is likely a fail state. If a parent has MAPPING KEY but the child does + # not, then we're guaranteed only a subset of the parameters will be mapped. + attrs[MAPPING_KEY] = {} + attrs[MAPPING_KEY].update(getattr(base, MAPPING_KEY)) + + all_names = [name for name, annotation in annotations.items() if issubclass(annotation, ParameterBase)] + + if MAPPING_KEY in attrs: + # If we have a mapping key at all, then we will enter the validation mode for building + # helpers for mapping and ensuring we have complete mapping. + + # First we'll build a flat list of every dependency for this layer. + all_deps = set() + for name in all_names: + parameter_deps = [ + name for name, annotation in annotations[name].__annotations__.items() + if issubclass(annotation, (torch.Tensor, ParametrizedList)) + ] + + all_deps.update([f"{name}.{dep}" for dep in parameter_deps]) + + # Create static helper for doing the string processing only once. 
+ attrs[PLIST_HELPERS] = [] + + # Iterate over all the mappings + for src_name, target_or_targets in attrs[MAPPING_KEY].items(): + if isinstance(target_or_targets, str): + target_or_targets = [target_or_targets] + + actual_targets = [] + for target_name in target_or_targets: + base_dependency, dependency_attr = target_name.split(".") + + # Check for invalid mappings + if base_dependency not in all_names: + raise ValueError( + "Target parameter \"{}\" not found in this layer. Valid targets are {}".format( + base_dependency, all_names)) + if dependency_attr not in annotations[base_dependency].__annotations__: + # This check is not universal (see below) if a single dependency is being + # mapped to by a single row. + raise ValueError( + "Target dependency \"{}\" not found on parameter \"{}\". Valid targets are {}".format( + dependency_attr, base_dependency, annotations[base_dependency].__annotations__.keys())) + if target_name not in all_deps: + raise ValueError( + "Target dependency \"{}\" was targeted with multiple mapping rules.".format(target_name)) + + # If we've made it this far, the dependency definitely exists. + actual_targets.append(annotations[base_dependency].__annotations__[dependency_attr]) + + all_deps.remove(target_name) + + are_plists = [issubclass(target, ParametrizedList) for target in actual_targets] + if all(are_plists): + # We can do direct sets on everything but ParametrizedLists, so we'll only explicitly + # handle these here. + # TODO(cmikeh2): SPLIT, error if more than 1 + glob_count = src_name.count("*") + if glob_count > 1: + raise ValueError( + "ParametrizedList index inference can only work with a single glob: {}".format(src_name)) + elif glob_count == 0: + raise ValueError( + "Must have wildcard (*) in source name for ParametrizedList mapping: {}".format(src_name)) + + wildcard_idx = src_name.find("*") + prefix = src_name[:wildcard_idx] + suffix = src_name[wildcard_idx + 1:] + attrs[PLIST_HELPERS].append((prefix, suffix, target_or_targets)) + elif any(are_plists): + raise ValueError("Cannot mix ParametrizedLists and Tensors in a single mapping rule.") + + if len(all_deps) > 0: + raise ValueError( + "A parameter mapping was provided for {}, but the following dependencies were not mapped: {}". + format(clsname, all_deps)) + + attrs["finalization_callback"] = make_finalization_callback(all_names) + + new_obj = super().__new__(cls, clsname, bases, attrs) + + setattr(new_obj, "_n_params", len(all_names)) + setattr(new_obj, "_annotation_attrs", all_names) + + return new_obj + + def __call__(cls, *args, **kwargs): + instance = cls.__new__(cls, *args, **kwargs) + instance.__init__(*args, **kwargs) + + for name, annotation in instance.__annotations__.items(): + if issubclass(annotation, ParameterBase): + # TODO(cmikeh2): Do we want to make this a property + # It might also make sense to do this in the base class __init__ + # but since it is tied with the changes made in __new__ it feels + # to me like it should be here. + setattr(instance, name, annotation(instance.inference_model, instance)) + + return instance + + +class LayerContainer(metaclass=LayerMetaclass): + """ + Abstract base class for containing model parameters. + + This is primarily a guidance abstraction since we do not put any restrictions + on how the parameters are stored. + + To use this class, annotate the class with `ParameterBase` subclasses and give them + names. 
As a checkpoint is loaded into this container, the `ParameterBase` instances + will be replaced with realized Tensors as soon as each of their dependencies are met. + + To enable automatic mapping, add a static attribute `PARAM_MAPPING` to the class + definition. This should be a dictionary mapping from a source string to one or + more dependencies. + + ```python + class MyLayer(LayerContainer): + PARAM_MAPPING = { + "path.to.param.dependency", "container_param_1.dependency", + "path.to.param2.dependency", "container_param_2.dependency", + "path.to.param3.*.dependency", "container_param_3.list_dependency" + } + + ... + ``` + """ + + def __init__(self, model: InferenceModel) -> None: + """ + Initialization of the LayerContainer. This method does not need to be overridden + for any children classes. + + Args: + model (InferenceModel): Inference model that will be used to shard and transform + parameters correctly, as well as provide specific information about the model + for `ParameterizedList`s that may be part of one of the member `ParameterBase`s. + """ + self.inference_model = model + self._finalized_params = 0 + + def _initialization_checker(self, check_device: bool = True) -> bool: + """ + Returns whether or not all parameters have been initialized and transformed by + the model. Once this returns True, all the `ParameterBase` instances will be + torch.Tensors. + """ + if self._finalized_params != self.n_params: + return False + + for name in self._annotation_attrs: + tensor = getattr(self, name) + if tensor is None: + continue + elif not isinstance(tensor, InferenceParameter): + raise ValueError("Layer should be finalized, but {} ({}) is neither InferenceParameter or None".format( + name, type(tensor))) + elif check_device and tensor.device != torch.device(get_accelerator().current_device()): + raise RuntimeError("Layer should be finalized, but {} is not on device {}".format( + name, + get_accelerator().current_device())) + return True + + @property + def is_populated(self) -> bool: + """ + Returns whether or not all parameters have been populated by the checkpoint engine, but + does not validat the parameters are on the correct device. + """ + return self._initialization_checker(check_device=False) + + @property + def is_initialized(self) -> bool: + """ + Returns whether or not all parameters have been initialized and transformed by + the model and are located on the appropriate device. Once this returns True, all + the `ParameterBase` instances ``InferenceParameter``s or explicitly set to ``None``. + """ + return self._initialization_checker() + + @property + def n_params(self) -> int: + """ + The number of parameters this container holds. This is a read-only value + that is set by the metaclass. + """ + return self._n_params + + @property + def annotation_attrs(self) -> list: + return self._annotation_attrs + + @property + def mapping_params(self) -> dict: + return getattr(self.__class__, MAPPING_KEY, {}) + + @property + def plist_helpers(self) -> list: + return getattr(self.__class__, PLIST_HELPERS, []) + + def direct_injection(self, name: str, tensor: InferenceParameter) -> None: + + if name not in self._annotation_attrs: + raise ValueError(f"Cannot directly inject {name}, not a valid parameter.") + + setattr(self, name, tensor) + self._finalized_params += 1 + + def set_dependency(self, dep_name: str, dep_value: torch.Tensor) -> None: + """ + Set dependency can be used for managing dependencies when a mapping is provided + in the class definition for the layer. 
The dep_name here should have any prefix + for transformer layers removed (such as model.layers.*.attn.qkv.weight -> attn.qkv.weight). + + Args: + dep_name (str): The name of the dependency to set. + dep_value (torch.Tensor): The value to set the dependency to. + """ + + def get_dep_name_target(dep_name: str) -> str: + """ + Helper method for getting the target name for a dependency from the + mapping params. Tries to match exact string first, then looks for + wildcards and attempts regex matching. Will return empty string if + no match found. + """ + if dep_name in self.mapping_params: + # If we have an exact match, it's a direct mapping and we can + # immediately set the value. + return self.mapping_params[dep_name] + + matched_targets = [] + for key, target in self.mapping_params.items(): + regex_key = key.replace("*", ".*") + if re.match(regex_key, dep_name): + matched_targets.append(target) + if len(matched_targets) > 1: + raise ValueError(f"Multiple targets matched for dependency {dep_name}: {matched_targets}") + if matched_targets: + return matched_targets[0] + return "" + + if dep_name in self.mapping_params: + # If we have an exact match, it's a direct mapping and we can immediately set + # the value. + target = self.mapping_params[dep_name] + + # Convert single targets to a list for consistency + if isinstance(target, str): + target = [target] + + for target_name in target: + # Double setting doesn't set the attribute correctly, so we do a getattr then setattr + target_param_name, target_dependency_name = target_name.split(".") + target_param = getattr(self, target_param_name) + setattr(target_param, target_dependency_name, dep_value) + return + + # Otherwise we need to map to one of the parameter lists. + for prefix, suffix, dests in self.plist_helpers: + if dep_name.startswith(prefix) and dep_name.endswith(suffix): + # We have a match, so we can set the value. + target_idx = int(dep_name[len(prefix):-len(suffix)]) + + # Convert single targets to a list for consistency + if isinstance(dests, str): + dests = [dests] + + for dest in dests: + target_param_name, target_dependency_name = dest.split(".") + target_param = getattr(self, target_param_name) + target_dependency = getattr(target_param, target_dependency_name) + target_dependency[target_idx] = dep_value + return + + # TODO: Refactor this with the help of cmikeh2 + # We should be able to combine this with the wildcard matching above. + target = get_dep_name_target(dep_name) + if target: + # Convert single targets to a list for consistency + if isinstance(target, str): + target = [target] + + for target_name in target: + # Double setting doesn't set the attribute correctly, so we do a getattr then setattr + target_param_name, target_dependency_name = target_name.split(".") + target_param = getattr(self, target_param_name) + setattr(target_param, target_dependency_name, dep_value) + return + + raise ValueError( + "Could not find a mapping for dependency \"{}\". Check that it is included in the ``MAPPING_PARAMS``. 
See docstring for more on ``MAPPING_PARAMS``" + .format(dep_name)) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/parameter_base.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/parameter_base.py new file mode 100644 index 0000000000000000000000000000000000000000..2dcb63c050a0b4ceea8ce7ece38b6fcd329273a8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/parameter_base.py @@ -0,0 +1,255 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import weakref +from abc import abstractmethod +from typing import Type + +import torch + +# Currently have dependency loops for the type hints. +InferenceModel = Type["InferenceModel"] +LayerContainer = Type["LayerContainer"] + +MAPPING_KEY = "PARAM_MAPPING" + + +def make_param_getter(clsname, param): + """ + Normal getter implementation for a property. + """ + + def param_getter(self): + return getattr(self, f"__{clsname}__{param}") + + return param_getter + + +def make_param_setter(clsname, param): + """ + Setter implementation that will call complete component to potentially + finalize the parameter. + """ + + def param_setter(self, value): + setattr(self, f"__{clsname}__{param}", value) + self.complete_component() + + return param_setter + + +def make_readonly_setter(): + """ + Setter implementation that will raise an error if called. + """ + + def paramlist_setter(self, value): + raise ValueError("Cannot set a ParametrizedList directly.") + + return paramlist_setter + + +class ParameterMetaclass(type): + """ + MetaClass for the ParameterBase base class. This class will parse the `src_params` + attribute and create properties for each of the dependencies. A dependency can either + be represented as a string, which is interpreted as a named Tensor, or a `ParametrizedList` + subclass. + """ + + def __new__(cls, clsname, bases, attrs): + + annotations = attrs.get("__annotations__", {}) + dependencies = { + name: annotation + for name, annotation in annotations.items() if issubclass(annotation, (torch.Tensor, ParametrizedList)) + } + n_dependencies = len(dependencies) + + # Create properties for each of our dependencies + for d_name, d_type in dependencies.items(): + if issubclass(d_type, ParametrizedList): + assert hasattr( + d_type, "count_attr" + ), "ParametrizedList must have a count_attr attribute to access on the inference module." 
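                # (Added explanatory note.) ParametrizedList dependencies get a read-only property:
                # they are populated element-by-element through ParametrizedList.__setitem__, which
                # calls complete_component() on the owning parameter once every slot is filled, so
                # assigning the attribute directly would bypass that bookkeeping.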
+ attrs[d_name] = property(make_param_getter(clsname, d_name), make_readonly_setter()) + else: # torch.Tensor + attrs[d_name] = property(make_param_getter(clsname, d_name), make_param_setter(clsname, d_name)) + + new_cls = super().__new__(cls, clsname, bases, attrs) + new_cls.n_dependencies = n_dependencies + + return new_cls + + def __call__(cls, *args, **kwargs): + new_obj = super().__call__(*args, **kwargs) + new_obj.__init__(*args, **kwargs) + + setattr(new_obj, "dest_param", None) + + # Initialize our dependences to None/empty `ParametrizedList`s + for name, annotation in new_obj.__annotations__.items(): + if issubclass(annotation, ParametrizedList): + #TODO(jeff): update assert with this, model implementation attribute does not align or missing wrt the ParametrizedList attributes + assert hasattr( + new_obj.inference_model, annotation.count_attr + ), f"new_obj={new_obj.__class__.__name__}, name={name}, annotation.count_attr={annotation.count_attr}" + param_list = annotation(new_obj, getattr(new_obj.inference_model, annotation.count_attr)) + setattr(new_obj, f"__{new_obj.__class__.__name__}__{name}", param_list) + else: # torch.Tensor + setattr(new_obj, f"__{new_obj.__class__.__name__}__{name}", None) + + return new_obj + + +class ParameterBase(metaclass=ParameterMetaclass): + """ + A ParameterBase allows us to consolidate tracking the dependencies of loading a parameter from + a checkpoint into a single object. This class should not be used directly, but rather subclassed + and the `src_params` attribute set to a list of strings and/or `ParametrizedList`s. + """ + + # inference_model: InferenceModel + """ + Inference model that will provide context on how to shard and transform the parameter. + """ + + #completed_components: int + """ + How many of the layer dependencies have been met. This is used to determine when the parameter + is ready to be finalized. A ParametrizedList counts as a single dependency for the purposes + of this counter. + """ + + def __init__(self, model: InferenceModel, parent_container: LayerContainer) -> None: + """ + Direct constructor. This should not be called from client code. + + Args: + model (InferenceModel): Inference model that will be used to shard and transform the + parameter in `finalize`. + parent_container (LayerContainer): The parent container that this parameter is a member + of. We will build a weakref to this container to call the finalization callback. + """ + self.inference_model = model + self.completed_components = 0 + self.parent_container = weakref.ref(parent_container) + + @abstractmethod + def finalize(self) -> torch.Tensor: + """ + Finalize the parameter after all of its source parameters have been set. This method + will be automatically called when all inputs have been set. It should return the Tensor + with all transformations performed on it. + """ + pass + + def complete_component(self) -> None: + """ + Mark a component as completed. This should be called by the relevant setter of a direct + property or a ParametrizedList. This method will automatically call `finalize` when all + dependencies have been met and then call the finalization callback on the parent container. + + Once the finalization callback has been called, the parameter will be replaced with the + `dst_param` attribute on the parent container, and this instance will be destroyed. 
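        Illustrative flow (the attribute names here are hypothetical): for a parameter that
        declares two tensor dependencies, ``weight`` and ``bias``:

        ```python
        param.weight = loaded_weight  # property setter calls complete_component(); 1 of 2 met
        param.bias = loaded_bias      # 2 of 2 met: finalize() runs and the parent
                                      # LayerContainer swaps this object for the returned tensor
        ```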
+ """ + self.completed_components += 1 + + if self.completed_components != self.n_dependencies: + return + + finalized_param = self.finalize() + self.parent_container().finalization_callback(self, finalized_param) + + +class ParametrizedList: + """ + A ParametrizedList is a list of parameters that are dependencies + of a `ParameterBase` but may vary in length depending on the model + configuration (rather than architecture). For example, a MoE layer + may have different number of experts depending on the size of the model. + + This class is used to manage these lists and provide integer indexing + of a single component rather than accessing names directly. For example, + it tends to be more natural to access the 8th expert with `experts[8]` + rather than a name like `expert_8`, especially as an attribute. + + To inherit from this class, set static variables `name` and `count_attr`. + + ```python + class MyParametrizedList(ParametrizedList): + count_attr: str = "my_list_count" + ``` + + In the above example, `my_list_count` should be an accessible attribute + of the inference model (i.e. via `self.inference_model.my_list_count`). + + NOTE: There are some APIs in which this type cannot be used as if it is + just a list of Tensors. For example, `torch.cat(param_list)` will not work. + However, you can make it compatible with a tuple wrapper: + `torch.cat(tuple(param_list))` + """ + + n_params: int + """ + Number of params this list contains. + """ + + param: ParameterBase + """ + WeakRef to the owning parameter. + """ + + def __init__(self, param: ParameterBase, n_params: int) -> None: + """ + Constructor. Should not be called from client code. + + Args: + param (ParameterBase): The owning parameter. + n_params (int): The number of parameters this list contains. This should be + """ + self.n_params = n_params + self.set_params = 0 + self.param = weakref.ref(param) + self._params = [None] * n_params + + def __getitem__(self, index): + return self._params[index] + + def __setitem__(self, index, value): + if self._params[index] is not None: + raise ValueError("Cannot set a parameter twice.") + + self._params[index] = value + self.set_params += 1 + + if self.set_params != self.n_params: + return + + self.param().complete_component() + + def __iter__(self): + return iter(self._params) + + +def ParamList(attr: str): + """ + Helper to create a subclass of ParametrizedList with the desired `count_attr`. + + In this manner, we can annotate the type of a Parameter dependency with the + following: + + ```python + class CustomParameter(ParameterBase): + dependency_list: ParamList("dependencies_count_name") + ``` + + where "dependencies_count_name" is the name of the attribute on the inference model. 
+ """ + + class ParametrizedListInstance(ParametrizedList): + count_attr: str = attr + + return ParametrizedListInstance diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce6293351da1ce199f96513284059180a6e1832f Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/__pycache__/ds_module.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/__pycache__/ds_module.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f560a55502e06198ad76ca2d5b27c0746b7b249a Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/__pycache__/ds_module.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/__pycache__/heuristics.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/__pycache__/heuristics.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1d93fd6fbc30a330931a61c0a3f5e1fae4eca1e Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/__pycache__/heuristics.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/__pycache__/module_registry.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/__pycache__/module_registry.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da03e318e8c4ecc795e8506b70457d258c9d3c20 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/__pycache__/module_registry.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a30b82c487a8c88dc4c33e2eed1370d916218be Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/moe_config.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/moe_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51ed519f5ecf7ee45d3765f4b171e8b586859793 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/moe_config.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/unembed_config.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/unembed_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e44a9ef9d4eab03228a6d3df96eae8082593f58 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/unembed_config.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1b500a9a0b5a25b17696bffcbcc5fa1ffd582de4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# Imports for registering ops +from .attention import * +from .linear import * +from .post_norm import * +from .pre_norm import * +from .embedding import * +from .unembed import * +from .moe import * diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..03b65a9eba4810254fc961adfcd2eadc000e05c5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/embedding/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/embedding/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5458a855abf45c3870c2208663dd5f49fb4fb22b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/embedding/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .ragged_embedding import DSRaggedEmbedding diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/embedding/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/embedding/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c41973e503f59c186ab2c0b805da3149d43112e Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/embedding/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/embedding/__pycache__/ragged_embedding.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/embedding/__pycache__/ragged_embedding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb8eb976bd50efdf911050b409a7e9c3283a7ae7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/embedding/__pycache__/ragged_embedding.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/embedding/ragged_embedding.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/embedding/ragged_embedding.py new file mode 100644 index 0000000000000000000000000000000000000000..90cdd39d1be7f7e51da65a1a517325e585386a72 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/embedding/ragged_embedding.py @@ -0,0 +1,77 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Any, Dict, Optional + +import torch + +from deepspeed.accelerator import get_accelerator +from ....allocator import empty_from +from ....inference_utils import DtypeEnum +from ....kernels.ragged_ops import RaggedEmbeddingKernel +from ....ragged import RaggedBatchWrapper +from ...interfaces import DSEmbeddingBase, DSEmbeddingRegistry +from ...configs import DSEmbeddingsConfig + + +@DSEmbeddingRegistry.register_module +class DSRaggedEmbedding(DSEmbeddingBase): + + @staticmethod + def name(): + return 'ragged_embedding' + + @staticmethod + def supports_config(config: DSEmbeddingsConfig) -> bool: + + if DtypeEnum(config.residual_dtype) not in [DtypeEnum.fp16, DtypeEnum.bf16, DtypeEnum.fp32]: + return False + + if config.use_token_type: + return False + + if config.output_normalization is not None: + return False + + try: + _ = RaggedEmbeddingKernel(config.residual_dtype, torch.int32, config.embedding_dim) + except ValueError: + return False + + return True + + def __init__(self, config: DSEmbeddingsConfig, implementation_config: Dict[str, Any]) -> None: + super().__init__(config, implementation_config) + + self.embed_offset = self._config.positional_offset + + # TODO(cmikeh2): How do we want to avoid the int32 vs int64 issue? + self._ragged_embed = RaggedEmbeddingKernel(self._config.residual_dtype, torch.int32, + self._config.embedding_dim) + + self._output = torch.empty((self._config.max_tokens, self._config.embedding_dim), + dtype=self._config.residual_dtype, + device=get_accelerator().current_device()) + + @property + def output(self) -> torch.Tensor: + return self._output + + def forward(self, + ragged_batch: RaggedBatchWrapper, + word_embeddings: torch.Tensor, + position_embeddings: Optional[torch.Tensor] = None) -> torch.Tensor: + """ + Parameters: + ragged_batch (RaggedBatchWrapper): The input ids and associated ragged batch metadata. + word_embeddings (torch.Tensor): The word embedding table + """ + output = empty_from(self._output, (ragged_batch.tensor_toks, self._config.embedding_dim)) + self._ragged_embed(output, + ragged_batch, + word_embeddings, + position_embed_weight=position_embeddings, + position_embed_offset=self.embed_offset) + return output diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/post_norm/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/post_norm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..653a2fe4fb5be742c873aa51e76db79ead7325df --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/post_norm/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .cuda_post_ln import DSPostLNCUDAModule diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/post_norm/cuda_post_ln.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/post_norm/cuda_post_ln.py new file mode 100644 index 0000000000000000000000000000000000000000..9b2af4bb90231cd3f37e3406fe94042c4006faa3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/post_norm/cuda_post_ln.py @@ -0,0 +1,56 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Any, Dict, Tuple + +import torch + +from deepspeed.accelerator import get_accelerator +from ...interfaces import DSPostNormBase, DSPostNormRegistry +from ...configs import DSNormConfig +from ....kernels.core_ops.cuda_layer_norm.cuda_post_ln import CUDAFPPostLN +from ....allocator import empty_from +from ....inference_parameter import InferenceParameter + + +@DSPostNormRegistry.register_module +class DSPostLNCUDAModule(DSPostNormBase): + + @staticmethod + def name(): + return 'cuda_post_ln' + + @staticmethod + def supports_config(config: DSNormConfig): + if len(set([config.residual_dtype, config.input_dtype, config.output_dtype])) != 1: + return False + + try: + _ = CUDAFPPostLN(config.channels, config.residual_dtype) + except ValueError: + return False + return True + + def __init__(self, config: DSNormConfig, implementation_config: Dict[str, Any]): + super().__init__(config, implementation_config) + self._fp_post_ln = CUDAFPPostLN(self._config.channels, self._config.residual_dtype, epsilon=self._config.eps) + + self._output = torch.empty((config.max_tokens, config.channels), + dtype=config.output_dtype, + device=get_accelerator().current_device()) + + def transform_param(self, param: torch.Tensor) -> InferenceParameter: + param = param.to(self._config.input_dtype) + return InferenceParameter.initialize(param) + + def forward(self, residual: torch.Tensor, hidden_in: torch.Tensor, gamma: torch.Tensor, + beta: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Since the CUDA FP only supports all data types being the same, we will alias the residual + with our output. + """ + self._residual_output = empty_from(self._output, residual.shape) + self._fp_post_ln(residual, residual, hidden_in, gamma, beta) + return residual, residual diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b988f8a3c561ff8bfbe4729032c8dd4eb5ef80f Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/__pycache__/cuda_pre_ln.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/__pycache__/cuda_pre_ln.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..95f825b53c1c607efa4800f4c276b25fce67fd94 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/__pycache__/cuda_pre_ln.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/__pycache__/cuda_pre_rms.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/__pycache__/cuda_pre_rms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a2d07c92a5639def37388c92c496e73c4237839 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/__pycache__/cuda_pre_rms.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/cuda_pre_ln.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/cuda_pre_ln.py new file mode 100644 index 0000000000000000000000000000000000000000..90783ce8c9a63320fc2047b4e8dfed9522df7899 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/cuda_pre_ln.py @@ -0,0 +1,69 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Any, Dict, Optional, Tuple + +import torch + +from deepspeed.accelerator import get_accelerator +from ...interfaces import DSPreNormBase, DSPreNormRegistry +from ...configs import DSNormConfig, NormTypeEnum +from ....kernels.core_ops.cuda_layer_norm.cuda_pre_ln import CUDAFPPreLN +from ....kernels.core_ops.cuda_layer_norm.cuda_ln import CUDAFPLN +from ....allocator import empty_from +from ....inference_parameter import InferenceParameter + + +@DSPreNormRegistry.register_module +class DSPreLNCUDAModule(DSPreNormBase): + + @staticmethod + def name(): + return 'cuda_pre_ln' + + @staticmethod + def supports_config(config: DSNormConfig): + type = NormTypeEnum(config.type) + if type != NormTypeEnum.LayerNorm: + return False + + if len(set([config.residual_dtype, config.input_dtype, config.output_dtype])) != 1: + return False + + try: + _ = CUDAFPPreLN(config.channels, config.residual_dtype) + except ValueError: + return False + return True + + def __init__(self, config: DSNormConfig, implementation_config: Dict[str, Any]): + super().__init__(config, implementation_config) + self._fp_pre_ln = CUDAFPPreLN(self._config.channels, self._config.residual_dtype, epsilon=self._config.eps) + self._fp_ln = CUDAFPLN(self._config.channels, self._config.residual_dtype, epsilon=self._config.eps) + + # Buffers for the hidden output (residual is updated in-place) + self._hidden_output = torch.empty((config.max_tokens, config.channels), + dtype=config.output_dtype, + device=get_accelerator().current_device()) + + def transform_param(self, param: torch.Tensor) -> InferenceParameter: + param = param.to(self._config.input_dtype) + return InferenceParameter.initialize(param) + + def forward(self, residual: torch.Tensor, hidden_in: Optional[torch.Tensor], gamma: torch.Tensor, + beta: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Since the CUDA FP only supports all data types being the same, we will alias the residual + with our output. + + If hidden_in is None, that means we do not need to perform the residual add and will + only return the hidden output modified. + """ + hidden_out = empty_from(self._hidden_output, residual.shape) + if hidden_in is None: + self._fp_ln(hidden_out, residual, gamma, beta) + else: + self._fp_pre_ln(residual, hidden_out, residual, hidden_in, gamma, beta) + return residual, hidden_out diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/cuda_pre_rms.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/cuda_pre_rms.py new file mode 100644 index 0000000000000000000000000000000000000000..986262b31b1f50c3c139e7f71aca94c13ed866c3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/cuda_pre_rms.py @@ -0,0 +1,79 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Any, Dict, Optional, Tuple + +import torch + +from deepspeed.accelerator import get_accelerator +from ...interfaces import DSPreNormBase, DSPreNormRegistry +from ...configs import DSNormConfig, NormTypeEnum +from ....kernels.core_ops import CUDARMSNorm, CUDARMSPreNorm +from ....allocator import empty_from +from ....inference_parameter import InferenceParameter + + +@DSPreNormRegistry.register_module +class DSPreRMSCUDAModule(DSPreNormBase): + + @staticmethod + def name(): + return 'cuda_pre_rms' + + @staticmethod + def supports_config(config: DSNormConfig): + type = NormTypeEnum(config.type) + if type != NormTypeEnum.RMSNorm: + return False + + if len(set([config.residual_dtype, config.input_dtype, config.output_dtype])) != 1: + return False + + try: + # Only need to check one since the support matrix for the two rms kernels is the same + _ = CUDARMSPreNorm(config.channels, config.residual_dtype) + except ValueError: + return False + return True + + def __init__(self, config: DSNormConfig, implementation_config: Dict[str, Any]): + super().__init__(config, implementation_config) + self._fp_rms = CUDARMSNorm(self._config.channels, self._config.residual_dtype, epsilon=self._config.eps) + self._fp_rms_pre = CUDARMSPreNorm(self._config.channels, self._config.residual_dtype, epsilon=self._config.eps) + + # Buffers for both the hidden and residual outputs + self._hidden_output = torch.empty((config.max_tokens, config.channels), + dtype=config.output_dtype, + device=get_accelerator().current_device()) + self._residual_output = torch.empty((config.max_tokens, config.channels), + dtype=config.output_dtype, + device=get_accelerator().current_device()) + + def transform_param(self, param: torch.Tensor) -> InferenceParameter: + param = param.to(self._config.input_dtype) + return InferenceParameter.initialize(param) + + def forward(self, + residual: torch.Tensor, + hidden_in: Optional[torch.Tensor], + gamma: torch.Tensor, + beta: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Since the CUDA FP only supports all data types being the same, we will alias the residual + with our output. + + If hidden_in is None, that means we do not need to perform the residual add and will + only return the hidden output modified. + """ + assert beta is None, "Beta is not supported for RMSNorm" + + hidden_out = empty_from(self._hidden_output, residual.shape) + if hidden_in is None: + self._fp_rms(hidden_out, residual, gamma) + residual_out = residual + else: + residual_out = empty_from(self._residual_output, residual.shape) + self._fp_rms_pre(residual_out, hidden_out, residual, hidden_in, gamma) + return residual_out, hidden_out diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/unembed/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/unembed/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4a5fd24d518bbf29f88771478b04da73399d4a53 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/unembed/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .ragged_unembed import DSRaggedUnembed diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/unembed/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/unembed/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f42d75ae8d464b0fcb91a19214a942a4a27020c Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/unembed/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/unembed/ragged_unembed.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/unembed/ragged_unembed.py new file mode 100644 index 0000000000000000000000000000000000000000..36130902c665c7adff419ef0ea2be7138292e193 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/unembed/ragged_unembed.py @@ -0,0 +1,120 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Any, Dict, Optional + +import torch + +from deepspeed.accelerator import get_accelerator +from ....allocator import empty_from +from ....inference_utils import DtypeEnum, ActivationType +from ....kernels.core_ops import CUDAFPLN, BlasLibLinear, CUDARMSNorm, CUDABiasActivation +from ....kernels.ragged_ops import RaggedLogitsGather +from ....ragged import RaggedBatchWrapper +from ...interfaces import DSUnembedBase, DSUnembedRegistry +from ...configs import DSUnembedConfig + + +@DSUnembedRegistry.register_module +class DSRaggedUnembed(DSUnembedBase): + """ + Ragged unembedding implementation. This implementation will gather only the last token + of each sequence in the ragged inflight batch and calculate the logits only for those rows. + """ + + @staticmethod + def name(): + return 'ragged_unembed' + + @staticmethod + def supports_config(config: DSUnembedConfig): + + if DtypeEnum(config.dtype) not in [DtypeEnum.fp16, DtypeEnum.bf16, DtypeEnum.fp32]: + return False + + try: + _ = RaggedLogitsGather(config.model_dim, config.dtype) + except ValueError: + return False + + if config.norm_type == 'rms_norm': + try: + _ = CUDARMSNorm(config.model_dim, config.dtype) + except ValueError: + return False + elif config.norm_type == 'layer_norm': + try: + _ = CUDAFPLN(config.model_dim, config.dtype) + except ValueError: + return False + + return True + + def __init__(self, config: DSUnembedConfig, implementation_config: Dict[str, Any]) -> None: + super().__init__(config, implementation_config) + + self._logits_gather = RaggedLogitsGather(config.model_dim, self._config.dtype) + + if self._config.norm_type == 'layer_norm': + self._norm = CUDAFPLN(self._config.model_dim, self._config.dtype) + elif self._config.norm_type == 'rms_norm': + self._norm = CUDARMSNorm(self._config.model_dim, self._config.dtype) + else: + self._norm = None + + self._linear = BlasLibLinear(self._config.dtype) + # Here the activation kernel is being used to apply bias, hence the identity activation type! 
+ self._act_fn = CUDABiasActivation(self._config.vocab_size, self._config.dtype, ActivationType.IDENTITY) + + self._intermediate = torch.empty((self._config.max_sequences, self._config.model_dim), + dtype=self._config.dtype, + device=get_accelerator().current_device()) + + self._output = torch.empty((self._config.max_sequences, self._config.vocab_size), + dtype=self._config.dtype, + device=get_accelerator().current_device()) + + @property + def output(self) -> torch.Tensor: + return self._output + + def forward(self, + hidden_states: torch.Tensor, + vocab_embedding: torch.Tensor, + ragged_metadata: RaggedBatchWrapper, + bias: Optional[torch.Tensor] = None, + gamma: Optional[torch.Tensor] = None, + beta: Optional[torch.Tensor] = None) -> torch.Tensor: + """ + Return final model logits. + + Args: + hidden_states (torch.Tensor): The hidden states from the model. This is the output of the + final layer of the model. + vocab_embedding (torch.Tensor): The vocab embedding table. + raged_metadata (RaggedBatchWrapper): The ragged batch metadata. + gamma (Optional[torch.Tensor]): The gamma tensor for normalization. + beta (Optional[torch.Tensor]): The beta tensor for normalization. + """ + + cut_down_hidden_states = empty_from(self._intermediate, + (ragged_metadata.current_sequences, self._config.model_dim)) + self._logits_gather(cut_down_hidden_states, hidden_states, ragged_metadata) + + if self._config.norm_type == 'rms_norm': + if gamma is None: + raise ValueError('RMS Normalization enabled but gamma not provided.') + self._norm(cut_down_hidden_states, cut_down_hidden_states, gamma) + elif self._config.norm_type == 'layer_norm': + if gamma is None or beta is None: + raise ValueError('Normalization enabled but gamma and/or beta not provided.') + self._norm(cut_down_hidden_states, cut_down_hidden_states, gamma, beta) + + output = empty_from(self._output, (ragged_metadata.current_sequences, self._config.vocab_size)) + self._linear(output, cut_down_hidden_states, vocab_embedding) + if bias is not None: + self._act_fn(output, bias) + + return output diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..13b556789e4e43c4e816319cd48e95e9e51afb0c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__init__.py @@ -0,0 +1,12 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .attention_base import DSSelfAttentionRegistry, DSSelfAttentionBase +from .embedding_base import DSEmbeddingRegistry, DSEmbeddingBase +from .linear_base import DSLinearRegistry, DSLinearBase +from .moe_base import DSMoERegistry, DSMoEBase +from .post_norm_base import DSPostNormRegistry, DSPostNormBase +from .pre_norm_base import DSPreNormRegistry, DSPreNormBase +from .unembed_base import DSUnembedRegistry, DSUnembedBase diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..60adb3a5969b428dfec2c2351fbb7f0c5de7c9a5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/attention_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/attention_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..173906f256a381275d43f4da9091dced15295a5c Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/attention_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/embedding_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/embedding_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ab09a2f1ebe0f431488ef9ac0e68e58c0098367c Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/embedding_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/linear_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/linear_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..39e51a093536ed19523bdea5f2e3fa24e01b8098 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/linear_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/moe_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/moe_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8a8a722a7223fa4248010b26740eeae812dd329 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/moe_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/post_norm_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/post_norm_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a45ca1080360fd1bfd59d75eae5c61aac252093 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/post_norm_base.cpython-310.pyc differ 
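The `interfaces` package above pairs each DS module base class with a registry; concrete implementations earlier in this diff (e.g. `DSRaggedEmbedding`, `DSPostLNCUDAModule`) opt in via the `register_module` decorator and advertise what they handle through `supports_config`. A minimal sketch of that pattern follows; the module name and the permissive `supports_config` are placeholders for illustration, not part of DeepSpeed:

```python
from typing import Any, Dict

import torch

from deepspeed.inference.v2.modules.interfaces import DSPostNormBase, DSPostNormRegistry
from deepspeed.inference.v2.modules.configs import DSNormConfig


@DSPostNormRegistry.register_module
class MyPostNorm(DSPostNormBase):

    @staticmethod
    def name():
        return 'my_post_norm'  # hypothetical registry key

    @staticmethod
    def supports_config(config: DSNormConfig) -> bool:
        # A real implementation validates dtypes/channels here (see cuda_post_ln above).
        return True

    def __init__(self, config: DSNormConfig, implementation_config: Dict[str, Any]) -> None:
        super().__init__(config, implementation_config)

    def transform_param(self, param: torch.Tensor):
        raise NotImplementedError()  # a working module transforms and returns an InferenceParameter

    def forward(self, residual: torch.Tensor, hidden_in: torch.Tensor, gamma: torch.Tensor,
                beta: torch.Tensor):
        raise NotImplementedError()  # a working module computes the post-norm here
```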
diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/pre_norm_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/pre_norm_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9d999b0ee09a863a5ae383b5cc4c7adc0f0149d Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/pre_norm_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/unembed_base.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/unembed_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b65c07a393253b57257d9c58f10d3c101297e0d Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/unembed_base.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/attention_base.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/attention_base.py new file mode 100644 index 0000000000000000000000000000000000000000..c67dc033f92ada844c1c5d27bde5ac0cf7f3f0ac --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/attention_base.py @@ -0,0 +1,97 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Any, Dict, Optional, Tuple, Type + +import torch + +from ...ragged import RaggedBatchWrapper +from deepspeed.runtime.config_utils import DeepSpeedConfigModel +from ..ds_module import DSModuleBase +from ..module_registry import DSModuleRegistryBase +from ..configs import DSSelfAttentionConfig + + +class DSSelfAttentionBase(DSModuleBase): + """ + Base mixin for all attention modules. The interface represented by this module + is broadly: + + output = attention(query_key_value, + Optional[kv_cache], + Optional[attention_mask], + Optional[attention_bias]) + """ + + @staticmethod + def config_class() -> Type[DeepSpeedConfigModel]: + return DSSelfAttentionConfig + + def __init__(self, config: DSSelfAttentionConfig, implementation_config: Dict[str, Any]) -> None: + super().__init__(config, implementation_config) + + @property + def kv_block_size(self) -> int: + """ + Return preferred granulatity for blocked KV-cache implementation. + """ + raise NotImplementedError() + + @property + def q_block_size(self) -> int: + """ + Property to calculate blocking granularity for the query dimension. + This has no impact on the KV-cache structure, but will affect the + number of attention atoms associated with a batch. + """ + raise NotImplementedError() + + def build_atoms(self, ragged_batch: RaggedBatchWrapper) -> None: + """ + Build the atoms for this module. This is not a strict requirement for the class, + so this method is a no-op by default rather than abstract. + """ + pass + + def forward(self, + q_k_v: torch.Tensor, + kv_cache: torch.Tensor, + batch: RaggedBatchWrapper, + attention_mask: Optional[torch.Tensor] = None, + attention_bias: Optional[torch.Tensor] = None, + inv_freqs: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Parameters: + q_k_v (torch.Tensor): Query, key, and value tensors. 
Expected shape is: + [ + batch, + seq_len, + 2 * self._config.n_heads_kv + self._config.n_heads_q, + self._config.head_size + ]. + kv_cache (Optional[torch.Tensor]): Key and value cache tensor. Expected shape is + [ + 2, + batch, + kv_cache_len, + self._config.n_heads_kv, + self._config.head_size + ]. If None, cache is disabled. The `kv_cache_len` dimension does not need to + be contiguous (it should expand stride by `max_out_tokens`). + batch (RaggedBatchWrapper): Ragged batch metadata. + attention_mask (Optional[torch.Tensor]): Attention mask tensor. If None, masking is + disabled. This will defer to the config in the case of conflicting information. + This means if the config class is implying causal attention, the mask will be ignored. + attention_bias (Optional[torch.Tensor]): Attention bias tensor. If None, bias is disabled. + """ + raise NotImplementedError() + + +class DSSelfAttentionRegistry(DSModuleRegistryBase): + registry: Dict = {} + + @staticmethod + def associated_class() -> Type[DSModuleBase]: + return DSSelfAttentionBase diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/embedding_base.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/embedding_base.py new file mode 100644 index 0000000000000000000000000000000000000000..1ab7e5f0b7a2455ad3e60bb12a9d262900340e45 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/embedding_base.py @@ -0,0 +1,85 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from abc import abstractmethod +from typing import Any, Dict, Optional, Type + +import torch + +from deepspeed.runtime.config_utils import DeepSpeedConfigModel +from ...ragged import RaggedBatchWrapper +from ..ds_module import DSModuleBase +from ..module_registry import DSModuleRegistryBase +from ..configs import DSEmbeddingsConfig +from ...inference_parameter import InferenceParameter + + +class DSEmbeddingBase(DSModuleBase): + """ + Base mixin for embedding modules. The interface represented by this module is: + + hidden_out = embedding(input_ids) + + position_embedding(position_ids) + + token_type_embedding(token_type_ids) + with optional normalization. + """ + + @staticmethod + def config_class() -> Type[DeepSpeedConfigModel]: + return DSEmbeddingsConfig + + def __init__(self, config: DSEmbeddingsConfig, implementation_config: Dict[str, Any]) -> None: + super().__init__(config, implementation_config) + + def transform_param(self, embed_param: torch.Tensor) -> InferenceParameter: + """ + Perform any necessary transformations on an embedding parameter. This module assumes + that all embedding parameters would require the same set of transformations. + + Parameters: + embed_param (torch.Tensor): Embedding parameter. Shape is of [vocab_size, hidden_size] + """ + raise NotImplementedError() + + @property + @abstractmethod + def output(self) -> torch.Tensor: + """ + Pre-allocated output Tensor. This currently needs to be exposed for gather operations + on the output. + + TODO(cmikeh2): This is not ideal. We need a better abstraction for this, such as giving + access to the inference comm object to the DSModule. 
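To make the documented layout above concrete, here is a minimal shape sketch for the fused QKV activation and the blocked KV-cache slice accepted by `DSSelfAttentionBase.forward`; the head counts, lengths, and dtype are hypothetical example values, not defaults.

```python
import torch

# Hypothetical sizes for illustration only (grouped-query attention style).
batch, seq_len, kv_cache_len = 1, 16, 256
n_heads_q, n_heads_kv, head_size = 32, 8, 128

# Fused QKV activation: queries, then keys and values, per token.
q_k_v = torch.empty(batch, seq_len, 2 * n_heads_kv + n_heads_q, head_size, dtype=torch.float16)

# Blocked KV-cache slice: index 0 of the leading dimension holds keys, index 1 holds values.
kv_cache = torch.empty(2, batch, kv_cache_len, n_heads_kv, head_size, dtype=torch.float16)

assert q_k_v.shape[-2] == 2 * n_heads_kv + n_heads_q
```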
+ """ + raise NotImplementedError() + + def forward(self, + ragged_batch: RaggedBatchWrapper, + word_embeddings: torch.Tensor, + position_embeddings: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + token_type_embeddings: Optional[torch.Tensor] = None) -> InferenceParameter: + """ + Parameters: + ragged_batch (torch.Tensor): Ragged batch of token ids + associated metadata. + word_embeddings (torch.Tensor): Word embeddings. + position_embeddings (torch.Tensor): Position embeddings. If passed, IDs will be + inferred from the ragged batch itself. + token_type_ids (torch.Tensor): Token type ids. + token_type_embeddings (torch.Tensor): Token type embeddings. + + Returns: + torch.Tensor: Hidden states. This should be the sum of the relevant + encodings for the model. + """ + raise NotImplementedError() + + +class DSEmbeddingRegistry(DSModuleRegistryBase): + registry: Dict = {} + + @staticmethod + def associated_class() -> Type[DSModuleBase]: + return DSEmbeddingBase diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/linear_base.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/linear_base.py new file mode 100644 index 0000000000000000000000000000000000000000..fe6ccbcd934490c94a0b656f09b1f25aef70c163 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/linear_base.py @@ -0,0 +1,72 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from abc import abstractmethod +from typing import Any, Dict, Optional, Type + +import torch + +from deepspeed.runtime.config_utils import DeepSpeedConfigModel +from ..ds_module import DSModuleBase +from ..module_registry import DSModuleRegistryBase +from ..configs import DSLinearConfig +from ...inference_parameter import InferenceParameter + + +class DSLinearBase(DSModuleBase): + """ + Base mixin for all Linear modules. The interface represented by this module + is: + + hidden_out = activation(hidden_in * weight + bias) + + The format and dtype of the weight and bias tensors are not defined and implementations + may compress as necessary. Must support a bias. + """ + + @staticmethod + def config_class() -> Type[DeepSpeedConfigModel]: + return DSLinearConfig + + def __init__(self, config: DSLinearConfig, implementation_config: Dict[str, Any]) -> None: + super().__init__(config, implementation_config) + + @abstractmethod + def transform_param(self, param: torch.Tensor) -> InferenceParameter: + """ + Perform any necessary transformations of the parameters of this module. + + Parameters: + param (torch.Tensor): Weight or bias tensor. + """ + ... + + def forward(self, hidden_states: torch.Tensor, w: torch.Tensor, b: Optional[torch.Tensor] = None) -> torch.Tensor: + """ + Parameters: + hidden_states (torch.Tensor): Hidden states tensor. Expected shape is either + [batch, seq_len, in_channels] or [batch, in_channels]. + + Returns: + torch.Tensor: Output tensor. Tensor should have same number of dimensions as + input tensor. + """ + raise NotImplementedError() + + @property + @abstractmethod + def output(self) -> torch.Tensor: + """ + Return the padded, pre-allocated output Tensor. + """ + ... 
+ + +class DSLinearRegistry(DSModuleRegistryBase): + registry: Dict = {} + + @staticmethod + def associated_class() -> Type[DSModuleBase]: + return DSLinearBase diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/unembed_base.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/unembed_base.py new file mode 100644 index 0000000000000000000000000000000000000000..9eca6fcde7682903ef445f4703e009b5c1b9e641 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/unembed_base.py @@ -0,0 +1,61 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Any, Dict, Optional, Type + +import torch + +from deepspeed.runtime.config_utils import DeepSpeedConfigModel +from ...ragged import RaggedBatchWrapper +from ..ds_module import DSModuleBase +from ..module_registry import DSModuleRegistryBase +from ..configs import DSUnembedConfig + + +class DSUnembedBase(DSModuleBase): + """ + Base mixin for unembedding modules. The interface represented by this module is: + + if config.do_normalization + hidden = layer_norm(hidden) + logits = hidden @ projection + """ + + @staticmethod + def config_class() -> Type[DeepSpeedConfigModel]: + return DSUnembedConfig + + def __init__(self, config: DSUnembedConfig, implementation_config: Dict[str, Any]) -> None: + super().__init__(config, implementation_config) + + def forward(self, + hidden_states: torch.Tensor, + vocab_embedding: torch.Tensor, + ragged_metadata: RaggedBatchWrapper, + gamma: Optional[torch.Tensor] = None, + beta: Optional[torch.Tensor] = None) -> torch.Tensor: + """ + Forward interface. Gamma and beta are optional parameters passed depending on + `self.config.do_normalization`. + + Args: + hidden_states (torch.Tensor): Hidden states of shape [tokens, model_dim] + vocab_embedding (torch.Tensor): Embedding matrix of shape [vocab_size, model_dim] + ragged_metadata (RaggedBatchWrapper): Metadata for the ragged batch. + gamma (Optional[torch.Tensor]): Gamma parameter for layer norm. + beta (Optional[torch.Tensor]): Beta parameter for layer norm. + + Returns: + torch.Tensor: Unembedded logits of shape [n_seqs, vocab_size] + """ + raise NotImplementedError() + + +class DSUnembedRegistry(DSModuleRegistryBase): + registry: Dict = {} + + @staticmethod + def associated_class() -> Type[DSModuleBase]: + return DSUnembedBase diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3af09cff4be57716e3e4295b88163db6b90b608a --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__init__.py @@ -0,0 +1,15 @@ +# Copyright (c) Microsoft Corporation.
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .kv_cache import split_kv +from .manager_configs import ( + AllocationMode, + DSStateManagerConfig, + KVCacheConfig, + MemoryConfig, +) +from .ragged_manager import DSStateManager +from .ragged_wrapper import RaggedBatchWrapper +from .sequence_descriptor import DSSequenceDescriptor, PlaceholderSequenceDescriptor diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f2be85434922080b3aa089af2d64293bdf340693 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/blocked_allocator.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/blocked_allocator.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8c35d19fbdc142c6f807077beedd2ec9fea0d4d Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/blocked_allocator.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/kv_cache.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/kv_cache.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fdf2a7bf52a8991c56ab298438254393b40f8fbc Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/kv_cache.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/manager_configs.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/manager_configs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17725d7064413adb9a1cdae932c6e6a9054ce2da Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/manager_configs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/ragged_manager.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/ragged_manager.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b93f276cf9676ae435648a0d6a9e451c1f68ae4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/ragged_manager.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/ragged_wrapper.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/ragged_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3631915d290b6a8405a552d7e84d3d174dc0ea10 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/ragged_wrapper.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/sequence_descriptor.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/sequence_descriptor.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..274a1cfc0914f05206ec3673059f6145922aa979 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/__pycache__/sequence_descriptor.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/blocked_allocator.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/blocked_allocator.py new file mode 100644 index 0000000000000000000000000000000000000000..7884d8cccb47a1e9a4d94759a12007fabfea90a4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/blocked_allocator.py @@ -0,0 +1,105 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Iterable, Union + +import torch + + +class BlockedAllocator: + """ + Allocator class for managing which blocks are free/used in the + blocked KV-cache. This is a simple allocator that uses a linked list + to keep track of which blocks are free/used. The cost of allocation/deallocation + is O(blocks), where blocks is the number of blocks to allocate/deallocate. + + TODO(cmikeh2): Evaluate performance of this allocator and migrate + to C++ if necessary. + """ + # Number of blocks in the KV-cache(s). + _num_blocks: int + + # Array of blocks, where each element is the next block in the linked list. + _blocks: torch.Tensor + + # Index of the head of the linked list. + _head: int + + # Number of free blocks in the KV-cache. + _free_blocks: int + + def __init__(self, num_blocks: int) -> None: + """ + Initialize an allocator with `num_blocks` blocks. This requires at least + `num_blocks` * 4 bytes of host memory. + + Parameters: + num_blocks (int): The number of blocks to allocate. + """ + + if num_blocks < 1: + raise ValueError(f'Blocked KV-cache must have at least 1 block, provided {num_blocks}') + + self._num_blocks = num_blocks + self._blocks = torch.arange(1, num_blocks + 1, dtype=torch.int32, device='cpu', pin_memory=True) + self._head = 0 + self._free_blocks = num_blocks + + def allocate(self, num_blocks: int) -> torch.Tensor: + """ + Allocate a list of blocks from the associated KV-caches. This will + return `num_blocks` blocks from the KV-cache if they are available, + or raise an exception if there are not enough free blocks. + + Parameters: + num_blocks (int): The number of blocks to allocate. + + Returns: + List[int]: The list of blocks allocated. + """ + if num_blocks > self._free_blocks: + raise ValueError(f'Not enough free blocks in the KV-cache to allocate {num_blocks} blocks') + + allocated_blocks = torch.zeros(num_blocks, dtype=torch.int32) + for i in range(num_blocks): + allocated_blocks[i] = self._head + self._head = self._blocks[self._head].item() + self._blocks[allocated_blocks[i]] = -1 # Mark as used + self._free_blocks -= 1 + + return allocated_blocks + + def free(self, blocks: Union[Iterable[int], int]) -> None: + """ + Return a list of blocks to the free pool. If a single invalid block is provided (i.e., + one that is out of range of the allocator or is already free), then an exception is raised + and no blocks are freed. + + Parameters: + blocks (Union[Iterable[int], int]): The list of blocks to free. If only one block + is to be freed, this can be alone as an integer. + """ + if isinstance(blocks, int): + blocks = [blocks] + + for block in blocks: + # Parse all blocks for validity before mutating the list. 
+ if block < 0 or block >= self._num_blocks: + raise ValueError(f'Invalid block {block} provided to free') + + if self._blocks[block] != -1: + raise ValueError(f'Block {block} is already free') + + for block in blocks: + self._blocks[block] = self._head + self._head = block + self._free_blocks += 1 + + @property + def free_blocks(self) -> int: + """ + Return the number of free blocks in the KV-cache. + """ + return self._free_blocks diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/csrc/fast_host_buffer.cu b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/csrc/fast_host_buffer.cu new file mode 100644 index 0000000000000000000000000000000000000000..31347636b50c40fc74f0c477e848a9cddbab0c92 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/csrc/fast_host_buffer.cu @@ -0,0 +1,18 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "ds_kernel_utils.h" +#include "fast_host_buffer.h" + +void* get_cuda_fast_buffer(int64_t size) +{ + void* buffer_ptr; + // Host allocation flags that should minimize the host -> accelerator copy latency + unsigned int alloc_flags = + cudaHostAllocPortable | cudaHostAllocMapped | cudaHostAllocWriteCombined; + + cudaHostAlloc(&buffer_ptr, size, alloc_flags); + return buffer_ptr; +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/csrc/ragged_ops.cpp b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/csrc/ragged_ops.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ce115f993c3c5676c77055773569b50ac68f0f7c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/csrc/ragged_ops.cpp @@ -0,0 +1,76 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include <pybind11/pybind11.h> +#include <torch/extension.h> + +#include "fast_host_buffer.h" + +/* +Similar to doing an empty_like to replicate a Tensor on the host, but will +attempt to optimize for faster host -> accelerator copies. Since this is on the critical +path for the forward pass, this should directly improve performance. +Allocates the shadow buffers for the input_ids, batch, seq and kv_ids tensors. + +Arguments: + device_mirror: A tensor on the accelerator that should be mirrored by the host. + +Returns: + A tensor on the host of the same size and datatype optimized for fast host -> accelerator +copies.
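A small usage sketch for the `BlockedAllocator` above, assuming pinned host memory is available on the machine; the block ids noted in the comments are what a freshly constructed free list yields.

```python
import torch
from deepspeed.inference.v2.ragged.blocked_allocator import BlockedAllocator

allocator = BlockedAllocator(num_blocks=8)   # free list over blocks 0..7
blocks = allocator.allocate(3)               # tensor([0, 1, 2], dtype=torch.int32) on a fresh allocator
assert allocator.free_blocks == 5

allocator.free(blocks.tolist())              # return the blocks to the pool
assert allocator.free_blocks == 8
```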
+*/ +torch::Tensor allocate_fast_host_buffer(torch::Tensor device_mirror) +{ +#ifdef __HIP_PLATFORM_AMD__ + auto options = + torch::TensorOptions().device(torch::kCPU).pinned_memory(true).dtype(device_mirror.dtype()); + auto buffer = torch::empty(device_mirror.sizes(), options); +#else + + void* buffer_ptr = get_cuda_fast_buffer(device_mirror.numel() * device_mirror.element_size()); + + auto options = torch::TensorOptions().device(torch::kCPU).dtype(device_mirror.dtype()); + auto buffer = torch::from_blob(buffer_ptr, device_mirror.sizes(), options); +#endif + return buffer; +} + +torch::Tensor allocate_view_on(torch::Tensor& tensor, torch::Tensor& buffer, int64_t offset) +{ + int8_t* data = reinterpret_cast<int8_t*>(buffer.data_ptr()); + + auto options = tensor.options().device(buffer.device()); + + return at::from_blob(data + offset, tensor.sizes(), tensor.strides(), options); +} + +torch::Tensor allocate_view_like(py::tuple shape, + py::tuple strides, + torch::Tensor& dummy_tensor, + torch::Tensor& buffer, + int64_t offset) +{ + int8_t* data = reinterpret_cast<int8_t*>(buffer.data_ptr()); + + auto options = torch::TensorOptions().device(buffer.device()).dtype(dummy_tensor.dtype()); + + return at::from_blob(data + offset, + shape.cast<std::vector<int64_t>>(), + strides.cast<std::vector<int64_t>>(), + options); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("allocate_fast_host_buffer", + &allocate_fast_host_buffer, + "Allocate a host mirror of an accelerator Tensor."); + m.def("allocate_view_on", + &allocate_view_on, + "Allocate a view on a Tensor on the same device as the input Tensor."); + m.def("allocate_view_like", + &allocate_view_like, + "Allocate a view on a Tensor on the same device as the input Tensor."); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/includes/fast_host_buffer.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/includes/fast_host_buffer.h new file mode 100644 index 0000000000000000000000000000000000000000..81f24ed8fdaadfcf1b712e7c025e6f7a2e4a95a1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/includes/fast_host_buffer.h @@ -0,0 +1,14 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include "ds_kernel_utils.h" + +/* +Wrapper around cudaHostAlloc with some specific flags. Returns a pointer to the +memory region of `size` bytes. +*/ +void* get_cuda_fast_buffer(int64_t size); diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/kv_cache.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/kv_cache.py new file mode 100644 index 0000000000000000000000000000000000000000..ceba3190b93cdc378bc4581def235ffa99210bfd --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/kv_cache.py @@ -0,0 +1,208 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import operator +from functools import reduce +from typing import Any, Iterable, Optional, Tuple + +import torch + +import deepspeed.comm as dist +from deepspeed.comm.reduce_op import ReduceOp + +from deepspeed.accelerator import get_accelerator +from ..inference_utils import elem_size +from ..logging import inference_logger +from .blocked_allocator import BlockedAllocator +from .manager_configs import AllocationMode, KVCacheConfig, MemoryConfig + + +def split_kv(kv_cache: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Split a KV cache instance into its key and value components.
+ + Parameters: + kv_cache (torch.Tensor): The KV-cache to split. This should be a 5D tensor with the + following shape: [num_blocks, block_size, 2, num_heads, head_size] + + Returns: + Tuple[torch.Tensor, torch.Tensor]: The key and value components of the KV-cache. Both + tensors will have the shape [num_blocks, block_size, num_heads, head_size]. + """ + if kv_cache.ndim != 5: + raise ValueError(f"KV-cache must have 5 dimensions, got {kv_cache.ndim}.") + + return kv_cache[:, :, 0, :, :], kv_cache[:, :, 1, :, :] + + +class BlockedKVCache: + + _caches: Tuple[torch.Tensor, ...] + """ + Backing storage for all KV caches. This is a 6D tensor with the following shape: + (num_caches, num_blocks, block_size, 2, num_heads, head_size) + """ + + _allocators: Tuple[BlockedAllocator, ...] + """ + Block allocator for tracking cache usage. This manages the GPU cache. + """ + + _configs: Tuple[KVCacheConfig, ...] + """ + Configuration of the KV cache(s). See ``KVCacheConfig`` for more details. This enables the support + for different types/shapes of KV-caches (i.e. the alternating local and global attention in + GPT-Neo). + """ + + def __init__(self, + configs: Tuple[KVCacheConfig, ...], + memory_config: MemoryConfig, + mp_group: Optional[Any] = None, + offload: bool = False) -> None: + """ + Create a container that will maintain the storage and allocations for a set of + blocked KV-caches. + + Parameters: + config (KVCacheConfig): The configuration of the KV-cache. + slack (int): The amount of slack space to reserve in GPU memory for the cache. + enable_offload (bool): Whether to enable offloading of the cache to the host. + blocks (int): The number of blocks to pre-allocate for the cache. If this is set, + slack will be ignored. + """ + self._configs = configs + self._memory_config = memory_config + self._enable_offload = offload + + if self._enable_offload: + raise NotImplementedError("Offloading of KV-caches is not yet supported.") + + if AllocationMode(self._memory_config.mode) is AllocationMode.RESERVE: + # TODO(cmikeh2): Change the weighting based on the type of the KV-cache + + total_per_block_footprint = 0 + for config in self._configs: + per_block_footprint = reduce(operator.mul, config.cache_shape, config.block_size) + per_block_footprint *= 2 # for key and value + total_per_block_footprint += per_block_footprint * elem_size(config.cache_dtype) + + # Perform a dummy nccl call before calculating available memory, on some systems (H100) we've observed higher memory allocations from NCCL + if dist.get_world_size(group=mp_group) > 1: + dummy_tensor = torch.tensor(0, dtype=torch.int32, device=get_accelerator().current_device()) + dist.all_reduce(dummy_tensor, op=ReduceOp.MIN, group=mp_group) + + get_accelerator().empty_cache() + available_kv_memory = get_accelerator().available_memory() - self._memory_config.size + total_memory = get_accelerator().total_memory() + + inference_logger().debug( + f"Memory usage before KV-cache allocation: total_memory={total_memory}, available_kv_memory={available_kv_memory}, total_per_block_footprint={total_per_block_footprint}" + ) + + if available_kv_memory < total_per_block_footprint: + raise ValueError( + f"Insufficient memory to allocate KV-caches. Required: {total_per_block_footprint}, Available: {available_kv_memory}" + ) + + num_blocks = available_kv_memory // total_per_block_footprint + + # In a multi-process setting, we need to ensure that all processes have the same + # KV cache capacity to ensure scheduling guarantees are equivalent on all ranks. 
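A quick shape check for `split_kv` using hypothetical sizes; the layout matches the per-cache-group storage documented above.

```python
import torch
from deepspeed.inference.v2.ragged import split_kv

num_blocks, block_size, num_heads, head_size = 4, 128, 8, 64

# [num_blocks, block_size, 2, num_heads, head_size]; dim 2 separates keys and values.
kv_cache = torch.empty(num_blocks, block_size, 2, num_heads, head_size, dtype=torch.float16)

key, value = split_kv(kv_cache)
assert key.shape == (num_blocks, block_size, num_heads, head_size)
assert value.shape == key.shape
```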
+ if dist.get_world_size(group=mp_group) > 1: + reduce_tensor = torch.tensor(num_blocks, dtype=torch.int32, device=get_accelerator().current_device()) + dist.all_reduce(reduce_tensor, op=ReduceOp.MIN, group=mp_group) + num_blocks = reduce_tensor.item() + + # This is ugly but don't want the fragmentation of the 8 byte Tensor maybe + # hanging around. + del reduce_tensor + get_accelerator().empty_cache() + else: # AllocationMode.ALLOCATE + num_blocks = self._memory_config.size + + caches = [] + allocators = [] + + for cache_group_id, config in enumerate(self._configs): + num_caches = config.cache_shape[0] + num_heads = config.cache_shape[1] + head_size = config.cache_shape[2] + + alloc_shape = (num_caches, num_blocks, config.block_size, 2, num_heads, head_size) + inference_logger().info( + f"Allocating KV-cache {cache_group_id} with shape: {alloc_shape} consisting of {num_blocks} blocks.") + caches.append(torch.empty(alloc_shape, dtype=config.cache_dtype, + device=get_accelerator().current_device())) + allocators.append(BlockedAllocator(num_blocks)) + + self._caches = tuple(caches) + self._allocators = tuple(allocators) + + def reserve(self, num_blocks: int, cache_group: int = 0) -> torch.Tensor: + """ + Reserve a number of blocks from the cache. This will return a 1D tensor of + block_ids that have been marked as reserved. + + Parameters: + num_blocks (int): The number of blocks to reserve. + cache_group (int): The cache group to reserve from. Default is 0. + """ + return self._allocators[cache_group].allocate(num_blocks) + + def free(self, blocks: Iterable[int], cache_group: int = 0) -> None: + """ + Free a set of blocks from the cache. This will mark the blocks as free in the + allocator. + + Parameters: + blocks (Iterable[int]): The blocks to free. + cache_group (int): The cache group to free from. Default is 0. + """ + self._allocators[cache_group].free(blocks) + + def offload(self, blocks: Iterable[int], cache_group: int = 0) -> torch.Tensor: + """ + Offload KV-cache blocks from accelerator memory to the host. + + Parameters: + blocks (Iterable[int]): The blocks to offload. + cache_group (int): The cache group to offload from. Default is 0. + """ + raise NotImplementedError("Offloading is not yet supported.") + + def restore(self, blocks: Iterable[int], cache_group: int = 0) -> torch.Tensor: + """ + Restore KV-cache blocks from the host to accelerator memory. + + Parameters: + blocks (Iterable[int]): The blocks to restore. + cache_group (int): The cache group to restore to. Default is 0. + """ + raise NotImplementedError("Offloading is not yet supported.") + + def get_cache(self, cache_id: int, cache_group: int = 0) -> torch.Tensor: + """ + Get the tensor associated with the given cache ID. + + Parameters: + cache_id (int): The ID of the cache tensor to get. + cache_group (int): The cache group to get from. Default is 0. 
+ """ + return self._caches[cache_group][cache_id] + + @property + def free_blocks(self) -> torch.Tensor: + """ + Return the number of free blocks in each cache + """ + return [allocator.free_blocks for allocator in self._allocators] + + @property + def num_caches(self) -> int: + """ + Return the number of caches + """ + return len(self._caches) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/manager_configs.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/manager_configs.py new file mode 100644 index 0000000000000000000000000000000000000000..a5e98e5bcef16d4268ad27fd0b947b8d3ec2a2e0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/manager_configs.py @@ -0,0 +1,183 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from enum import Enum +from typing import Tuple + +from deepspeed.pydantic_v1 import PositiveInt, validator + +from deepspeed.runtime.config_utils import DeepSpeedConfigModel +from ..inference_utils import DtypeEnum + + +class KVCacheType(Enum): + + DENSE = "dense" + """ + Dense KV-cache. This is the default type. + """ + + LOCAL = "local" + """ + KV-cache that attends to only a local (trailing) window of tokens. + """ + + +class KVCacheConfig(DeepSpeedConfigModel): + + type: KVCacheType = KVCacheType.DENSE + """ + Type of KV-cache to use. This may inform the allocator of the expected access/retention pattern + to enable more efficient memory management. + """ + + block_size: int = 128 + """ + Number of tokens that may be contained in each cache block. + """ + + num_allocation_groups: PositiveInt = 1 + """ + Allocation groups are assumed to be able to use the same allocation block size because + the allocation granularity is the same but the number of blocks required in each group + may differ. + + As a concrete example, consider a model with alternating layers of local and global + attention (such as GPTNeo). The local attention layers do not require the same number + of cache blocks as the global layer. However, a static partitioning scheme is sub-optimal since the ratio of local to global KV-cache blocks is not constant across + the range of sequence lengths that may be encountered. + + NOTE: In theory, this functionality could be used to do per-head and per-layer + KV-cache allocation, but it is likely the allocator will struggle with managing that + many blocks. + + NOTE: This will need to be primarily understood and handled by the model implementation + itself, rather than the KV cache manager. However, I'd like to make this explicit. + """ + + cache_shape: Tuple[PositiveInt, PositiveInt, PositiveInt] + """ + The shape of the cache per token. The first dimension is the number of individual + caches, the second is the number of heads, and the third is the head size. The number + of caches argument here is per allocation group. + """ + + cache_dtype: DtypeEnum = DtypeEnum.fp16 + """ + Data type of the KV-cache. + """ + + max_blocks_per_allocation_group: PositiveInt = 64 + """ + Maximum number of blocks that can be associated with an allocation group. + """ + + +""" +The config above is a little confusing so let's use a couple of concrete examples of +usage: + +Model 1: Llama-13B with a block size of 256 + +Llama is uniform attention so we have a single allocation group. 
The cache shape is +(40 layers, 40 heads, 128 head size) + +```python +llama_kv_config = KVCacheConfig(block_size=256, + num_allocation_groups=1, + cache_shape=(40, 40, 128)) +``` + +Model 2: GPTNeo-2.7B with a block size of 128 + +GPTNeo has alternating local and global attention layers. We have two allocation groups. +There are 16 layers of each type with 20 heads apiece at 128 head size. + +```python +gptneo_kv_config = KVCacheConfig(num_allocation_groups=2, cache_shape=(16, 20, 128)) +``` +""" + + +class AllocationMode(Enum): + """ + Helper class to describe memory allocation strategies for the KV-cache. + """ + + RESERVE = "reserve" + """ + Reserve a small amount of memory for non-KV cache allocations. + """ + + ALLOCATE = "allocate" + """ + Allocate an explicit number of KV blocks. + """ + + +class MemoryConfig(DeepSpeedConfigModel): + + mode: AllocationMode = AllocationMode.RESERVE + + size: PositiveInt = 1_000_000_000 + """ + Parameter for each of the modes. + + If mode is RESERVE, this is the amount of memory in bytes to reserve after allocating the + KV-cache. If in a tensor-parallel regime, this amount is guaranteed to be reserved on + all devices. + + If mode is ALLOCATE, this is the number of blocks to allocate for the KV-cache. This may + require tuning for model/GPU setups. + """ + + +class DSStateManagerConfig(DeepSpeedConfigModel): + + max_tracked_sequences: PositiveInt = 2048 + """ + How many sequences this engine will track simultaneously. This limit should be greater + than the ``max_ragged_sequence_count``. + """ + + max_ragged_batch_size: PositiveInt = 768 + """ + The maximum number of tokens that can be contained in a single ragged batch. Passing + a larger value than this will raise an exception that must be handled by the runtime. + """ + + max_ragged_sequence_count: PositiveInt = 512 + """ + The maximum number of sequences that can compose a batch. This limitation is only + relevant under CUDA graphing scenarios currently, where the maximum number of blocks + is largely bound by the total number of sequences in the ragged batch. This number cannot + be larger than ``max_tracked_sequences`` or ``max_ragged_batch_size``. + """ + + max_context: PositiveInt = 8192 + """ + The maximum number of tokens (inclusive of generation) that can be contained in a single + sequence. Currently used to bound the size of the KV cache metadata. + """ + + memory_config: MemoryConfig = MemoryConfig() + """ + Directive for how to manage the creation of the KV-cache. See MemoryConfig for more + details. + """ + + offload: bool = False + """ + Enable tracking for offloading KV-cache to host memory. Currently unsupported. + """ + + @validator("max_ragged_sequence_count") + def max_ragged_sequence_count_validator(cls, v: int, values: dict): + # If the attributes below failed their validation they won't appear in the values dict. 
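To make the two allocation modes concrete, here is the per-block arithmetic that `BlockedKVCache.__init__` applies to the Llama-13B example above, assuming `elem_size(fp16)` is 2 bytes.

```python
import operator
from functools import reduce

block_size, cache_shape, bytes_per_elem = 256, (40, 40, 128), 2

per_block_elems = reduce(operator.mul, cache_shape, block_size) * 2   # keys and values
per_block_bytes = per_block_elems * bytes_per_elem
assert per_block_bytes == 209_715_200                                 # 200 MiB per KV block

# AllocationMode.RESERVE: num_blocks = (available memory - MemoryConfig.size) // per_block_bytes
# AllocationMode.ALLOCATE: MemoryConfig.size is itself the number of blocks.
```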
+ if "max_tracked_sequences" in values and v > values["max_tracked_sequences"]: + raise ValueError("max_ragged_sequence_count must be less than max_tracked_sequences") + if "max_ragged_batch_size" in values and v > values["max_ragged_batch_size"]: + raise ValueError("max_ragged_sequence_count must be less than max_ragged_batch_size") + return v diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/ragged_manager.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/ragged_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..ecc3c52a5834321255d3e475fc2fcf8eb391dd82 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/ragged_manager.py @@ -0,0 +1,206 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +from typing import Any, Dict, Optional, Tuple + +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import RaggedUtilsBuilder +from deepspeed.utils.logging import logger + +from .blocked_allocator import BlockedAllocator +from .kv_cache import BlockedKVCache +from .manager_configs import DSStateManagerConfig, KVCacheConfig +from .sequence_descriptor import DSSequenceDescriptor + + +class DSStateManager: + """ + Base abstract class for managing blocked KV caches. Will probably have a single + implementation for now. + """ + + _config: DSStateManagerConfig + """ + Config for state management. See DSStateManagerConfig for more details. The arguments here + should come from the engine config. + """ + + _kv_configs: Tuple[KVCacheConfig] + """ + Config for the KV cache. See KVCacheConfig for more details. These arguments should derive + from the model implementation. + """ + + _kv_cache: BlockedKVCache + """ + Persistent KV cache store. + """ + + # Container for tracking all sequences in the system. + _seqs: Dict[int, DSSequenceDescriptor] + """ + Container for tracking all sequences in the system. + + TODO(cmikeh2): Evaluate if this has any performance implications. + """ + + # Allocator for tracking sequences. + _tracking_allocator: BlockedAllocator + _all_block_ids: Tuple[torch.Tensor, ...] + _all_block_ids_shadow: Tuple[torch.Tensor, ...] + + def __init__(self, + config: DSStateManagerConfig, + kv_configs: Tuple[KVCacheConfig, ...], + base_mp_group: Optional[Any] = None) -> None: + """ + The key + + Parameters: + block_size (int): The number of tokens to allocate in each block. + """ + self._config = config + self._kv_configs = kv_configs + + # Load our helpers for host allocation. + self._ragged_utils = RaggedUtilsBuilder().load() + + # Initialize the allocator for tracking sequences (so this doesn't need to be ad-hoc). + self._tracking_allocator = BlockedAllocator(self._config.max_tracked_sequences) + + all_block_ids = [] + all_block_ids_shadow = [] + + for cache_config in self._kv_configs: + # Storage to back tracking the KV cache allocation. + ids_shape = ( + self._config.max_tracked_sequences, + cache_config.num_allocation_groups, + cache_config.max_blocks_per_allocation_group, + ) + + all_block_ids.append(torch.zeros(ids_shape, dtype=torch.int32, device=get_accelerator().current_device())) + all_block_ids_shadow.append(self._ragged_utils.allocate_fast_host_buffer(all_block_ids[-1])) + + self._all_block_ids = tuple(all_block_ids) + self._all_block_ids_shadow = tuple(all_block_ids_shadow) + + # Initialize the sequence container. + self._seqs = {} + + # Finally initialize the KV cache. 
+ self._kv_cache = BlockedKVCache(self._kv_configs, + self._config.memory_config, + mp_group=base_mp_group, + offload=self._config.offload) + + def get_cache(self, cache_id: int, cache_group: int = 0) -> torch.Tensor: + """ + Return the Tensor associated with the given cache id in the specified cache group. + + Arguments: + cache_group (str): The KV cache group. + cache_id (int): The cache id within that group. + """ + return self._kv_cache.get_cache(cache_id, cache_group=cache_group) + + def flush_sequence(self, uid: int) -> None: + """ + Free all resources associated with the given sequence id. + """ + if uid not in self._seqs: + logger.warning(f"Attempting to flush sequence {uid} which does not exist.") + return + + seq = self._seqs[uid] + for i in range(self.n_kv_cache_groups): + self._kv_cache.free(seq.all_block_ids(cache_group=i), cache_group=i) + + self._tracking_allocator.free(seq.tracking_id) + del self._seqs[uid] + + def get_sequence(self, uid: int) -> Optional[DSSequenceDescriptor]: + """ + Get the sequence descriptor for the given sequence id. If the sequence does not exist, + then None is returned. + """ + return self._seqs.get(uid, None) + + def get_or_create_sequence(self, uid: int) -> DSSequenceDescriptor: + """ + Get the existing sequence descriptor for a given uid or initialize one if + it does not exist. NOTE: This will always return a valid sequence descriptor + if one may be allocated and should not be used from APIs that are attempting + to test the schedulability of a hypothetical batch. + """ + seq = self.get_sequence(uid) + if seq is not None: + return seq + else: + return self._create_sequence(uid) + + def _create_sequence(self, uid: int) -> DSSequenceDescriptor: + """ + Create a new sequence descriptor for the given sequence id. + """ + if uid in self._seqs: + raise ValueError(f"Sequence {uid} already exists.") + + try: + tracking_slot = self._tracking_allocator.allocate(1).item() + except ValueError: + raise RuntimeError( + f"Unable to create tracking slot for sequence {uid} since the metadata buffers are full.") + + seq_block_ids = tuple(all_block_ids[tracking_slot] for all_block_ids in self._all_block_ids) + seq_block_ids_shadow = tuple(all_block_ids_shadow[tracking_slot] + for all_block_ids_shadow in self._all_block_ids_shadow) + + self._seqs[uid] = DSSequenceDescriptor(tracking_slot, + seq_block_ids, + seq_block_ids_shadow, + max_context=self._config.max_context) + # TODO(cmikeh2): Debug call here might be unnecessary and is potentially on critical path. + logger.debug(f"Created sequence {uid} with tracking slot {tracking_slot}.") + return self._seqs[uid] + + @property + def tracked_sequences(self) -> Dict[int, DSSequenceDescriptor]: + """ + Return the tracked sequences. + """ + return self._seqs + + @property + def n_tracked_sequences(self) -> int: + """ + Return the number of sequences currently tracked. + """ + return len(self._seqs) + + @property + def kv_block_size(self) -> int: + """ + Return the block size of the KV cache. + """ + return self._kv_config.block_size + + @property + def n_kv_cache_groups(self) -> int: + """ + Return the number of KV caches. + """ + return self._kv_cache.num_caches + + @property + def free_blocks(self) -> torch.Tensor: + """ + Return the number of free blocks in the KV cache. 
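A hypothetical request lifecycle against the manager, assuming a constructed `DSStateManager` named `manager` with a single cache group; in practice the inference engine drives these calls.

```python
uid = 42                                     # request identifier chosen by the caller
seq = manager.get_or_create_sequence(uid)    # allocates a tracking slot on first use

new_blocks = manager.allocate_blocks(2)      # reserve KV blocks for the upcoming tokens
seq.extend_kv_cache(new_blocks)              # record them on the sequence descriptor

# ... build the ragged batch, run the forward pass ...

manager.flush_sequence(uid)                  # return the blocks and the tracking slot
```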
+ """ + return self._kv_cache.free_blocks + + def allocate_blocks(self, n_blocks: int, cache_group: int = 0) -> torch.Tensor: + return self._kv_cache.reserve(n_blocks, cache_group=cache_group) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/ragged_wrapper.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/ragged_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..056ecfa2ac4094cf9380db533a1f031332815641 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/ragged_wrapper.py @@ -0,0 +1,292 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Optional + +import torch + +from deepspeed.accelerator import get_accelerator +from deepspeed.ops.op_builder import RaggedUtilsBuilder + +from .sequence_descriptor import DSSequenceDescriptor +from .manager_configs import DSStateManagerConfig + + +def to_padded(original_size: int) -> int: + """ + Pad to a backend friendly granularity. + """ + + def _pad_to_mul_of_pow2(val: int, pow_2_val: int) -> int: + return val + (pow_2_val - 1) & ~(pow_2_val - 1) + + # TODO(cmikeh2): Tune this approach. This is mainly a placeholder right now. + granularity = 64 if original_size <= 512 else 128 + + return _pad_to_mul_of_pow2(original_size, granularity) + + +class RaggedBatchWrapper: + """ + Container for all the auxiliary Tensors used in the management of a ragged batch. + + For each Tensor, we maintain a shadow Tensor on the host. This Tensor is what is + directly populated when constructing the ragged batch. The shadow Tensors, when possible, + should be allocated so as to support fast host-to-accelerator copies. + """ + + # Tensors to populate the ragged batch into. + _input_ids_shadow: torch.Tensor + _input_ids: torch.Tensor + """ + Forward pass input buffer. + """ + + _batch_metadata_storage: torch.Tensor + _batch_metadata_storage_shadow: torch.Tensor + """ + Holds the number of inflight sequences and tokens for the ragged batch. + """ + + _token_to_seq_storage: torch.Tensor + _token_to_seq_storage_shadow: torch.Tensor + """ + Linear mapping for each of the tokens. Let's say we have 8 tokens in the batch, + with the sequence breakdown being [4, 1, 3]. Then, the mapping would be: + [0, 0, 0, 0, 1, 2, 2, 2] + """ + + _inflight_seq_descriptors: torch.Tensor + _inflight_seq_descriptors_shadow: torch.Tensor + """ + For each sequence in the batch, we store the start token in the batch, the number of tokens + the number of tokens in the history of this sequence, and an unused 4th reserved for alignment. + For the above example this would give: + [[0, 4, H0, X], [4, 1, H1, X], [5, 3, H2, X]] + """ + + # Holds the block ids for each sequence in the ragged batch. + _kv_ptrs: torch.Tensor + _kv_ptrs_shadow: torch.Tensor + """ + List of ptrs pointing to the GPU buffer that holds the KV-block ids for each sequence. + If there are multiple allocation groups associated with each of the sequences, then + then accessing the Nth cache will require accessing the Nth block id + """ + + def __init__(self, config: DSStateManagerConfig) -> None: + """ + Convenience wrapper around the data structures used to represent a ragged + batch for inference. Only a single `RaggedBatchWrapper` should be used per + ragged inference engine. + + The underlying data structures are implemented in `ragged_batch_descriptor.h`. 
+ """ + self._config = config + self._input_ids = torch.zeros((self._config.max_ragged_batch_size), + dtype=torch.int64, + device=get_accelerator().current_device()) + + self._batch_metadata_storage = torch.zeros(2, dtype=torch.int32, device=get_accelerator().current_device()) + + self._token_to_seq_storage = torch.zeros((self._config.max_ragged_batch_size), + dtype=torch.int32, + device=get_accelerator().current_device()) + self._inflight_seq_descriptors = torch.zeros((self._config.max_ragged_sequence_count, 4), + dtype=torch.int32, + device=get_accelerator().current_device()) + self._kv_ptrs = torch.zeros((self._config.max_ragged_sequence_count), + dtype=torch.int64, + device=get_accelerator().current_device()) + + self._utils_module = RaggedUtilsBuilder().load() + host_alloc = self._utils_module.allocate_fast_host_buffer + + self._input_ids_shadow = host_alloc(self._input_ids) + self._batch_metadata_storage_shadow = host_alloc(self._batch_metadata_storage) + self._token_to_seq_storage_shadow = host_alloc(self._token_to_seq_storage) + self._inflight_seq_descriptors_shadow = host_alloc(self._inflight_seq_descriptors) + self._kv_ptrs_shadow = host_alloc(self._kv_ptrs) + + # Default behavior should be no padding + self._is_padded = False + + self._current_tokens = 0 + self._current_sequences = 0 + self._batch_tokens = [] + self._inflight_seq_descriptors_shadow_buf = [] + self._kv_blocks_ptr_buf = [] + self._token_to_seq_storage_shadow_buf = [] + + def clear(self) -> None: + """ + Clear the ragged batch. This will reset the number of tokens and sequences to 0. + """ + self._current_tokens = 0 + self._current_sequences = 0 + self._batch_tokens = [] + self._inflight_seq_descriptors_shadow_buf = [] + self._kv_blocks_ptr_buf = [] + self._token_to_seq_storage_shadow_buf = [] + + def insert_sequence(self, seq_descriptor: DSSequenceDescriptor, tokens: torch.Tensor, do_checks=True) -> None: + """ + Incrementally insert a sequence into the ragged batch. This will update the + metadata for the ragged batch and the sequence. + + Arguments: + seq_descriptor () + """ + if tokens.device != torch.device("cpu"): + # This doesn't really fall under schedulability, so we'll unconditionally check for it. + raise RuntimeError(f"Expected tokens to be on host but found device '{tokens.device}'") + + if do_checks and self.current_sequences == self._config.max_ragged_sequence_count: + raise RuntimeError(f"Ragged batch is full due to sequence limit: {self._config.max_ragged_sequence_count}") + + seq_tokens = tokens.numel() + + if do_checks and self.current_tokens + seq_tokens > self._config.max_ragged_batch_size: + raise RuntimeError(f"Ragged batch is full due to capacity limit: {self._config.max_ragged_batch_size})") + + # The values in _inflight_seq_descriptors_shadow_buf, _token_to_seq_storage_shadow_buf, _kv_blocks_ptr_buf, etc., + # are ultimately stored in PyTorch tensors: _inflight_seq_descriptors_shadow, _token_to_seq_storage_shadow, _kv_ptrs_shadow, etc. + # However, we found it inefficient to iterate over and substitute values into tensor slices or to use copy/fill calls for this purpose. + # Therefore, we initially store the values in Python lists or primitive data types and then copy them collectively in the finalize() method, + # instead of updating the tensors directly in each iteration. 
+ self._batch_tokens.append(tokens) + self._inflight_seq_descriptors_shadow_buf.append(self.current_tokens) + self._inflight_seq_descriptors_shadow_buf.append(seq_tokens) + self._inflight_seq_descriptors_shadow_buf.append(seq_descriptor.seen_tokens) + self._inflight_seq_descriptors_shadow_buf.append(0) # alignment + + self._token_to_seq_storage_shadow_buf.extend([self.current_sequences] * seq_tokens) + + self._kv_blocks_ptr_buf.append(seq_descriptor.kv_blocks_ptr) + + self._current_tokens += seq_tokens + self._current_sequences += 1 + + @property + def tensor_toks(self) -> torch.Tensor: + """ + The number of tokens in the in-flight ragged batch. This will not trigger + synchronization with the device. + """ + cur_toks = self.current_tokens + if self._is_padded: + return to_padded(cur_toks) + else: + return cur_toks + + def finalize(self, padding: Optional[bool] = False) -> None: + """ + Completes construction of the ragged batch by flushing the host buffers to the device. + """ + cur_toks = self.current_tokens + + # Batch-copy the values recorded in insert_sequence() into PyTorch tensors to enhance efficiency. + self._inflight_seq_descriptors_shadow.flatten()[:len(self._inflight_seq_descriptors_shadow_buf)].copy_( + torch.tensor(self._inflight_seq_descriptors_shadow_buf)) + self._input_ids_shadow[:self.current_tokens].copy_(torch.cat(self._batch_tokens, dim=0)) + self._token_to_seq_storage_shadow[:len(self._token_to_seq_storage_shadow_buf)].copy_( + torch.tensor(self._token_to_seq_storage_shadow_buf)) + self._kv_ptrs_shadow[:len(self._kv_blocks_ptr_buf)].copy_(torch.tensor(self._kv_blocks_ptr_buf)) + self._batch_metadata_storage_shadow.copy_(torch.tensor([cur_toks, self.current_sequences])) + + if padding: + padded_toks = to_padded(cur_toks) + self._input_ids_shadow[cur_toks:padded_toks].fill_(-1) + self._token_to_seq_storage_shadow[cur_toks:padded_toks].fill_(-1) + self._is_padded = True + else: + padded_toks = cur_toks + self._is_padded = False + + current_sequences = self.current_sequences + + def _noblock_copy(dst: torch.Tensor, src: torch.Tensor) -> None: + dst.copy_(src, non_blocking=True) + + _noblock_copy(self._input_ids[:padded_toks], self._input_ids_shadow[:padded_toks]) + _noblock_copy(self._batch_metadata_storage, self._batch_metadata_storage_shadow) + _noblock_copy(self._token_to_seq_storage[:padded_toks], self._token_to_seq_storage_shadow[:padded_toks]) + _noblock_copy(self._inflight_seq_descriptors[:current_sequences], + self._inflight_seq_descriptors_shadow[:current_sequences]) + _noblock_copy(self._kv_ptrs[:current_sequences], self._kv_ptrs_shadow[:current_sequences]) + + def input_ids(self, on_device: bool = True) -> torch.Tensor: + """ + The input ids tensor for the ragged batch. If the device Tensor is requested, the Tensor + is truncated to the number of tokens in the batch. + """ + if on_device: + return self._input_ids[:self.tensor_toks] + else: + return self._input_ids_shadow + + def batch_metadata_buffer(self, on_device: bool = True) -> torch.Tensor: + """ + Buffer associated with the batch metadata tensor that can + be populated in preparation for passing a new input to the device. + """ + if on_device: + return self._batch_metadata_storage + else: + return self._batch_metadata_storage_shadow + + def tokens_to_seq(self, on_device: bool = True) -> torch.Tensor: + """ + Mapping of token to which sequence it belongs to in the ragged batch. If the device Tensor + is requested, the Tensor is truncated to the number of tokens in the batch. 
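Worked values for `to_padded` as defined earlier in this file (64-token granularity up to 512 tokens, 128 beyond), which is the padding that `finalize(padding=True)` applies before filling the padded tail with -1.

```python
assert (37 + 63) & ~63 == 64        # to_padded(37)  -> 64
assert (512 + 63) & ~63 == 512      # to_padded(512) -> 512 (already aligned)
assert (513 + 127) & ~127 == 640    # to_padded(513) -> 640 (128 granularity past 512)
```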
+ """ + if on_device: + return self._token_to_seq_storage[:self.tensor_toks] + else: + return self._token_to_seq_storage_shadow + + def inflight_seq_descriptors(self, on_device: bool = True) -> torch.Tensor: + """ + Buffer associated with the metadata of each sequence in the ragged batch. If the device Tensor + is requested, the Tensor is truncated to the number of sequences in the batch. + """ + if on_device: + return self._inflight_seq_descriptors[:self.current_sequences] + else: + return self._inflight_seq_descriptors_shadow + + def kv_ptrs(self, on_device: bool = True) -> torch.Tensor: + """ + Pointer to where the list of KV ids associated with a sequence are. If the device Tensor + is requested, the Tensor is truncated to the number of sequences in the batch. + """ + if on_device: + return self._kv_ptrs[:self.current_sequences] + else: + return self._kv_ptrs_shadow + + def masks(self, on_device: bool = True) -> Optional[torch.Tensor]: + """ + Placeholder for supporting complex masks. Currently not supported. + + Models that will need this will be BERT-like, not generative. + """ + return None + + @property + def current_tokens(self) -> int: + """ + The number of tokens in the in-flight ragged batch. This will not trigger + synchronization with the device. + """ + return self._current_tokens + + @property + def current_sequences(self) -> int: + """ + The number of sequences in the in-flight ragged batch. This will not trigger + synchronization with the device. + """ + return self._current_sequences diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/sequence_descriptor.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/sequence_descriptor.py new file mode 100644 index 0000000000000000000000000000000000000000..6b9f65255eec8ed0996c45e224941779777ef7f3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/ragged/sequence_descriptor.py @@ -0,0 +1,280 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import List, Tuple, Union + +import torch + + +class BaseSequenceDescriptor: + + @property + def seen_tokens(self) -> int: + """ + The number of tokens for this sequence that have completed a forward pass. + """ + raise NotImplementedError() + + @property + def cur_allocated_blocks(self, cache_group: int = 0) -> int: + """ + The number of KV blocks currently allocated for this sequence. + """ + raise NotImplementedError() + + @property + def kv_blocks_ptr(self, cache_group: int = 0) -> int: + """ + The pointer to the KV blocks for this sequence. + """ + raise NotImplementedError() + + +class PlaceholderSequenceDescriptor(BaseSequenceDescriptor): + """ + The DummySequenceDescriptor is an empty object that allows us to perform schedulability + checks before formally tracking a sequence. + """ + + def __init__(self, seen_tokens=0, cur_allocated_blocks=0, kv_blocks_ptr=0) -> None: + self._seen_tokens = seen_tokens + self._cur_allocated_blocks = cur_allocated_blocks + self._kv_blocks_ptr = kv_blocks_ptr + + @property + def seen_tokens(self) -> int: + return self._seen_tokens + + @property + def cur_allocated_blocks(self, cache_group: int = 0) -> int: + return self._cur_allocated_blocks + + @property + def kv_blocks_ptr(self, cache_group: int = 0) -> int: + return self._kv_blocks_ptr + + +class DSSequenceDescriptor(BaseSequenceDescriptor): + + _seen_tokens: int + """ + Number of tokens in the sequence that have completed a forward pass. 
+ """ + + _in_flight_tokens: int + """ + Number of tokens that have begun a forward pass but not yet completed it. + """ + + _max_context: int + """ + Maximum number of tokens this sequence may eventually include. Currently unused but + may be used in future implementations for speculative caching. + """ + + _num_allocation_groups: Tuple[int, ...] + """ + Number of unique allocation groups associated with the sequence for each cache group. + """ + + _blocks_per_allocation_group: Tuple[torch.IntTensor, ...] + """ + Number of blocks allocated for each allocation group in each cache group. + """ + + # Padded list of KV-cache IDs for the sequence. + _kv_cache_ids: Tuple[torch.Tensor, ...] + _kv_cache_ids_shadow: Tuple[torch.Tensor, ...] + """ + Padded list of KV-cache IDs for the sequence. The padded shape is [num_allocation_groups, max_blocks_per_allocation_group]. + """ + + # The location in the broader ID tensor where the KV-cache IDs for the sequence + # are stored. Used on flush. + _tracking_id: int + + def __init__(self, + tracking_id: int, + kv_cache_ids: Tuple[torch.Tensor, ...], + kv_cache_ids_shadow: Tuple[torch.Tensor, ...], + max_context: int = -1) -> None: + """ + Create the metadata to track a single sequence in the system. + + Arguments: + tracking_id (int): The slot in the tracking buffers used to track this sequence. + kv_cache_ids (Tuple[torch.Tensor, ...]): The KV-cache IDs for the sequence. The shape + of the tensor should be [num_allocation_groups, max_blocks_per_allocation_group]. + There should be one tensor per cache group. + kv_cache_ids_shadow (Tuple[torch.Tensor, ...]): The shadow tensor for the KV-cache IDs. + This tensor should be allocated on the host and should have the same shape as the + tensor provided in ``kv_cache_ids``. There should be one tensor per cache group. + max_context (int): The maximum number of tokens this sequence may eventually include. + Currently unused but may be used in future implementations for speculative caching. + """ + self._tracking_id = tracking_id + self._kv_cache_ids = kv_cache_ids + self._kv_cache_ids_shadow = kv_cache_ids_shadow + self._max_context = max_context + self._n_cache_groups = len(kv_cache_ids) + + self._seen_tokens = 0 + self._in_flight_tokens = 0 + + self._num_allocation_groups = tuple(kv_cache_ids_shadow.shape[0] + for kv_cache_ids_shadow in kv_cache_ids_shadow) + self._blocks_per_allocation_group = tuple( + torch.zeros(num_groups, dtype=torch.int32, device="cpu") for num_groups in self._num_allocation_groups) + + for cache_group, kv_cache_ids in enumerate(kv_cache_ids): + assert self._num_allocation_groups[cache_group] == kv_cache_ids.shape[0] + assert len(kv_cache_ids.shape) == 2 + + @property + def seen_tokens(self) -> int: + """ + Number of tokens in the sequence that have completed a forward pass. + """ + return self._seen_tokens + + @property + def in_flight_tokens(self) -> int: + """ + Number of tokens that have begun a forward pass but not yet completed it. + """ + return self._in_flight_tokens + + @property + def max_context(self) -> int: + """ + Maximum number of tokens for this sequence. Currently unused. + """ + return self._max_context + + @property + def tracking_id(self) -> int: + """ + Return the slot in the tracking buffers used to track this sequence. + """ + return self._tracking_id + + @property + def cur_allocated_blocks(self, cache_group: int = 0) -> int: + """ + Returns the number of blocks currently allocated for this sequence in the specified cache group. 
+ +    Arguments: +        cache_group (int): The cache group to query. +    """ + # Currently, there is only one allocation group. + # A shortcut is used here to bypass the overhead of sum(). + if len(self._blocks_per_allocation_group) == 1: + return self._blocks_per_allocation_group[0].item() + return self._blocks_per_allocation_group[cache_group].sum().item() + + def kv_cache_ids(self, cache_group: int = 0, on_device: bool = False) -> torch.Tensor: + """ + Returns the Tensor containing the block IDs for this sequence on the appropriate device + for the specified cache group. + + Arguments: + cache_group (int): The cache group to query. + on_device (bool): If True, return the device Tensor; otherwise, return the host Tensor. + """ + if on_device: + return self._kv_cache_ids[cache_group] + else: + return self._kv_cache_ids_shadow[cache_group] + + @property + def kv_blocks_ptr(self, cache_group: int = 0) -> int: + """ + Get the device pointer to the base of the KV-cache ids for the specified cache group and + sequence. + + Arguments: + cache_group (int): The cache group to query. + """ + return self._kv_cache_ids[cache_group].data_ptr() + + # TODO: This was previously a property, but it caused issues with PR-4668; need to consult with Connor. + def all_block_ids(self, cache_group: int = 0) -> torch.Tensor: + """ + Return the Tensor containing all block IDs for this sequence in the specified cache group. + + Arguments: + cache_group (int): The cache group to query. + """ + block_ids = [] + for allocation_group, num_blocks in zip(self._kv_cache_ids[cache_group], + self._blocks_per_allocation_group[cache_group]): + block_ids.append(allocation_group[:num_blocks]) + return torch.cat(block_ids) + + def pre_forward(self, num_tokens: int) -> None: + """ + Update the state of the sequence before a forward pass. + + Arguments: + num_tokens (int): The number of tokens in the sequence that will be executed during the + next forward pass of the model. + """ + self._in_flight_tokens = num_tokens + + def post_forward(self) -> None: + """ + Update the state of the sequence after a forward pass. This should be called after the forward + pass completes. NOTE: due to the asynchronous nature of the accelerator, this may be called + before the forward pass completes on the device itself. + """ + self._seen_tokens += self._in_flight_tokens + self._in_flight_tokens = 0 + + def extend_kv_cache(self, new_ids: Union[List[torch.IntTensor], torch.IntTensor], cache_group: int = 0) -> None: + """ + Extend the KV-cache for the sequence. + + Arguments: + new_ids (Union[List[torch.IntTensor], torch.IntTensor]): For each allocation group, the IDs + to add to the KV-cache. If there is only one allocation group, a single tensor can be + provided. Otherwise, a list of tensors should be provided. The tensors do not need + to have the same shape. + """ + if isinstance(new_ids, torch.Tensor): + new_ids = [new_ids] + + if len(new_ids) != self._num_allocation_groups[cache_group]: + raise ValueError( + f"Only {len(new_ids)} allocation groups provided, expected {self._num_allocation_groups[cache_group]}") + + for group_id, new_group_ids in enumerate(new_ids): + new_blocks = new_group_ids.numel() + + if new_blocks == 0: + # If we have multiple groups, it's possible to have an empty group.
+ continue + + shadow_alloc_group = self._kv_cache_ids_shadow[cache_group][group_id] + alloc_group = self._kv_cache_ids[cache_group][group_id] + cur_blocks = self._blocks_per_allocation_group[cache_group][group_id] + + shadow_alloc_group[cur_blocks:cur_blocks + new_blocks].copy_(new_group_ids) + alloc_group[cur_blocks:cur_blocks + new_blocks].copy_(shadow_alloc_group[cur_blocks:cur_blocks + + new_blocks], + non_blocking=True) + + self._blocks_per_allocation_group[cache_group][group_id] += new_blocks + + def free_kv_cache(self, free_ids: Union[List[torch.IntTensor], torch.IntTensor], cache_group: int = 0) -> None: + """ + Free blocks from the KV-cache for the sequence. + + Arguments: + free_ids (Union[List[torch.IntTensor], torch.IntTensor]): The ids of blocks to free + from the KV-cache. If there is only one allocation group, a single tensor can be + provided. Otherwise, a list of tensors should be provided. The tensors do not need + to have the same shape. + """ + raise NotImplementedError("Partial KV-cache freeing is not yet supported.") diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/api-v1-jd-1590.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/api-v1-jd-1590.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..93289f3064f0d69614a88c2d71922a7a31a92b4a --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/api-v1-jd-1590.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9b105adfedc6b6b82f4695ca9bfe232393034cdf79803523f397a6dc5bf824d1 +size 1544 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/api-v1-jdf-1590.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/api-v1-jdf-1590.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..a273bcdfbb3409d37146d32081c82fbc3e7c6e52 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/api-v1-jdf-1590.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:046f5e60564693f0f3b8e382725c8012c3e058647139c24685cec984e40fcd00 +size 1032 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jd-561.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jd-561.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..92ba4281fe86b5273792d24afaabb04eef03199d --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jd-561.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1d38fdd601b67bb9c6d16152f53ddf166a0cfcfef4fa86438e899bfe449226c +size 1798 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-dv-1.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-dv-1.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..52ae92392967d187709107d1c1bc9709c085b519 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-dv-1.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0703b0ae20b9ff75087dc601640ee58f1c2ad6768858ea21a245151da9ba8e4c +size 301 diff --git 
a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-s-act-.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-s-act-.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..6bde2de0c6047726f26476a514d27a0d03c7d4b5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdl-dn-cpu-l-2-s-act-.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70d4596ad879547863109da8675c2b789d07df66b526d7ebcbce9616c4c9b94c +size 347 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdq-561.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdq-561.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..911f6823bb1bf0d9de5120e23e902e9a0a39a2bc --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/api-v1-jdq-561.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8743b2d93d2c62a82fb47e1fbc002b97e25adcfb5bf1fcb26b58ad0bed15bd48 +size 1074 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/data-v1-dl-52739.arff.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/data-v1-dl-52739.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..225208c948bd5270b3911828bead9d2fd3af3fbb --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_561/data-v1-dl-52739.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e96142b5e00dfec2617b0c22d7192b340ae2c28ec3ffc3a894c5be746b970a59 +size 3303
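
The sequence_descriptor.py file added above is easier to review next to a usage sketch. The following is a minimal, hypothetical example of the DSSequenceDescriptor lifecycle: register KV blocks with extend_kv_cache, then wrap each model step with pre_forward/post_forward. The single cache group, the 64-block capacity, the block ids 3/7/9, the token counts, and the use of plain CPU tensors in place of the real device and pinned-host buffers are assumptions for illustration only; they are not part of the diff.

import torch

from deepspeed.inference.v2.ragged.sequence_descriptor import DSSequenceDescriptor

# One cache group containing one allocation group with room for up to 64 block ids.
# Real callers would place the first tensor on the accelerator and the shadow tensor
# on the host; CPU tensors are used here purely so the sketch runs anywhere.
max_blocks = 64
kv_cache_ids = (torch.zeros(1, max_blocks, dtype=torch.int32),)
kv_cache_ids_shadow = (torch.zeros(1, max_blocks, dtype=torch.int32),)

seq = DSSequenceDescriptor(tracking_id=0,
                           kv_cache_ids=kv_cache_ids,
                           kv_cache_ids_shadow=kv_cache_ids_shadow)

# Prefill: a block allocator (not shown) hands out ids 3, 7, and 9 for the prompt.
seq.extend_kv_cache(torch.tensor([3, 7, 9], dtype=torch.int32))
seq.pre_forward(num_tokens=128)   # 128 prompt tokens enter the forward pass
seq.post_forward()                # once the pass is launched they count as seen

# One decode step: a single new token, no additional blocks needed yet.
seq.pre_forward(num_tokens=1)
seq.post_forward()

print(seq.seen_tokens)            # 129
print(seq.cur_allocated_blocks)   # 3
print(seq.all_block_ids())        # tensor([3, 7, 9], dtype=torch.int32)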
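
PlaceholderSequenceDescriptor exists so a scheduler can ask "would this sequence fit?" before it is formally tracked. The helper below is a hypothetical sketch of such a schedulability check; the block size, free-block count, and the blocks_needed function are assumptions made for illustration and are not part of the DeepSpeed API in this diff.

from deepspeed.inference.v2.ragged.sequence_descriptor import (BaseSequenceDescriptor,
                                                               PlaceholderSequenceDescriptor)

KV_BLOCK_SIZE = 128    # tokens per KV block (assumed for the sketch)
FREE_KV_BLOCKS = 32    # blocks currently unallocated (assumed for the sketch)


def blocks_needed(seq: BaseSequenceDescriptor, new_tokens: int) -> int:
    """Extra KV blocks required to run `new_tokens` more tokens of `seq`."""
    total_tokens = seq.seen_tokens + new_tokens
    total_blocks = -(-total_tokens // KV_BLOCK_SIZE)  # ceiling division
    return max(0, total_blocks - seq.cur_allocated_blocks)


# A request that has not been admitted yet is modeled with a placeholder
# (seen_tokens=0, no allocated blocks), so the same arithmetic works for
# both tracked and prospective sequences.
prospective = PlaceholderSequenceDescriptor()
if blocks_needed(prospective, new_tokens=512) <= FREE_KV_BLOCKS:
    print("prompt fits, admit the sequence")
else:
    print("defer until more KV blocks are free")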