applied-ai-018 committed on
Commit c08b9bf · verified · 1 Parent(s): 2334d5a

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__init__.py +19 -0
  2. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/__init__.cpython-310.pyc +0 -0
  3. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/attn_output_parameters.cpython-310.pyc +0 -0
  4. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/embedding_parameters.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/invfreq_parameters.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/mlp_parameters.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/moe_parameters.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/norm_parameters.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/qkv_parameters.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/unembed_parameters.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/attn_output_parameters.py +29 -0
  12. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/embedding_parameters.py +26 -0
  13. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/invfreq_parameters.py +19 -0
  14. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/moe_parameters.py +78 -0
  15. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/unembed_parameters.py +26 -0
  16. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/policy.py +33 -0
  17. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/flat_model_helpers.py +282 -0
  18. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/inference_model_base.py +272 -0
  19. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/inference_policy_base.py +220 -0
  20. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/inference_transformer_base.py +617 -0
  21. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/layer_container_base.py +355 -0
  22. venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/parameter_base.py +255 -0
  23. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/__pycache__/__init__.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/__pycache__/ds_module.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/__pycache__/heuristics.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/__pycache__/module_registry.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/__init__.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/moe_config.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/unembed_config.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/__init__.py +13 -0
  31. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/__pycache__/__init__.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/embedding/__init__.py +6 -0
  33. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/embedding/__pycache__/__init__.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/embedding/__pycache__/ragged_embedding.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/embedding/ragged_embedding.py +77 -0
  36. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/post_norm/__init__.py +6 -0
  37. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/post_norm/cuda_post_ln.py +56 -0
  38. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/__pycache__/__init__.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/__pycache__/cuda_pre_ln.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/__pycache__/cuda_pre_rms.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/cuda_pre_ln.py +69 -0
  42. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/cuda_pre_rms.py +79 -0
  43. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/unembed/__init__.py +6 -0
  44. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/unembed/__pycache__/__init__.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/unembed/ragged_unembed.py +120 -0
  46. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__init__.py +12 -0
  47. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/__init__.cpython-310.pyc +0 -0
  48. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/attention_base.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/embedding_base.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/linear_base.cpython-310.pyc +0 -0
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/__init__.py ADDED
@@ -0,0 +1,19 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from .inference_model_base import DSInferenceModelBase
from .inference_transformer_base import DSTransformerModelBase, DSMoETransformerModelBase
from .inference_policy_base import InferenceV2Policy, ContainerMap
from .sharding import *

# Model Implementations
from .llama_v2 import *
from .opt import *
from .mistral import *
from .mixtral import *
from .falcon import *
from .phi import *
from .qwen import *
from .qwen_v2 import *
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (466 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/attn_output_parameters.cpython-310.pyc ADDED
Binary file (1.04 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/embedding_parameters.cpython-310.pyc ADDED
Binary file (912 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/invfreq_parameters.cpython-310.pyc ADDED
Binary file (763 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/mlp_parameters.cpython-310.pyc ADDED
Binary file (2.56 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/moe_parameters.cpython-310.pyc ADDED
Binary file (3.03 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/norm_parameters.cpython-310.pyc ADDED
Binary file (801 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/qkv_parameters.cpython-310.pyc ADDED
Binary file (4.23 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/__pycache__/unembed_parameters.cpython-310.pyc ADDED
Binary file (951 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/attn_output_parameters.py ADDED
@@ -0,0 +1,29 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch

from ...model_implementations.parameter_base import ParameterBase
"""
Common Attention Output Parameter Patterns
"""


class AttentionOutputParameter(ParameterBase):
    """
    Attention output parameter container.

    Note: The differentiation for something like GQA for this matrix is primarily
    encompassed in the sharding logic, which is currently expected to be performed by
    the model implementation.
    """

    params: torch.Tensor
    """
    Unsharded attention output parameter of shape [model_dim, model_dim]
    """

    def finalize(self) -> torch.Tensor:
        return self.inference_model.transform_attn_out_param(self.params)
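For orientation, here is a minimal sketch (not part of this commit) of the pattern these parameter containers follow: a raw checkpoint tensor is handed to the owning inference model's transform hook when the container is finalized. The `_ToyInferenceModel` class and the cast it performs are illustrative assumptions, not the real `ParameterBase` machinery, which resolves `params` through the class annotation and triggers `finalize()` once every dependency has been set.

import torch


class _ToyInferenceModel:
    # Hypothetical stand-in exposing the transform hook the container expects.
    def transform_attn_out_param(self, p: torch.Tensor) -> torch.Tensor:
        # A real model would shard/cast/permute here; we only cast for illustration.
        return p.to(torch.float16)


model_dim = 8
raw_weight = torch.randn(model_dim, model_dim)  # unsharded [model_dim, model_dim]

# What AttentionOutputParameter.finalize() boils down to once `params` is populated:
model = _ToyInferenceModel()
finalized = model.transform_attn_out_param(raw_weight)
assert finalized.shape == (model_dim, model_dim) and finalized.dtype == torch.float16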
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/embedding_parameters.py ADDED
@@ -0,0 +1,26 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch

from ...model_implementations.parameter_base import ParameterBase
"""
Embedding containers.
"""


class EmbeddingParameter(ParameterBase):
    """
    Embedding container. This should be safe to use for all types of embeddings (i.e. word, position,
    and token type).
    """

    params: torch.Tensor
    """
    Vocabulary parameter of shape [vocab_size, model_dim].
    """

    def finalize(self) -> torch.Tensor:
        return self.inference_model.transform_embedding_param(self.params)
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/invfreq_parameters.py ADDED
@@ -0,0 +1,19 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch

from ...model_implementations.parameter_base import ParameterBase
"""
Common InvFreq Parameter Patterns
"""


class InvFreqParameter(ParameterBase):

    params: torch.Tensor

    def finalize(self) -> torch.Tensor:
        return self.params.to(self.inference_model.activation_dtype.value)
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/moe_parameters.py ADDED
@@ -0,0 +1,78 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch

from ...model_implementations.parameter_base import ParameterBase, ParamList
"""
Moe Parameters

These parameters are compatible with any model inheriting from ``DSMoETransformerModelBase``.
"""


class MoEGatingWeightParameter(ParameterBase):
    """
    Gating weight matrix.
    """

    params: torch.Tensor
    """
    Projection matrix from the input activations to the gate logits.
    """

    def finalize(self) -> torch.Tensor:
        return self.inference_model.transform_moe_gate_param(self.params)


class UnfusedMoEMLP1Parameter(ParameterBase):
    """
    This container should be used when the experts are held in separate parameters
    and need to be joined into a single group.
    """

    experts: ParamList("n_experts")  # noqa: F821

    def finalize(self) -> torch.Tensor:
        stacked_experts = torch.stack([p for p in self.experts], dim=0)
        return self.inference_model.transform_moe_mlp_1_param(stacked_experts)


class UnfusedMoEMLP2Parameter(ParameterBase):
    """
    This container should be used when the experts are held in separate parameters
    and need to be joined into a single group.
    """

    experts: ParamList("n_experts")  # noqa: F821

    def finalize(self) -> torch.Tensor:
        stacked_experts = torch.stack([p for p in self.experts], dim=0)
        return self.inference_model.transform_moe_mlp_2_param(stacked_experts)


class UnfusedMoEGatedMLPParameter(ParameterBase):
    """
    MoE Parameter for a gated activation function in which the gating matrix is not
    fused in the same parameter as the non-gating matrix.

    This is a stacked version of the ``GatedMLPParameter``. Please see that class for more
    documentation on the layout of the parameters.
    """

    gating_experts: ParamList("n_experts")  # noqa: F821

    up_experts: ParamList("n_experts")  # noqa: F821

    def finalize(self) -> torch.Tensor:
        transposed_experts = []
        for gate, up in zip(self.gating_experts, self.up_experts):
            assert gate.shape[0] == up.shape[0], "Gated MLP parameters must have the same number of neurons."
            total_neurons = gate.shape[0] + up.shape[0]
            fused_expert = torch.cat([gate, up], dim=-1).reshape(total_neurons, -1)
            transposed_experts.append(fused_expert)

        stacked_experts = torch.stack(transposed_experts, dim=0)
        return self.inference_model.transform_moe_mlp_1_param(stacked_experts)
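The gated-MLP fusion in `UnfusedMoEGatedMLPParameter.finalize()` above concatenates each expert's gate and up projections along the input dimension and then reshapes, which interleaves gate and up rows before the experts are stacked. A small shape check (not part of the commit, with hypothetical sizes) makes the resulting layout concrete.

import torch

n_experts, neurons, in_dim = 4, 6, 16
gate_experts = [torch.randn(neurons, in_dim) for _ in range(n_experts)]
up_experts = [torch.randn(neurons, in_dim) for _ in range(n_experts)]

fused = []
for gate, up in zip(gate_experts, up_experts):
    total_neurons = gate.shape[0] + up.shape[0]
    # cat along the input dim, then reshape: gate and up rows end up interleaved.
    fused.append(torch.cat([gate, up], dim=-1).reshape(total_neurons, -1))

stacked = torch.stack(fused, dim=0)
assert stacked.shape == (n_experts, 2 * neurons, in_dim)
# Row 2k holds gate neuron k and row 2k+1 holds up neuron k of each expert.
assert torch.equal(stacked[0, 0], gate_experts[0][0])
assert torch.equal(stacked[0, 1], up_experts[0][0])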
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/common_parameters/unembed_parameters.py ADDED
@@ -0,0 +1,26 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import torch

from ...model_implementations.parameter_base import ParameterBase
"""
Unembedding containers.
"""


class UnembedParameter(ParameterBase):
    """
    Unembedding parameter. This will likely be mapped to the same original weight in the model as the
    embedding, but we have a different preferred sharding approach.
    """

    params: torch.Tensor
    """
    Unembedding parameter of shape [vocab_size, model_dim].
    """

    def finalize(self) -> torch.Tensor:
        return self.inference_model.transform_unembed_param(self.params)
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/falcon/policy.py ADDED
@@ -0,0 +1,33 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from typing import Any

from ...config_v2 import RaggedInferenceEngineConfig
from ..inference_policy_base import ContainerMap, InferenceV2Policy
from .container import FalconNonTransformerContainer, FalconTransformerContainer
from .container import FalconNewArchTransformerContainer
from .model import FalconInferenceModel


class FalconPolicy(InferenceV2Policy):

    def instantiate_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> FalconInferenceModel:
        return FalconInferenceModel(config=self._model_config, engine_config=engine_config, base_mp_group=mp_group)

    def build_container_map(self) -> ContainerMap:
        map = ContainerMap()

        trans_container_cls = FalconNewArchTransformerContainer if self._model_config.new_decoder_architecture else FalconTransformerContainer
        transformer_containers = [trans_container_cls(self.model) for _ in range(self.model.num_layers)]

        map.set_transformer_params(['transformer.h'], transformer_containers)

        map.set_non_transformer_params(FalconNonTransformerContainer(self.model))

        map.set_unmapped_params(
            [f'model.layers.{i}.self_attn.rotary_emb.inv_freq' for i in range(self.model.num_layers)])

        return map
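The `'transformer.h'` prefix registered here is what `ContainerMap.map_param` (see `inference_policy_base.py` below) strips off to recover a layer index and the remaining dotted parameter name. A rough sketch of that routing, not part of the commit and using a hypothetical Falcon-style parameter name:

prefix = 'transformer.h'
name = 'transformer.h.3.self_attention.dense.weight'  # hypothetical checkpoint name

assert name.startswith(prefix)
popped = name[len(prefix) + 1:]               # '3.self_attention.dense.weight'
layer_idx = int(popped.split('.')[0])         # 3 -> routed to transformer_containers[3]
dependency = '.'.join(popped.split('.')[1:])  # 'self_attention.dense.weight'
assert (layer_idx, dependency) == (3, 'self_attention.dense.weight')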
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/flat_model_helpers.py ADDED
@@ -0,0 +1,282 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from typing import Dict, Iterable, Tuple, Optional
from os import path

import torch

from deepspeed.accelerator import get_accelerator
from deepspeed.ops.op_builder import RaggedUtilsBuilder
from deepspeed.runtime.config_utils import DeepSpeedConfigModel
from .layer_container_base import LayerContainer
from ..inference_parameter import InferenceParameter, STR_TO_DTYPE
from ..inference_utils import elem_size


def pad_to_aligned_offset(offset: int, alignment: int = 256) -> int:
    """
    Pad the provided offset to a well-aligned value.
    """
    return ((offset + alignment - 1) // alignment) * alignment


class TensorMetadata(DeepSpeedConfigModel):
    """
    A class to represent a tensor specification.
    """
    dtype: Optional[str]
    shape: Optional[Tuple[int, ...]]
    strides: Optional[Tuple[int, ...]]
    offset: int


class ParameterMetadata(DeepSpeedConfigModel):
    """
    A class to represent a parameter specification.
    """
    core_param: TensorMetadata = None
    aux_params: Dict[str, TensorMetadata] = {}


class LayerMetadata(DeepSpeedConfigModel):
    """
    A class to represent a layer specification.
    """
    params: Dict[str, ParameterMetadata] = {}


class ModelMetadata(DeepSpeedConfigModel):
    """
    A class to represent a model specification.
    """
    policy: str = ""
    layers: Dict[str, LayerMetadata] = {}


def make_param_filename(base: str, rank: int, n_ranks: int) -> str:
    """
    Make a filename for a parameter file.

    Arguments:
        rank: Rank of the file.
        n_ranks: Total number of ranks.

    Returns:
        str: Filename.
    """
    return path.join(base, f"params_rank_{rank}_of_{n_ranks}.pt")


def make_metadata_filename(base: str, rank: int, n_ranks: int) -> str:
    """
    Make a filename for a metadata file.

    Arguments:
        rank: Rank of the file.
        n_ranks: Total number of ranks.

    Returns:
        str: Filename.
    """
    return path.join(base, f"metadata_rank_{rank}_of_{n_ranks}.json")


def make_model_config_filename(base: str) -> str:
    """
    Make a filename for a model config file.

    Arguments:
        base: Base directory.

    Returns:
        str: Filename.
    """
    return path.join(base, "ds_model_config.json")


def flatten_inference_model(
    transformer_containers: Iterable[LayerContainer],
    non_transformer_container: LayerContainer,
    policy_name: str,
) -> Tuple[torch.Tensor, ModelMetadata]:
    """
    Flatten the underlying parameters into a single contiguous buffer.

    Arguments:
        transformer_containers: Iterable of layer containers corresponding to the transformer
            parameters.
        non_transformer_container: Layer container corresponding to the non-transformer parameters.
        policy_name: The name of the policy class (typically accessed with `type(policy).__name__`).

    Returns:
        Tuple[torch.Tensor, ModelMetadata]: The flattened parameter buffer and its metadata.
    """
    alloc_fn = RaggedUtilsBuilder().load().allocate_view_on

    total_size = 0
    metadata = ModelMetadata(policy=policy_name)

    def process_layer(layer_container: LayerContainer, l_name: str, cur_offset: int) -> int:
        """
        Iterate over the parameters of a single container and collect metadata for the final
        flattened buffer.

        Arguments:
            layer_container: The layer container to process.
            l_name: The name of the layer container to key the metadata.
            cur_offset: The current offset into the flattened buffer.

        Captured Variables:
            metadata: The metadata object to populate.

        Returns:
            int: The updated offset into the flattened buffer.
        """
        try:
            _ = layer_container.is_populated
        except ValueError as e:
            raise ValueError(f"Layer container {l_name} is not populated.") from e

        layer_metadata = LayerMetadata()

        for p_name in layer_container.annotation_attrs:
            param = getattr(layer_container, p_name)
            param_metadata = ParameterMetadata()

            if param is None:
                param_metadata.core_param = TensorMetadata(offset=-1)
                layer_metadata.params[p_name] = param_metadata
                continue

            param_metadata.core_param = TensorMetadata(dtype=str(param.dtype),
                                                       shape=param.shape,
                                                       strides=param.stride(),
                                                       offset=cur_offset)

            cur_offset += pad_to_aligned_offset(elem_size(param.dtype) * param.numel())

            for t_name, tensor in param.aux_attrs.items():
                param_metadata.aux_params[t_name] = TensorMetadata(dtype=str(tensor.dtype),
                                                                   shape=tensor.shape,
                                                                   strides=tensor.stride(),
                                                                   offset=cur_offset)

                cur_offset += pad_to_aligned_offset(elem_size(tensor.dtype) * tensor.numel())

            layer_metadata.params[p_name] = param_metadata

        metadata.layers[l_name] = layer_metadata
        return cur_offset

    for i, layer in enumerate(transformer_containers):
        l_name = f"transformer_layer_{i}"
        total_size = process_layer(layer, l_name, total_size)

    l_name = "non_transformer"
    total_size = process_layer(non_transformer_container, l_name, total_size)

    buffer = torch.empty(total_size, dtype=torch.uint8, device=get_accelerator().current_device())

    def copy_layer(layer_container: LayerContainer, l_name: str) -> None:
        """
        Local method for copying from the layer container to the flattened buffer.

        Arguments:
            layer_container: The layer container to copy from.
            l_name: The name of the layer container to key the metadata.

        Captured Variables:
            buffer: The flattened buffer to copy into.
            metadata: The metadata object to populate.
        """
        l_metadata = metadata.layers[l_name]
        for p_name in layer_container.annotation_attrs:
            p_metadata = l_metadata.params[p_name]
            param = getattr(layer_container, p_name)

            if param is None:
                continue

            core_param = alloc_fn(param, buffer, p_metadata.core_param.offset)
            core_param.copy_(param)

            aux_params = {}

            for t_name, tensor in param.aux_attrs.items():
                t_view = alloc_fn(tensor, buffer, p_metadata.aux_params[t_name].offset)
                aux_params[t_name] = t_view
                t_view.copy_(tensor)

            setattr(layer_container, p_name, InferenceParameter.initialize(core_param, **aux_params))

    for i, layer in enumerate(transformer_containers):
        l_name = f"transformer_layer_{i}"
        copy_layer(layer, l_name)

    l_name = "non_transformer"
    copy_layer(non_transformer_container, l_name)

    return buffer, metadata


def restore_inference_model(buffer: torch.Tensor, metadata: ModelMetadata,
                            transformer_containers: Iterable[LayerContainer],
                            non_transformer_container: LayerContainer) -> None:
    """
    Restore the model from the buffer and metadata.

    Arguments:
        buffer: Buffer containing the model parameters.
        metadata: Metadata for the model.
        transformer_containers: Iterable of transformer layer containers.
        non_transformer_container: Non-transformer layer container.
    """
    alloc_fn = RaggedUtilsBuilder().load().allocate_view_like

    def restore_layer(layer_container: LayerContainer, l_name: str) -> None:
        """
        Local method for restoring a layer container from a flattened buffer. This
        only constructs views for the parameters onto the buffer. No data movement
        is performed.

        Arguments:
            layer_container: The layer container to restore.
            l_name: The name of the layer container to key the metadata.

        Captured Variables:
            buffer: The flattened buffer to reconstruct views on top of.
            metadata: The metadata object describing each parameter in the model.
        """
        l_metadata = metadata.layers[l_name]

        for p_name in layer_container.annotation_attrs:
            p_metadata = l_metadata.params[p_name]

            if p_metadata.core_param.offset == -1:
                layer_container.direct_injection(p_name, None)
                continue

            dummy_tensor = torch.empty([], dtype=STR_TO_DTYPE[p_metadata.core_param.dtype])
            core_param = alloc_fn(p_metadata.core_param.shape, p_metadata.core_param.strides, dummy_tensor, buffer,
                                  p_metadata.core_param.offset)

            aux_params = {}

            for t_name, t_metadata in p_metadata.aux_params.items():
                dummy_tensor = torch.empty([], dtype=STR_TO_DTYPE[t_metadata.dtype])
                t_view = alloc_fn(t_metadata.shape, t_metadata.strides, dummy_tensor, buffer, t_metadata.offset)

                aux_params[t_name] = t_view

            restored_param = InferenceParameter.initialize(core_param, **aux_params)
            layer_container.direct_injection(p_name, restored_param)

    for i, layer in enumerate(transformer_containers):
        l_name = f"transformer_layer_{i}"
        restore_layer(layer, l_name)

    l_name = "non_transformer"
    restore_layer(non_transformer_container, l_name)
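The flattening above is essentially offset bookkeeping: each tensor's byte size is padded to a 256-byte boundary and appended to a running cursor that becomes the size of the single uint8 buffer. Below is a self-contained sketch of that arithmetic (not part of the commit); it uses hypothetical tensor sizes, re-implements `pad_to_aligned_offset` locally, and substitutes `Tensor.element_size()` for DeepSpeed's `elem_size` helper.

import torch


def pad_to_aligned_offset(offset: int, alignment: int = 256) -> int:
    return ((offset + alignment - 1) // alignment) * alignment


params = {
    "qkv.weight": torch.empty(96, 32, dtype=torch.float16),   # 96*32*2 = 6144 bytes
    "attn_out.bias": torch.empty(33, dtype=torch.float16),    # 66 bytes -> padded to 256
}

offsets, cursor = {}, 0
for name, t in params.items():
    offsets[name] = cursor
    cursor += pad_to_aligned_offset(t.element_size() * t.numel())

assert offsets == {"qkv.weight": 0, "attn_out.bias": 6144}
assert cursor == 6144 + 256  # total bytes the flat uint8 buffer would need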
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/inference_model_base.py ADDED
@@ -0,0 +1,272 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

from abc import ABC, abstractmethod
from typing import Iterable, Optional, Tuple, Type

import torch

import deepspeed.comm as dist
from ..ragged import DSStateManager, RaggedBatchWrapper
from ..ragged.manager_configs import KVCacheConfig
from ..ragged import DSSequenceDescriptor
from ..model_implementations.layer_container_base import LayerContainer
from ..config_v2 import RaggedInferenceEngineConfig
from .flat_model_helpers import ModelMetadata

try:
    from functools import cached_property
except ImportError:

    def cached_property(func):
        return property(func)


"""
This abstract class defines the interfaces that a model implementation should implement
in order to include anything that may be called by the engine. Most models should be able
to inherit from `DSInferenceTransformerModelBase` to reduce implementation work so it is recommended
to begin there.
"""
"""
Placeholder for typing the model config, which can vary based on model implementation.
"""
DSModelImplementationConfig = Type['DSModelImplementationConfig']
"""
Placeholder for typing the distributed comm object.

TODO(cmikeh2): Replace when we have a more defined API for the inference communication system.
"""
MPType = Type["MPType"]


class DSInferenceModelBase(torch.nn.Module, ABC):
    """
    Implementation of a model for inference composable with ragged batching.
    """

    _config: DSModelImplementationConfig
    """
    Model-specific configuration. No abstraction surrounds this yet.
    """

    _engine_config: RaggedInferenceEngineConfig
    """
    Engine configuration.
    """

    _base_mp_group: MPType
    """
    Base communication group for Tensor-parallel inference.
    """

    _non_transformer: Optional[LayerContainer]
    """
    Abstract container for storing both embedding (pre-transformer) and unembedding (post-transformer)
    parameters. This attribute should be None at model instantiation until the Policy sets
    the model parameters. These parameters are grouped together since many model implementations
    will tie the embedding and unembedding parameters together.
    """

    _transformer: Optional[Iterable[LayerContainer]]
    """
    List of abstract containers (1 per layer) for storing transformer
    parameters. This attribute should be None at model instantiation until the Policy
    sets the model parameters.
    """

    state_manager: Optional[DSStateManager]
    """
    Since the state manager is lazily initialized by the engine, it is not guaranteed to be present
    until full initialization.
    """

    def __init__(self, config: DSModelImplementationConfig, engine_config: RaggedInferenceEngineConfig,
                 base_mp_group: MPType) -> None:
        """
        Minimal initialization of the model.

        Arguments:
            config (DSModelImplementationConfig): Model-specific configuration. No assumptions
                should be made about this config that are not closely tied to the specific
                model implementation.
            engine_config (RaggedInferenceEngineConfig): Engine configuration.
            base_mp_group (MPType): Base communication group for Tensor-parallel inference.
        """
        super().__init__()
        self._config = config
        self._engine_config = engine_config
        self._base_mp_group = base_mp_group

        # Set to None until the Policy sets the model parameters
        self._non_transformer = None
        self._transformer = None
        self._flattened_param_buffer = None
        self._flattened_param_metadata = None

    @property
    def config(self) -> DSModelImplementationConfig:
        """
        The model config.
        """
        return self._config

    def set_parameters(self, transformer: Iterable[LayerContainer], non_transformer: LayerContainer,
                       flattened_param_buffer: torch.Tensor, flattened_param_metadata: ModelMetadata):
        """
        Set the model parameters for the embedding, transformer, and unembedding containers.
        """
        self._transformer = transformer
        self._non_transformer = non_transformer
        self._flattened_param_buffer = flattened_param_buffer
        self._flattened_param_metadata = flattened_param_metadata

    def set_state_manager(self, state_manager: DSStateManager):
        """
        Sets the state manager attribute. This is called by the inference engine after
        the model is fully initialized.
        """
        self.state_manager = state_manager

    @cached_property
    def tp_rank(self) -> int:
        """
        The rank of the current process.

        # TODO(cmikeh2): Kind of a hack right now, but this is too verbose to use at
        the frequency we need.
        """
        return dist.get_rank(group=self._base_mp_group)

    @cached_property
    def tp_size(self) -> int:
        """
        The total number of processes.

        # TODO(cmikeh2): Kind of a hack right now, but this is too verbose to use at
        the frequency we need.
        """
        return dist.get_world_size(group=self._base_mp_group)

    @property
    def model_config(self):
        """
        The model config.
        """
        return self._config

    @property
    def engine_config(self):
        """
        The engine config.
        """
        return self._engine_config

    @property
    def flattened_params(self) -> Optional[torch.Tensor]:
        """
        The flattened parameter buffer.
        """
        return self._flattened_param_buffer

    @property
    def flattened_param_metadata(self) -> Optional[ModelMetadata]:
        """
        The flattened parameter metadata.
        """
        return self._flattened_param_metadata

    @abstractmethod
    def get_kv_requirements(self, sequence: DSSequenceDescriptor, max_new_tokens: int,
                            max_new_blocks: Tuple[int, ...]) -> Tuple[int, torch.Tensor]:
        """
        Given a sequence and the number of new tokens in the sequence, determine the
        number of new KV blocks needed to support the sequence. This method is
        used to help the engine provide schedulability APIs and can be used as a helper
        for ``maybe_allocate_kv``.

        Args:
            sequence (DSSequenceDescriptor): The sequence for which to allocate KV-storage.
            max_new_tokens (int): Maximum number of tokens to hypothetically schedule.
            max_new_blocks (int): Maximum number of blocks to hypothetically allocate.

        Returns:
            Tuple[int, torch.Tensor]: The tuple of number of tokens scheduled and number
                of blocks allocated (per KV cache). In general, only one of these numbers will
                match the corresponding input argument, but this is not guaranteed.
        """
        raise NotImplementedError()

    @abstractmethod
    def get_remaining_block_capacity(self, sequence: DSSequenceDescriptor) -> int:
        raise NotImplementedError()

    @abstractmethod
    def maybe_allocate_kv(self, sequence: DSSequenceDescriptor, n_new_tokens: int) -> None:
        """
        Given a sequence and the number of new tokens in the sequence, determine
        whether or not additional KV-storage is needed and allocate it if so.

        Args:
            sequence (DSSequenceDescriptor): The sequence for which to allocate KV-storage.
            n_new_tokens (int): The number of new tokens in the sequence.
        """
        raise NotImplementedError()

    @abstractmethod
    def kv_cache_config(self) -> Tuple[KVCacheConfig, ...]:
        """
        Return the KV-cache configuration for this model. This should be a tuple of one or more
        KVCacheConfig objects (one for each distinct cache group).
        """
        raise NotImplementedError()

    @property
    @abstractmethod
    def max_sequence_length(self) -> int:
        """
        The maximum sequence length supported by the model.
        """
        ...

    def maybe_free_kv(self, sequence: DSSequenceDescriptor) -> None:
        """
        After completing a forward pass, determine whether or not there are any KV blocks
        that may be freed since they are no longer in use.

        Consider the following example:

        We have a block size of 4 and a local window size of 8. At the beginning of the forward
        pass, 10 tokens had been seen and the new forward has a size of 4. This would lend
        itself to the following cache structure prior to the forward:
            [[0, 1, 2*, 3*] [4*, 5*, 6*, 7*] [8*, 9*, x, x] [x x x x]]
        Where x's denote empty cache locations and * denote values that are needed for attention
        of the next open slot. After the forward, the cache would look like the following:
            [[0, 1, 2, 3] [4, 5, 6*, 7*] [8*, 9*, 10*, 11*] [12* 13* x x]]
        In this case, the first block is no longer needed since it is not needed for any future
        local attention windows. This function would be responsible for freeing that block.

        Default behavior assumes no local patterns that require freeing and in general should
        be sufficient.
        """
        pass

    @abstractmethod
    def prepare_batch(self, wrapped_batch: RaggedBatchWrapper) -> None:
        """
        This will be called before each forward with the intent of building forward-specific metadata
        about a batch. The intent here is to build data structures like attention atoms without necessarily
        needing to implement graphable kernels to do so.

        Abstract so as to force model implementations to opt out of doing anything here explicitly.
        """
        raise NotImplementedError()

    def forward(self, wrapped_batch: RaggedBatchWrapper) -> torch.Tensor:
        """
        Complete a forward pass of the model. This interface should be graphable, so it
        should not rely on the ability to use python control flow.
        """
        raise NotImplementedError()
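The `maybe_free_kv` docstring example reduces to simple arithmetic: a KV block becomes freeable once every slot in it falls before the start of the local attention window needed by the next token. A sketch of that calculation for the docstring's numbers (not part of the commit; the helper below is illustrative, not part of the API):

def freeable_blocks(seen_tokens: int, window: int, block_size: int) -> int:
    # First KV index still needed by the next token's local attention window.
    window_start = max(0, seen_tokens - window)
    return window_start // block_size


assert freeable_blocks(seen_tokens=10, window=8, block_size=4) == 0  # before the forward
assert freeable_blocks(seen_tokens=14, window=8, block_size=4) == 1  # block [0..3] can be freed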
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/inference_policy_base.py ADDED
@@ -0,0 +1,220 @@
# Copyright (c) Microsoft Corporation.
# SPDX-License-Identifier: Apache-2.0

# DeepSpeed Team

import json
from abc import ABC, ABCMeta, abstractmethod
from typing import Any, Iterable, List, Optional, Union

import torch

from ..config_v2 import RaggedInferenceEngineConfig
from ..checkpoint import CheckpointEngineBase
from ..logging import inference_logger
from .layer_container_base import LayerContainer
from .inference_model_base import DSInferenceModelBase
from .flat_model_helpers import (
    flatten_inference_model,
    make_param_filename,
    make_metadata_filename,
    ModelMetadata,
    restore_inference_model,
)

POLICIES = {}


class ContainerMap:

    def __init__(self) -> None:
        self._prefix_map = {}
        self._transformer_params = None
        self._non_transformer_params = None

    @property
    def transformer_params(self) -> Iterable[LayerContainer]:
        return self._transformer_params

    @property
    def non_transformer_params(self) -> LayerContainer:
        return self._non_transformer_params

    def set_transformer_params(self, prefixes: Union[str, Iterable[str]], containers: List[LayerContainer]) -> None:
        if not isinstance(containers, list):
            raise ValueError(
                f"The transformer containers should be a list, of one container per layer, but got {type(containers)} instead."
            )

        self._transformer_prefixes = prefixes if isinstance(prefixes, list) else [prefixes]
        self._transformer_params = containers

    def set_non_transformer_params(self, container: LayerContainer) -> None:
        self._non_transformer_params = container

    def set_unmapped_params(self, prefixes: Union[str, Iterable[str]]) -> None:
        self._unmapped_prefixes = prefixes

    def map_param(self, name, parameter) -> None:
        for unmapped_prefix in self._unmapped_prefixes:
            if name.startswith(unmapped_prefix):
                inference_logger().debug(f"Ignoring: {name} for {unmapped_prefix}")
                return

        for transformer_prefix in self._transformer_prefixes:
            if name.startswith(transformer_prefix):
                popped_name = name[len(transformer_prefix) + 1:]
                layer_idx = popped_name.split(".")[0]
                assert layer_idx.isdigit(
                ), f"expected name to start w. list index but got {layer_idx} instead, name={name}"
                layer_idx = int(layer_idx)
                inference_logger().debug(
                    f"Setting: {'.'.join(popped_name.split('.')[1:])} for layer-idx={layer_idx} to {parameter.shape}")
                self._transformer_params[layer_idx].set_dependency(".".join(popped_name.split(".")[1:]), parameter)
                return

        try:
            inference_logger().debug(f"Setting: {name} to {parameter.shape}")
            self._non_transformer_params.set_dependency(name, parameter)
        except ValueError:
            # Catch the ValueError here from the non_transformer_params because we are knowingly
            # calling it with something that may not match. This should allow us to raise a slightly more
            # informative error message.
            raise ValueError(f"Cannot find container for {name}, please double check the Containers/ContainerMap")

    def validate(self) -> None:
        if not self._non_transformer_params.is_initialized:
            raise RuntimeError("Non-transformer parameters not fully initialized after checkpoint load.")

        for layer_idx, container in enumerate(self._transformer_params):
            if not container.is_initialized:
                raise RuntimeError(
                    f"Transformer container at index {layer_idx} not fully initialized after checkpoint load.")


class PolicyMeta(ABCMeta):

    def __new__(cls, name, bases, dct):
        new_obj = super().__new__(cls, name, bases, dct)
        if name != "InferenceV2Policy":
            POLICIES[name] = new_obj
        return new_obj


class InferenceV2Policy(ABC, metaclass=PolicyMeta):
    """
    The InferenceV2Policy is the base class for all inference policies. An inference policy
    is responsible for instantiating the inference model and mapping the parameters from the
    checkpoint engine to the model itself.
    """

    def __init__(
        self,
        model_config: Any,
        checkpoint_engine: Optional[CheckpointEngineBase] = None,
        inf_checkpoint_path: Optional[str] = None,
    ) -> None:
        """
        Create the Policy with sufficient context to build the model. There are two supported
        model creation mechanisms.

        The first is the generalized ``checkpoint_engine`` which
        will iterate over the parameters of the model and provide them to the policy. These in
        turn will be sharded/transformed by the model implementation.

        The second is used to re-create a previously serialized DeepSpeed inference model. These
        checkpoints should not be used across different model backend configurations.

        TODO(cmikeh2): Enforce this in code
        """
        if checkpoint_engine is None and inf_checkpoint_path is None:
            raise ValueError("Either checkpoint_engine or ds_checkpoint_path must be provided.")

        if checkpoint_engine is not None and inf_checkpoint_path is not None:
            raise ValueError("Only one of checkpoint_engine or ds_checkpoint_path can be provided.")

        self._checkpoint_engine = checkpoint_engine
        self._inf_checkpoint_path = inf_checkpoint_path
        self._model_config = model_config

    def build_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> DSInferenceModelBase:
        """
        Completely instantiate the inference model. This will both create the ops needed to run the
        model, as well as load the model parameters via the checkpoint engine. For more context
        on each of these components please see ``instantiate_model`` and ``populate_model_parameters``.

        Arguments:
            engine_config: The config that has been used to instantiate the engine. This is used
                to communicate to the model implementation the limits on batches (sequences/tokens)
                and bound the size of intermediate buffers.
            mp_group: Object to enable communication between tensor parallel ranks.

        Returns:
            DSInferenceModelBase: An implementation of the inference model abstraction that will be
                run by the engine.
        """
        self.model = self.instantiate_model(engine_config, mp_group)
        self.populate_model_parameters()
        return self.model

    @abstractmethod
    def instantiate_model(self, engine_config: RaggedInferenceEngineConfig) -> DSInferenceModelBase:
        """
        Instantiate the inference model. Depending on the engine/model config, this could be where
        different model implementations could be selected.

        Arguments:
            engine_config: The config that has been used to instantiate the engine. This is used
                to communicate to the model implementation the limits on batches (sequences/tokens)
                and bound the size of intermediate buffers.

        Returns:
            DSInferenceModelBase: An implementation of the inference model abstraction that will be
                run by the engine.
        """
        ...

    @abstractmethod
    def build_container_map(self) -> ContainerMap:
        """
        Build a dictionary representing the structure of the string prefixes leading
        to the parameters to be mapped to the container.

        Returns:
            ContainerMap: An instantiated mapping describing how checkpoint prefixes map
                to ``LayerContainer`` instances.
        """
        raise NotImplementedError()

    def populate_model_parameters(self) -> None:
        """
        This method will iterate over the parameters (as provided by the checkpoint engine) and
        use the container map built by ``build_container_map`` to populate the model.
        """

        container_map = self.build_container_map()

        if self._checkpoint_engine is not None:
            for name, parameter in self._checkpoint_engine.parameters():
                container_map.map_param(name, parameter)

            buffer, metadata = flatten_inference_model(container_map.transformer_params,
                                                       container_map.non_transformer_params, self.__class__.__name__)
        else:

            buffer_path = make_param_filename(self._inf_checkpoint_path, self.model.tp_rank, self.model.tp_size)
            metadata_path = make_metadata_filename(self._inf_checkpoint_path, self.model.tp_rank, self.model.tp_size)

            buffer = torch.load(buffer_path)
            metadata = json.load(open(metadata_path, "r"))
            metadata = ModelMetadata.parse_raw(metadata)

            restore_inference_model(buffer, metadata, container_map.transformer_params,
                                    container_map.non_transformer_params)

        container_map.validate()

        self.model.set_parameters(transformer=container_map.transformer_params,
                                  non_transformer=container_map.non_transformer_params,
                                  flattened_param_buffer=buffer,
                                  flattened_param_metadata=metadata)
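`PolicyMeta` above registers every concrete policy subclass into `POLICIES` by class name at class-definition time, which is why importing a model implementation module is enough to make its policy discoverable. A minimal sketch of that registration pattern (not part of the commit; the class names are toy stand-ins):

from abc import ABC, ABCMeta

REGISTRY = {}


class _Meta(ABCMeta):

    def __new__(cls, name, bases, dct):
        new_cls = super().__new__(cls, name, bases, dct)
        if name != "BasePolicy":  # skip the abstract base itself
            REGISTRY[name] = new_cls
        return new_cls


class BasePolicy(ABC, metaclass=_Meta):
    pass


class ToyFalconPolicy(BasePolicy):  # hypothetical subclass for illustration
    pass


assert REGISTRY == {"ToyFalconPolicy": ToyFalconPolicy}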
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/inference_transformer_base.py ADDED
@@ -0,0 +1,617 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from abc import abstractmethod
7
+ from typing import Optional
8
+
9
+ import torch
10
+
11
+ from deepspeed.accelerator import get_accelerator
12
+ from ..config_v2 import RaggedInferenceEngineConfig
13
+ from ..inference_utils import ActivationType, ceil_div, is_gated
14
+ from ..model_implementations import *
15
+ from ..model_implementations.sharding import *
16
+ from ..modules.configs import (
17
+ DSEmbeddingsConfig,
18
+ DSLinearConfig,
19
+ DSMoEConfig,
20
+ DSNormConfig,
21
+ DSSelfAttentionConfig,
22
+ DSUnembedConfig,
23
+ NormTypeEnum,
24
+ PositionalEmbeddingType,
25
+ RotateHalfConfig,
26
+ )
27
+ from ..modules import heuristics
28
+ from ..ragged import (
29
+ DSSequenceDescriptor,
30
+ KVCacheConfig,
31
+ RaggedBatchWrapper,
32
+ )
33
+ from .inference_model_base import (
34
+ DSInferenceModelBase,
35
+ DSModelImplementationConfig,
36
+ MPType,
37
+ )
38
+ from ..inference_parameter import InferenceParameter
39
+
40
+ try:
41
+ from functools import cached_property
42
+ except ImportError:
43
+
44
+ def cached_property(func):
45
+ return property(func)
46
+
47
+
48
+ class DSTransformerModelBase(DSInferenceModelBase):
49
+ """
50
+ Dimensioning properties
51
+ """
52
+
53
+ @property
54
+ @abstractmethod
55
+ def num_layers(self) -> int:
56
+ """
57
+ Number of the layers in the model
58
+ """
59
+ ...
60
+
61
+ @property
62
+ @abstractmethod
63
+ def model_dim(self) -> int:
64
+ """
65
+ Size of embedding projection and residuals.
66
+ """
67
+ ...
68
+
69
+ @property
70
+ @abstractmethod
71
+ def vocab_size(self) -> int:
72
+ """
73
+ Size of the vocabulary (including padding).
74
+ """
75
+ ...
76
+
77
+ @property
78
+ @abstractmethod
79
+ def head_size(self) -> int:
80
+ """
81
+ Size of each attention head.
82
+ """
83
+ ...
84
+
85
+ @property
86
+ @abstractmethod
87
+ def n_heads(self) -> int:
88
+ """
89
+ The number of query heads on the model. This should not take into account
90
+ any dimension reductions from model sharding.
91
+ """
92
+ ...
93
+
94
+ @property
95
+ def n_heads_q(self) -> int:
96
+ """
97
+ Alias to n_heads.
98
+ """
99
+ return self.n_heads
100
+
101
+ @property
102
+ def n_heads_kv(self) -> int:
103
+ """
104
+ The number of key and value heads on the model. For GQA or MQA, overload this attribute.
105
+ Otherwise it adopts MHA formulations and uses n_heads. This should not take into account
106
+ any dimension reductions from model sharding.
107
+ """
108
+ return self.n_heads
109
+
110
+ @property
111
+ @abstractmethod
112
+ def intermediate_dim(self) -> int:
113
+ """
114
+ The size of the (unsharded) intermediate projection dim. For a gated activation function
115
+ this is the size of the input to the second MLP layer. This should not take into account
116
+ any dimension reductions from model sharding.
117
+ """
118
+ ...
119
+
120
+ @property
121
+ @abstractmethod
122
+ def positional_embedding_type(self) -> PositionalEmbeddingType:
123
+ """
124
+ The type of positional embedding used by the model.
125
+ """
126
+ ...
127
+
128
+ """
129
+ Architectural properties
130
+ """
131
+
132
+ @property
133
+ @abstractmethod
134
+ def activation_dtype(self) -> torch.dtype:
135
+ """
136
+ The activation dtype of the model.
137
+ """
138
+ ...
139
+
140
+ @property
141
+ @abstractmethod
142
+ def mlp_activation_fn(self) -> ActivationType:
143
+ """
144
+ The activation function used in the MLP.
145
+ """
146
+ ...
147
+
148
+ @property
149
+ @abstractmethod
150
+ def norm_type(self) -> NormTypeEnum:
151
+ """
152
+ The type of normalization used in the model.
153
+ """
154
+ ...
155
+
156
+ @property
157
+ @abstractmethod
158
+ def positional_embedding_config(self) -> Optional[RotateHalfConfig]:
159
+ """
160
+ The positional embedding configuration for the model.
161
+ """
162
+ ...
163
+
164
+ """
165
+ Derived helpers
166
+ """
167
+
168
+ @cached_property
169
+ def n_heads_q_local(self) -> int:
170
+ """
171
+ Number of local heads post sharding.
172
+ """
173
+ return get_local_heads(self.tp_rank, self.tp_size, self.n_heads_q, self.n_heads_kv)[0]
174
+
175
+ @cached_property
176
+ def n_heads_kv_local(self) -> int:
177
+ """
178
+ Number of local heads post sharding.
179
+ """
180
+ return get_local_heads(self.tp_rank, self.tp_size, self.n_heads_q, self.n_heads_kv)[1]
181
+
182
+ @property
183
+ def gated_mlp(self) -> bool:
184
+ """
185
+ Return a boolean to determine whether the model uses a gated activation function.
186
+ """
187
+ return is_gated(self.mlp_activation_fn)
188
+
189
+ """
190
+ Method implementations
191
+ """
192
+
193
+ def __init__(self, config: DSModelImplementationConfig, engine_config: RaggedInferenceEngineConfig,
194
+ base_mp_group: MPType) -> None:
195
+ """
196
+ Base implementation for initialization. By default, this will initialize
197
+ the traditional components of a transformer model:
198
+ - Embedding
199
+ - QKV projection
200
+ - Self attention
201
+ - Attention output projection
202
+ - Feed forward network
203
+ - Normalization
204
+ - Unembedding
205
+
206
+ Arguments:
207
+ config (DSModelImplementationConfig): Model-specific configuration. No assumptions
208
+ should be made about this config that are not closely tied to the specific
209
+ model implementation.
210
+ engine_config (RaggedInferenceEngineConfig): Engine configuration.
211
+ base_mp_group (MPType): Base communication group for Tensor-parallel inference.
212
+ """
213
+ super().__init__(config, engine_config, base_mp_group)
214
+
215
+ self.make_norm_layer()
216
+ self.make_qkv_layer()
217
+ self.make_attn_layer()
218
+ self.make_attn_out_layer()
219
+ self.make_mlp_1_layer()
220
+ self.make_mlp_2_layer()
221
+ self.make_embedding_layer()
222
+ self.make_unembedding_layer()
223
+ self._kv_cache_config = None
224
+
225
+ ######### Embedding #########
226
+ def make_embedding_layer(self) -> None:
227
+ """
228
+ Performs setup and creates embedding DSModule. This will set the `self.embed` attribute.
229
+ """
230
+
231
+ embed_config = DSEmbeddingsConfig(
232
+ max_tokens=self._engine_config.state_manager.max_ragged_batch_size,
233
+ residual_dtype=self.activation_dtype,
234
+ embedding_dim=self.model_dim,
235
+ )
236
+
237
+ self.embed = heuristics.instantiate_embed(embed_config, self._engine_config)
238
+
239
+ def transform_embedding_param(self, param: torch.Tensor) -> InferenceParameter:
240
+ """
241
+ Performs embedding sharding along the channels dimension.
242
+ """
243
+ # Until we can do non-contiguous all-gather, we won't shard the embedding parameters.
244
+ param = param.to(self.activation_dtype.value)
245
+ return InferenceParameter.initialize(param)
246
+
247
+ ######### Unembedding #########
248
+ def make_unembedding_layer(self) -> None:
249
+ """
250
+ Performs setup and creates an unembedding layer. This implementation assumes
251
+ normalization prior to the LM head projection. If this does not match the model's
252
+ implementation, override this method. This will set the ``self.unembed`` attribute.
253
+ """
254
+ unembed_dim = sharded_unembed_dim(self.vocab_size, self.tp_rank, self.tp_size)
255
+
256
+ unembed_config = DSUnembedConfig(
257
+ max_tokens=self._engine_config.state_manager.max_ragged_batch_size,
258
+ max_sequences=self._engine_config.state_manager.max_ragged_sequence_count,
259
+ dtype=self.activation_dtype,
260
+ model_dim=self.model_dim,
261
+ vocab_size=unembed_dim,
262
+ norm_type=self.norm_type,
263
+ )
264
+
265
+ self.unembed = heuristics.instantiate_unembed(unembed_config, self._engine_config)
266
+
267
+ if self.tp_size > 1:
268
+ self._comm_logits = torch.empty(self.tp_size,
269
+ self._engine_config.state_manager.max_ragged_sequence_count,
270
+ unembed_dim,
271
+ device=get_accelerator().current_device(),
272
+ dtype=self.activation_dtype.value)
273
+ self._return_logits = torch.empty(self._engine_config.state_manager.max_ragged_sequence_count,
274
+ self.vocab_size,
275
+ device=get_accelerator().current_device(),
276
+ dtype=self.activation_dtype.value)
277
+
278
+ def transform_unembed_param(self, param: torch.Tensor) -> InferenceParameter:
279
+ """
280
+ Performs sharding along the vocab dimension.
281
+ """
282
+ param = shard_unembed_param(param, self.tp_rank, self.tp_size).to(self.activation_dtype.value)
283
+ return InferenceParameter.initialize(param)
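The `_comm_logits` / `_return_logits` buffers above exist because the unembedding weight is sharded along the vocab dimension, so each rank produces logits for only a `vocab_size / tp_size` slice and the shards have to be gathered and re-concatenated before sampling. Below is a minimal single-process sketch of that reassembly; `gather_vocab_shards` is a hypothetical helper, and the real engine would perform an all-gather over `base_mp_group` rather than working on an in-memory list.

```python
import torch

def gather_vocab_shards(shard_logits: list) -> torch.Tensor:
    # shard_logits: one [n_sequences, vocab_shard] tensor per TP rank, in rank order.
    # Equivalent to all-gathering into a [tp_size, n_sequences, vocab_shard] buffer
    # and concatenating along the vocab dimension.
    stacked = torch.stack(shard_logits, dim=0)   # [tp_size, n_seqs, vocab_shard]
    return torch.cat(tuple(stacked), dim=-1)     # [n_seqs, full vocab]

tp_size, n_seqs, vocab = 4, 3, 100
shards = list(torch.randn(n_seqs, vocab).chunk(tp_size, dim=-1))
full = gather_vocab_shards(shards)
assert full.shape == (n_seqs, vocab)
```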
284
+
285
+ ######### QKV #########
286
+ def make_qkv_layer(self) -> None:
287
+ """
288
+ Instantiates the linear projection layer for the QKV linear layer. This sets the
289
+ `self.qkv` attribute.
290
+ """
291
+ out_features = qkv_out_features(self.model_dim, self.tp_rank, self.tp_size, self.head_size, self.n_heads_q,
292
+ self.n_heads_kv)
293
+
294
+ linear_config = DSLinearConfig(
295
+ max_tokens=self._engine_config.state_manager.max_ragged_batch_size,
296
+ in_channels=self.model_dim,
297
+ out_channels=out_features,
298
+ input_dtype=self.activation_dtype,
299
+ output_dtype=self.activation_dtype,
300
+ )
301
+
302
+ self.qkv = heuristics.instantiate_linear(linear_config, self._engine_config)
303
+
304
+ def transform_qkv_param(self, param: torch.Tensor) -> InferenceParameter:
305
+ """
306
+ Passes a QKV parameter to the underlying implementation for any necessary
307
+ transformations.
308
+
309
+ Args:
310
+ param (torch.Tensor): The parameter to transform. This may be either a bias or weight and should have
311
+ the shape (out_neurons, in_neurons)
312
+ """
313
+ param = shard_qkv_param(param, self.tp_rank, self.tp_size, self.head_size, self.n_heads_q, self.n_heads_kv)
314
+ return self.qkv.transform_param(param)
315
+
316
+ ######### Attention #########
317
+ def make_attn_layer(self) -> None:
318
+ """
319
+ Builds the attention layer for the model. This sets the `self.attn` attribute.
320
+ """
321
+ softmax_scale = 1.0 / (self.head_size**0.5)
322
+
323
+ attn_config = DSSelfAttentionConfig(max_tokens=self._engine_config.state_manager.max_ragged_batch_size,
324
+ n_heads_q=self.n_heads_q_local,
325
+ n_heads_kv=self.n_heads_kv_local,
326
+ head_size=self.head_size,
327
+ max_sequences=self._engine_config.state_manager.max_ragged_sequence_count,
328
+ scale_factor=softmax_scale,
329
+ input_dtype=self.activation_dtype,
330
+ output_dtype=self.activation_dtype,
331
+ positional_embedding_type=self.positional_embedding_type,
332
+ positional_embedding_config=self.positional_embedding_config)
333
+
334
+ self.attn = heuristics.instantiate_attention(attn_config, self._engine_config)
335
+
336
+ def get_kv_requirements(self, sequence: DSSequenceDescriptor, max_new_tokens: int,
337
+ max_new_blocks: int) -> Tuple[int, int]:
338
+ """
339
+ See ``DSInferenceModelBase.get_kv_requirements`` for documentation.
340
+
341
+ This method assumes an autoregressive dense attention pattern. Override this method
342
+ if this does not match the model's attention pattern.
343
+ """
344
+ total_tokens = sequence.seen_tokens + max_new_tokens
345
+ req_blocks = ceil_div(total_tokens, self.attn.kv_block_size)
346
+ block_lim = req_blocks - sequence.cur_allocated_blocks
347
+
348
+ if block_lim <= max_new_blocks:
349
+ return max_new_tokens, block_lim
350
+
351
+ token_capacity = (max_new_blocks +
352
+ sequence.cur_allocated_blocks) * self.attn.kv_block_size - sequence.seen_tokens
353
+
354
+ return token_capacity, max_new_blocks
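As a concrete illustration of the block arithmetic in `get_kv_requirements`, here is a minimal standalone sketch (plain Python, independent of DeepSpeed); `kv_requirements` is a hypothetical function that only mirrors the method's locals.

```python
from math import ceil

def kv_requirements(seen_tokens: int, cur_blocks: int, max_new_tokens: int,
                    max_new_blocks: int, kv_block_size: int):
    # Total tokens the sequence would hold after appending the new tokens.
    total_tokens = seen_tokens + max_new_tokens
    # Blocks needed to hold them, rounded up to whole KV blocks.
    req_blocks = ceil(total_tokens / kv_block_size)
    new_blocks_needed = req_blocks - cur_blocks

    if new_blocks_needed <= max_new_blocks:
        # Enough free blocks: all requested tokens fit.
        return max_new_tokens, new_blocks_needed

    # Otherwise, cap the tokens at what the available blocks can hold.
    token_capacity = (max_new_blocks + cur_blocks) * kv_block_size - seen_tokens
    return token_capacity, max_new_blocks

# 100 tokens seen, 2 blocks of 64 already allocated, 50 new tokens requested:
assert kv_requirements(100, 2, 50, 4, 64) == (50, 1)   # one extra block suffices
assert kv_requirements(100, 2, 50, 0, 64) == (28, 0)   # no free blocks: only 28 tokens fit
```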
355
+
356
+ def get_remaining_block_capacity(self, sequence: DSSequenceDescriptor) -> int:
357
+ return sequence.seen_tokens % self.attn.kv_block_size
358
+
359
+ def maybe_allocate_kv(self, sequence: DSSequenceDescriptor, n_new_tokens: int) -> None:
360
+ """
361
+ See ``DSInferenceModelBase.maybe_allocate_kv`` for documentation.
362
+
363
+ This method assumes an autoregressive dense attention pattern. Override this method
364
+ if this does not match the model's attention pattern.
365
+ """
366
+ free_block = self.state_manager.free_blocks[0]
367
+ _, n_needed_blocks = self.get_kv_requirements(sequence, n_new_tokens, free_block)
368
+
369
+ if n_needed_blocks > 0:
370
+ new_blocks = self.state_manager.allocate_blocks(n_needed_blocks)
371
+ sequence.extend_kv_cache(new_blocks)
372
+
373
+ def kv_cache_config(self) -> Tuple[KVCacheConfig, ...]:
374
+ """
375
+ See ``DSInferenceModelBase.kv_cache_config`` for documentation.
376
+
377
+ This method assumes an autoregressive dense attention pattern. Override this method
378
+ if this does not match the model's attention pattern.
379
+ """
380
+ if self._kv_cache_config is None:
381
+ cache_shape = (self.num_layers, self.n_heads_kv_local, self.head_size)
382
+ max_blocks = ceil_div(self.max_sequence_length, self.attn.kv_block_size)
383
+ self._kv_cache_config = KVCacheConfig(block_size=self.attn.kv_block_size,
384
+ cache_shape=cache_shape,
385
+ cache_dtype=self.activation_dtype,
386
+ max_blocks_per_allocation_group=max_blocks)
387
+ return (self._kv_cache_config, )
388
+
389
+ def prepare_batch(self, wrapped_batch: RaggedBatchWrapper) -> None:
390
+ """
391
+ See ``DSInferenceModelBase.prepare_batch`` for documentation.
392
+
393
+ This method assumes an autoregressive dense attention pattern. Override this method
394
+ if this does not match the model's attention pattern.
395
+ """
396
+ self.attn.build_atoms(wrapped_batch)
397
+
398
+ ######### Attention output #########
399
+ def make_attn_out_layer(self) -> None:
400
+ """
401
+ Instantiates the linear projection layer for the attention output linear layer. This sets the
402
+ `self.attn_out` attribute.
403
+ """
404
+ in_features = attn_out_in_features(self.model_dim, self.tp_rank, self.tp_size, self.head_size, self.n_heads_q,
405
+ self.n_heads_kv)
406
+
407
+ linear_config = DSLinearConfig(
408
+ max_tokens=self._engine_config.state_manager.max_ragged_batch_size,
409
+ in_channels=in_features,
410
+ out_channels=self.model_dim,
411
+ input_dtype=self.activation_dtype,
412
+ output_dtype=self.activation_dtype,
413
+ )
414
+
415
+ self.attn_out = heuristics.instantiate_linear(linear_config, self._engine_config)
416
+
417
+ def transform_attn_out_param(self, param: torch.Tensor) -> Optional[InferenceParameter]:
418
+ """
419
+ Shards an attention output projection parameter and passes it to the underlying
420
+ implementation for any necessary transformations. This will return `None` for bias parameters
421
+ if they are not on TP rank 0.
422
+
423
+ Args:
424
+ param (torch.Tensor): The parameter to transform. This may be either a bias or weight and should have
425
+ the shape (out_neurons, in_neurons).
426
+ """
427
+ param = shard_attn_out_param(param, self.tp_rank, self.tp_size, self.head_size, self.n_heads_q,
428
+ self.n_heads_kv)
429
+
430
+ if param is not None:
431
+ param = self.attn_out.transform_param(param)
432
+
433
+ return param
434
+
435
+ ######### MLP #########
436
+ def make_mlp_1_layer(self) -> None:
437
+ """
438
+ Instantiates the linear projection layer for the first MLP in the feedforward network.
439
+ This sets the `self.mlp_1` attribute.
440
+ """
441
+ shard_size = sharded_intermediate_dim(self.intermediate_dim, self.tp_size, self.tp_rank)
442
+
443
+ linear_config = DSLinearConfig(
444
+ max_tokens=self._engine_config.state_manager.max_ragged_batch_size,
445
+ in_channels=self.model_dim,
446
+ out_channels=shard_size,
447
+ activation=self.mlp_activation_fn,
448
+ input_dtype=self.activation_dtype,
449
+ output_dtype=self.activation_dtype,
450
+ )
451
+
452
+ self.mlp_1 = heuristics.instantiate_linear(linear_config, self._engine_config)
453
+
454
+ def transform_mlp_1_param(self, param: torch.Tensor) -> InferenceParameter:
455
+ """
456
+ Shards the first MLP parameter and passes it to the underlying implementation
457
+ for any necessary transformations.
458
+
459
+ Args:
460
+ param (torch.Tensor): The parameter to transform. This may be either a bias or weight and should have
461
+ the shape (out_neurons, in_neurons).
462
+ """
463
+ param = shard_mlp_1_param(param, self.tp_rank, self.tp_size, gated=self.gated_mlp)
464
+
465
+ return self.mlp_1.transform_param(param)
466
+
467
+ def make_mlp_2_layer(self) -> None:
468
+ """
469
+ Instantiates the linear projection layer for the second MLP in the feedforward network.
470
+ This sets the `self.mlp_2` attribute.
471
+ """
472
+ shard_size = sharded_intermediate_dim(self.intermediate_dim, self.tp_size, self.tp_rank)
473
+
474
+ linear_config = DSLinearConfig(
475
+ max_tokens=self._engine_config.state_manager.max_ragged_batch_size,
476
+ in_channels=shard_size,
477
+ out_channels=self.model_dim,
478
+ input_dtype=self.activation_dtype,
479
+ output_dtype=self.activation_dtype,
480
+ )
481
+
482
+ self.mlp_2 = heuristics.instantiate_linear(linear_config, self._engine_config)
483
+
484
+ def transform_mlp_2_param(self, param: torch.Tensor) -> Optional[InferenceParameter]:
485
+ """
486
+ Shards the second MLP parameter and passes it to the underlying implementation
487
+ for any necessary transformations. This will return `None` for bias parameters
488
+ if they are not on TP rank 0.
489
+
490
+ Args:
491
+ param (torch.Tensor): The parameter to transform. This may be either a bias or weight and should have
492
+ the shape (out_neurons, in_neurons).
493
+ """
494
+ param = shard_mlp_2_param(param, self.tp_rank, self.tp_size)
495
+
496
+ if param is not None:
497
+ param = self.mlp_2.transform_param(param)
498
+
499
+ return param
500
+
501
+ ######### Norm #########
502
+ def make_norm_layer(self) -> None:
503
+ """
504
+ Instantiates the normalization layer for the model. This sets the `self.norm` attribute.
505
+
506
+ TODO(cmikeh2): In the future we'll distinguish between the different norm objects,
507
+ but for now we'll just use the same one for all of them.
508
+ """
509
+ norm_config = DSNormConfig(
510
+ max_tokens=self._engine_config.state_manager.max_ragged_batch_size,
511
+ type=self.norm_type,
512
+ channels=self.model_dim,
513
+ residual_dtype=self.activation_dtype,
514
+ input_dtype=self.activation_dtype,
515
+ output_dtype=self.activation_dtype,
516
+ )
517
+
518
+ self.norm = heuristics.instantiate_pre_norm(norm_config, self._engine_config)
519
+
520
+ def transform_norm_param(self, param: torch.Tensor) -> InferenceParameter:
521
+ """
522
+ Passes a normalization parameter to the underlying implementation for any
523
+ necessary transformations.
524
+
525
+ TODO(cmikeh2): In the future we'll distinguish between the different norm objects,
526
+ but for now we'll just use the same one for all of them.
527
+
528
+ Args:
529
+ param (torch.Tensor): The parameter to transform. This may be either a bias or weight and should have
530
+ shape (model_dim,)
531
+ """
532
+ return self.norm.transform_param(param)
533
+
534
+
535
+ class DSMoETransformerModelBase(DSTransformerModelBase):
536
+
537
+ @property
538
+ def n_experts(self) -> int:
539
+ """
540
+ Return the number of experts in the model.
541
+ """
542
+ raise NotImplementedError("Attempted to access an unimplemented number of experts")
543
+
544
+ @property
545
+ def n_top_k(self) -> int:
546
+ """
547
+ Number of experts per token.
548
+ """
549
+ raise NotImplementedError("Attempted to access an unimplemented number of experts per token")
550
+
551
+ @property
552
+ def normalize_expert_scores(self) -> bool:
553
+ """
554
+ Whether to normalize expert scores. If true, sum(expert_scores) = 1.
555
+ """
556
+ raise NotImplementedError("Attempted to access an unimplemented normalization flag")
557
+
558
+ def make_moe_layer(self) -> None:
559
+ """
560
+ Instantiates the MoE layer for the model. This sets the `self.moe` attribute.
561
+ """
562
+ sharded_dim = sharded_intermediate_dim(self.intermediate_dim, self.tp_size, self.tp_rank)
563
+
564
+ moe_config = DSMoEConfig(
565
+ max_tokens=self._engine_config.state_manager.max_ragged_batch_size,
566
+ model_dim=self.model_dim,
567
+ intermediate_features=sharded_dim,
568
+ activation=self.mlp_activation_fn,
569
+ n_experts=self.n_experts,
570
+ top_k=self.n_top_k,
571
+ input_dtype=self.activation_dtype,
572
+ output_dtype=self.activation_dtype,
573
+ normalize_scores=self.normalize_expert_scores,
574
+ )
575
+
576
+ self.moe = heuristics.instantiate_moe(moe_config, self._engine_config)
577
+
578
+ def transform_moe_gate_param(self, param: torch.Tensor) -> InferenceParameter:
579
+ """
580
+ Passes a MoE gate parameter to the underlying implementation for any necessary transformations.
581
+
582
+ TODO(cmikeh2): This will need to be updated/overridden for expert parallelism.
583
+ """
584
+ return self.moe.transform_gate_param(param)
585
+
586
+ def transform_moe_mlp_1_param(self, param: torch.Tensor) -> InferenceParameter:
587
+ """
588
+ Shards the first MoE parameter and passes it to the underlying implementation. Since it's possible for an architecture
589
+ to have both MoE and non-MoE layers, this can't be overloaded onto the MLP1 transform. Furthermore, since
590
+ the MoE DSModule owns both MLP1 and MLP2, under certain sharding conditions it's not possible for the model implementation
591
+ to infer from the shape alone whether to perform the MLP1 or MLP2 transformation. This separation (and the one
592
+ below) is intended to solve both of these issues.
593
+
594
+ Args:
595
+ param (torch.Tensor): The parameter to transform. This should have shape (n_experts, out_neurons, in_neurons).
596
+ """
597
+ param = shard_mlp_1_param(param, self.tp_rank, self.tp_size, gated=self.gated_mlp, is_moe=True)
598
+
599
+ return self.moe.transform_moe_mlp_1_param(param)
600
+
601
+ def transform_moe_mlp_2_param(self, param: torch.Tensor) -> Optional[torch.Tensor]:
602
+ """
603
+ Shards the second MoE param and passes it to the underlying implementation. See the above for context on why this API
604
+ exists.
605
+
606
+ This will return `None` for expert bias params not on TP rank 0. NOTE(cmikeh2): Does it make sense to round-robin assign?
607
+ My intuition is that this will make debugging much more difficult for minimal memory reduction.
608
+
609
+ Args:
610
+ param (torch.Tensor): The parameter to transform. This should have shape (n_experts, out_neurons, in_neurons).
611
+ """
612
+ param = shard_mlp_2_param(param, self.tp_rank, self.tp_size, is_moe=True)
613
+
614
+ if param is not None:
615
+ param = self.moe.transform_moe_mlp_2_param(param)
616
+
617
+ return param
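For readers unfamiliar with the `gated_mlp` / `is_gated` distinction used by the MLP transforms in this file, the sketch below shows a reference gated feed-forward computation. It assumes (as the `gated` flag to `shard_mlp_1_param` suggests) that the gate and up projections are fused row-wise into a single `mlp_1` weight, and SiLU is chosen purely for illustration; this is an eager-mode sketch, not the fused DeepSpeed kernel.

```python
import torch
import torch.nn.functional as F

def gated_mlp_reference(x: torch.Tensor, w1: torch.Tensor, w2: torch.Tensor) -> torch.Tensor:
    """x: [tokens, model_dim]; w1: [2 * intermediate, model_dim]; w2: [model_dim, intermediate]."""
    gate_up = x @ w1.t()                 # [tokens, 2 * intermediate]
    gate, up = gate_up.chunk(2, dim=-1)  # split the fused gate/up projection
    hidden = F.silu(gate) * up           # gated activation (SiLU assumed for illustration)
    return hidden @ w2.t()               # [tokens, model_dim]

x = torch.randn(4, 16)
w1 = torch.randn(2 * 32, 16)
w2 = torch.randn(16, 32)
out = gated_mlp_reference(x, w1, w2)     # shape: [4, 16]
```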
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/layer_container_base.py ADDED
@@ -0,0 +1,355 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import re
7
+ from typing import Type
8
+
9
+ import torch
10
+
11
+ from deepspeed.accelerator import get_accelerator
12
+ from .parameter_base import ParameterBase, ParametrizedList
13
+ from ..inference_parameter import InferenceParameter
14
+
15
+ # Currently have dependency loops for the type hints.
16
+ InferenceModel = Type["InferenceModel"]
17
+ LayerContainer = Type["LayerContainer"]
18
+
19
+ MAPPING_KEY = "PARAM_MAPPING"
20
+ PLIST_HELPERS = "_ds_plist_strip_vals"
21
+
22
+
23
+ def make_finalization_callback(all_names: str):
24
+ """
25
+ Helper method for building the finalization callback for a LayerContainer. This
26
+ is not client code and should not be used or called directly.
27
+ """
28
+
29
+ def finalization_callback(self, param: ParameterBase, finalized_param: torch.Tensor) -> None:
30
+ """
31
+ Callback for when a parameter is finalized.
32
+ """
33
+ self._finalized_params += 1
34
+
35
+ for name in all_names:
36
+ if getattr(self, name) is param:
37
+ setattr(self, name, finalized_param)
38
+
39
+ return finalization_callback
40
+
41
+
42
+ class LayerMetaclass(type):
43
+ """
44
+ MetaClass for the LayerContainer base class. This class will parse the annotations
45
+ of the class that correspond to `ParameterBase` and create None initializers for each
46
+ as well as a finalization callback for when each `ParameterBase` is finalized
47
+ and should be replaced with a Tensor.
48
+ """
49
+
50
+ def __new__(cls, clsname, bases, attrs):
51
+
52
+ annotations = attrs.get("__annotations__", {})
53
+
54
+ for base in bases:
55
+ # We'll pick up all annotations on any base classes. This will allow us to
56
+ # use inheritance to share common parameter groups in base classes.
57
+ if hasattr(base, "__annotations__"):
58
+ annotations.update(base.__annotations__)
59
+
60
+ if hasattr(base, MAPPING_KEY):
61
+ if MAPPING_KEY not in attrs:
62
+ # This is likely a fail state. If a parent has MAPPING KEY but the child does
63
+ # not, then we're guaranteed only a subset of the parameters will be mapped.
64
+ attrs[MAPPING_KEY] = {}
65
+ attrs[MAPPING_KEY].update(getattr(base, MAPPING_KEY))
66
+
67
+ all_names = [name for name, annotation in annotations.items() if issubclass(annotation, ParameterBase)]
68
+
69
+ if MAPPING_KEY in attrs:
70
+ # If we have a mapping key at all, then we will enter the validation mode for building
71
+ # helpers for mapping and ensuring we have complete mapping.
72
+
73
+ # First we'll build a flat list of every dependency for this layer.
74
+ all_deps = set()
75
+ for name in all_names:
76
+ parameter_deps = [
77
+ name for name, annotation in annotations[name].__annotations__.items()
78
+ if issubclass(annotation, (torch.Tensor, ParametrizedList))
79
+ ]
80
+
81
+ all_deps.update([f"{name}.{dep}" for dep in parameter_deps])
82
+
83
+ # Create static helper for doing the string processing only once.
84
+ attrs[PLIST_HELPERS] = []
85
+
86
+ # Iterate over all the mappings
87
+ for src_name, target_or_targets in attrs[MAPPING_KEY].items():
88
+ if isinstance(target_or_targets, str):
89
+ target_or_targets = [target_or_targets]
90
+
91
+ actual_targets = []
92
+ for target_name in target_or_targets:
93
+ base_dependency, dependency_attr = target_name.split(".")
94
+
95
+ # Check for invalid mappings
96
+ if base_dependency not in all_names:
97
+ raise ValueError(
98
+ "Target parameter \"{}\" not found in this layer. Valid targets are {}".format(
99
+ base_dependency, all_names))
100
+ if dependency_attr not in annotations[base_dependency].__annotations__:
101
+ # This check is not universal (see below) if a single dependency is being
102
+ # mapped to by a single row.
103
+ raise ValueError(
104
+ "Target dependency \"{}\" not found on parameter \"{}\". Valid targets are {}".format(
105
+ dependency_attr, base_dependency, annotations[base_dependency].__annotations__.keys()))
106
+ if target_name not in all_deps:
107
+ raise ValueError(
108
+ "Target dependency \"{}\" was targeted with multiple mapping rules.".format(target_name))
109
+
110
+ # If we've made it this far, the dependency definitely exists.
111
+ actual_targets.append(annotations[base_dependency].__annotations__[dependency_attr])
112
+
113
+ all_deps.remove(target_name)
114
+
115
+ are_plists = [issubclass(target, ParametrizedList) for target in actual_targets]
116
+ if all(are_plists):
117
+ # We can do direct sets on everything but ParametrizedLists, so we'll only explicitly
118
+ # handle these here.
119
+ # TODO(cmikeh2): SPLIT, error if more than 1
120
+ glob_count = src_name.count("*")
121
+ if glob_count > 1:
122
+ raise ValueError(
123
+ "ParametrizedList index inference can only work with a single glob: {}".format(src_name))
124
+ elif glob_count == 0:
125
+ raise ValueError(
126
+ "Must have wildcard (*) in source name for ParametrizedList mapping: {}".format(src_name))
127
+
128
+ wildcard_idx = src_name.find("*")
129
+ prefix = src_name[:wildcard_idx]
130
+ suffix = src_name[wildcard_idx + 1:]
131
+ attrs[PLIST_HELPERS].append((prefix, suffix, target_or_targets))
132
+ elif any(are_plists):
133
+ raise ValueError("Cannot mix ParametrizedLists and Tensors in a single mapping rule.")
134
+
135
+ if len(all_deps) > 0:
136
+ raise ValueError(
137
+ "A parameter mapping was provided for {}, but the following dependencies were not mapped: {}".
138
+ format(clsname, all_deps))
139
+
140
+ attrs["finalization_callback"] = make_finalization_callback(all_names)
141
+
142
+ new_obj = super().__new__(cls, clsname, bases, attrs)
143
+
144
+ setattr(new_obj, "_n_params", len(all_names))
145
+ setattr(new_obj, "_annotation_attrs", all_names)
146
+
147
+ return new_obj
148
+
149
+ def __call__(cls, *args, **kwargs):
150
+ instance = cls.__new__(cls, *args, **kwargs)
151
+ instance.__init__(*args, **kwargs)
152
+
153
+ for name, annotation in instance.__annotations__.items():
154
+ if issubclass(annotation, ParameterBase):
155
+ # TODO(cmikeh2): Do we want to make this a property
156
+ # It might also make sense to do this in the base class __init__
157
+ # but since it is tied with the changes made in __new__ it feels
158
+ # to me like it should be here.
159
+ setattr(instance, name, annotation(instance.inference_model, instance))
160
+
161
+ return instance
162
+
163
+
164
+ class LayerContainer(metaclass=LayerMetaclass):
165
+ """
166
+ Abstract base class for containing model parameters.
167
+
168
+ This is primarily a guidance abstraction since we do not put any restrictions
169
+ on how the parameters are stored.
170
+
171
+ To use this class, annotate the class with `ParameterBase` subclasses and give them
172
+ names. As a checkpoint is loaded into this container, the `ParameterBase` instances
173
+ will be replaced with realized Tensors as soon as each of their dependencies are met.
174
+
175
+ To enable automatic mapping, add a static attribute `PARAM_MAPPING` to the class
176
+ definition. This should be a dictionary mapping from a source string to one or
177
+ more dependencies.
178
+
179
+ ```python
180
+ class MyLayer(LayerContainer):
181
+ PARAM_MAPPING = {
182
+ "path.to.param.dependency", "container_param_1.dependency",
183
+ "path.to.param2.dependency", "container_param_2.dependency",
184
+ "path.to.param3.*.dependency", "container_param_3.list_dependency"
185
+ }
186
+
187
+ ...
188
+ ```
189
+ """
190
+
191
+ def __init__(self, model: InferenceModel) -> None:
192
+ """
193
+ Initialization of the LayerContainer. This method does not need to be overridden
194
+ for any children classes.
195
+
196
+ Args:
197
+ model (InferenceModel): Inference model that will be used to shard and transform
198
+ parameters correctly, as well as provide specific information about the model
199
+ for `ParametrizedList`s that may be part of one of the member `ParameterBase`s.
200
+ """
201
+ self.inference_model = model
202
+ self._finalized_params = 0
203
+
204
+ def _initialization_checker(self, check_device: bool = True) -> bool:
205
+ """
206
+ Returns whether or not all parameters have been initialized and transformed by
207
+ the model. Once this returns True, all the `ParameterBase` instances will be
208
+ torch.Tensors.
209
+ """
210
+ if self._finalized_params != self.n_params:
211
+ return False
212
+
213
+ for name in self._annotation_attrs:
214
+ tensor = getattr(self, name)
215
+ if tensor is None:
216
+ continue
217
+ elif not isinstance(tensor, InferenceParameter):
218
+ raise ValueError("Layer should be finalized, but {} ({}) is neither InferenceParameter or None".format(
219
+ name, type(tensor)))
220
+ elif check_device and tensor.device != torch.device(get_accelerator().current_device()):
221
+ raise RuntimeError("Layer should be finalized, but {} is not on device {}".format(
222
+ name,
223
+ get_accelerator().current_device()))
224
+ return True
225
+
226
+ @property
227
+ def is_populated(self) -> bool:
228
+ """
229
+ Returns whether or not all parameters have been populated by the checkpoint engine, but
230
+ does not validate that the parameters are on the correct device.
231
+ """
232
+ return self._initialization_checker(check_device=False)
233
+
234
+ @property
235
+ def is_initialized(self) -> bool:
236
+ """
237
+ Returns whether or not all parameters have been initialized and transformed by
238
+ the model and are located on the appropriate device. Once this returns True, all
239
+ the `ParameterBase` instances will be ``InferenceParameter``s or explicitly set to ``None``.
240
+ """
241
+ return self._initialization_checker()
242
+
243
+ @property
244
+ def n_params(self) -> int:
245
+ """
246
+ The number of parameters this container holds. This is a read-only value
247
+ that is set by the metaclass.
248
+ """
249
+ return self._n_params
250
+
251
+ @property
252
+ def annotation_attrs(self) -> list:
253
+ return self._annotation_attrs
254
+
255
+ @property
256
+ def mapping_params(self) -> dict:
257
+ return getattr(self.__class__, MAPPING_KEY, {})
258
+
259
+ @property
260
+ def plist_helpers(self) -> list:
261
+ return getattr(self.__class__, PLIST_HELPERS, [])
262
+
263
+ def direct_injection(self, name: str, tensor: InferenceParameter) -> None:
264
+
265
+ if name not in self._annotation_attrs:
266
+ raise ValueError(f"Cannot directly inject {name}, not a valid parameter.")
267
+
268
+ setattr(self, name, tensor)
269
+ self._finalized_params += 1
270
+
271
+ def set_dependency(self, dep_name: str, dep_value: torch.Tensor) -> None:
272
+ """
273
+ Set dependency can be used for managing dependencies when a mapping is provided
274
+ in the class definition for the layer. The dep_name here should have any prefix
275
+ for transformer layers removed (such as model.layers.*.attn.qkv.weight -> attn.qkv.weight).
276
+
277
+ Args:
278
+ dep_name (str): The name of the dependency to set.
279
+ dep_value (torch.Tensor): The value to set the dependency to.
280
+ """
281
+
282
+ def get_dep_name_target(dep_name: str) -> str:
283
+ """
284
+ Helper method for getting the target name for a dependency from the
285
+ mapping params. Tries to match exact string first, then looks for
286
+ wildcards and attempts regex matching. Will return empty string if
287
+ no match found.
288
+ """
289
+ if dep_name in self.mapping_params:
290
+ # If we have an exact match, it's a direct mapping and we can
291
+ # immediately set the value.
292
+ return self.mapping_params[dep_name]
293
+
294
+ matched_targets = []
295
+ for key, target in self.mapping_params.items():
296
+ regex_key = key.replace("*", ".*")
297
+ if re.match(regex_key, dep_name):
298
+ matched_targets.append(target)
299
+ if len(matched_targets) > 1:
300
+ raise ValueError(f"Multiple targets matched for dependency {dep_name}: {matched_targets}")
301
+ if matched_targets:
302
+ return matched_targets[0]
303
+ return ""
304
+
305
+ if dep_name in self.mapping_params:
306
+ # If we have an exact match, it's a direct mapping and we can immediately set
307
+ # the value.
308
+ target = self.mapping_params[dep_name]
309
+
310
+ # Convert single targets to a list for consistency
311
+ if isinstance(target, str):
312
+ target = [target]
313
+
314
+ for target_name in target:
315
+ # Double setting doesn't set the attribute correctly, so we do a getattr then setattr
316
+ target_param_name, target_dependency_name = target_name.split(".")
317
+ target_param = getattr(self, target_param_name)
318
+ setattr(target_param, target_dependency_name, dep_value)
319
+ return
320
+
321
+ # Otherwise we need to map to one of the parameter lists.
322
+ for prefix, suffix, dests in self.plist_helpers:
323
+ if dep_name.startswith(prefix) and dep_name.endswith(suffix):
324
+ # We have a match, so we can set the value.
325
+ target_idx = int(dep_name[len(prefix):-len(suffix)])
326
+
327
+ # Convert single targets to a list for consistency
328
+ if isinstance(dests, str):
329
+ dests = [dests]
330
+
331
+ for dest in dests:
332
+ target_param_name, target_dependency_name = dest.split(".")
333
+ target_param = getattr(self, target_param_name)
334
+ target_dependency = getattr(target_param, target_dependency_name)
335
+ target_dependency[target_idx] = dep_value
336
+ return
337
+
338
+ # TODO: Refactor this with the help of cmikeh2
339
+ # We should be able to combine this with the wildcard matching above.
340
+ target = get_dep_name_target(dep_name)
341
+ if target:
342
+ # Convert single targets to a list for consistency
343
+ if isinstance(target, str):
344
+ target = [target]
345
+
346
+ for target_name in target:
347
+ # Double setting doesn't set the attribute correctly, so we do a getattr then setattr
348
+ target_param_name, target_dependency_name = target_name.split(".")
349
+ target_param = getattr(self, target_param_name)
350
+ setattr(target_param, target_dependency_name, dep_value)
351
+ return
352
+
353
+ raise ValueError(
354
+ "Could not find a mapping for dependency \"{}\". Check that it is included in the ``MAPPING_PARAMS``. See docstring for more on ``MAPPING_PARAMS``"
355
+ .format(dep_name))
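To make the mapping machinery above more concrete, the sketch below shows the string handling that `set_dependency` relies on for wildcard entries in `PARAM_MAPPING`: an exact key is looked up directly, while a `*` key is reduced to a (prefix, suffix) pair and the integer index is recovered from the checkpoint name and used to index the target `ParametrizedList`. `resolve_wildcard` and the parameter names are hypothetical and only illustrate the pattern.

```python
def resolve_wildcard(src_pattern: str, checkpoint_name: str):
    """Given a mapping key like "experts.*.w1.weight" and a concrete checkpoint name,
    return the integer index captured by the wildcard, or None if it does not match."""
    wildcard_idx = src_pattern.find("*")
    prefix, suffix = src_pattern[:wildcard_idx], src_pattern[wildcard_idx + 1:]
    if checkpoint_name.startswith(prefix) and checkpoint_name.endswith(suffix):
        return int(checkpoint_name[len(prefix):-len(suffix)])
    return None

# A hypothetical layer maps "experts.*.w1.weight" onto a ParametrizedList dependency:
assert resolve_wildcard("experts.*.w1.weight", "experts.7.w1.weight") == 7
assert resolve_wildcard("experts.*.w1.weight", "attn.qkv.weight") is None
```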
venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/parameter_base.py ADDED
@@ -0,0 +1,255 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import weakref
7
+ from abc import abstractmethod
8
+ from typing import Type
9
+
10
+ import torch
11
+
12
+ # Currently have dependency loops for the type hints.
13
+ InferenceModel = Type["InferenceModel"]
14
+ LayerContainer = Type["LayerContainer"]
15
+
16
+ MAPPING_KEY = "PARAM_MAPPING"
17
+
18
+
19
+ def make_param_getter(clsname, param):
20
+ """
21
+ Normal getter implementation for a property.
22
+ """
23
+
24
+ def param_getter(self):
25
+ return getattr(self, f"__{clsname}__{param}")
26
+
27
+ return param_getter
28
+
29
+
30
+ def make_param_setter(clsname, param):
31
+ """
32
+ Setter implementation that will call complete component to potentially
33
+ finalize the parameter.
34
+ """
35
+
36
+ def param_setter(self, value):
37
+ setattr(self, f"__{clsname}__{param}", value)
38
+ self.complete_component()
39
+
40
+ return param_setter
41
+
42
+
43
+ def make_readonly_setter():
44
+ """
45
+ Setter implementation that will raise an error if called.
46
+ """
47
+
48
+ def paramlist_setter(self, value):
49
+ raise ValueError("Cannot set a ParametrizedList directly.")
50
+
51
+ return paramlist_setter
52
+
53
+
54
+ class ParameterMetaclass(type):
55
+ """
56
+ MetaClass for the ParameterBase base class. This class will parse the class annotations
57
+ and create properties for each of the dependencies. A dependency can either
58
+ be annotated as a `torch.Tensor`, which is interpreted as a named Tensor, or as a `ParametrizedList`
59
+ subclass.
60
+ """
61
+
62
+ def __new__(cls, clsname, bases, attrs):
63
+
64
+ annotations = attrs.get("__annotations__", {})
65
+ dependencies = {
66
+ name: annotation
67
+ for name, annotation in annotations.items() if issubclass(annotation, (torch.Tensor, ParametrizedList))
68
+ }
69
+ n_dependencies = len(dependencies)
70
+
71
+ # Create properties for each of our dependencies
72
+ for d_name, d_type in dependencies.items():
73
+ if issubclass(d_type, ParametrizedList):
74
+ assert hasattr(
75
+ d_type, "count_attr"
76
+ ), "ParametrizedList must have a count_attr attribute to access on the inference module."
77
+ attrs[d_name] = property(make_param_getter(clsname, d_name), make_readonly_setter())
78
+ else: # torch.Tensor
79
+ attrs[d_name] = property(make_param_getter(clsname, d_name), make_param_setter(clsname, d_name))
80
+
81
+ new_cls = super().__new__(cls, clsname, bases, attrs)
82
+ new_cls.n_dependencies = n_dependencies
83
+
84
+ return new_cls
85
+
86
+ def __call__(cls, *args, **kwargs):
87
+ new_obj = super().__call__(*args, **kwargs)
88
+ new_obj.__init__(*args, **kwargs)
89
+
90
+ setattr(new_obj, "dest_param", None)
91
+
92
+ # Initialize our dependences to None/empty `ParametrizedList`s
93
+ for name, annotation in new_obj.__annotations__.items():
94
+ if issubclass(annotation, ParametrizedList):
95
+ #TODO(jeff): update assert with this, model implementation attribute does not align or missing wrt the ParametrizedList attributes
96
+ assert hasattr(
97
+ new_obj.inference_model, annotation.count_attr
98
+ ), f"new_obj={new_obj.__class__.__name__}, name={name}, annotation.count_attr={annotation.count_attr}"
99
+ param_list = annotation(new_obj, getattr(new_obj.inference_model, annotation.count_attr))
100
+ setattr(new_obj, f"__{new_obj.__class__.__name__}__{name}", param_list)
101
+ else: # torch.Tensor
102
+ setattr(new_obj, f"__{new_obj.__class__.__name__}__{name}", None)
103
+
104
+ return new_obj
105
+
106
+
107
+ class ParameterBase(metaclass=ParameterMetaclass):
108
+ """
109
+ A ParameterBase allows us to consolidate tracking the dependencies of loading a parameter from
110
+ a checkpoint into a single object. This class should not be used directly, but rather subclassed
111
+ with its dependencies declared as `torch.Tensor` and/or `ParametrizedList` annotations.
112
+ """
113
+
114
+ # inference_model: InferenceModel
115
+ """
116
+ Inference model that will provide context on how to shard and transform the parameter.
117
+ """
118
+
119
+ #completed_components: int
120
+ """
121
+ How many of the layer dependencies have been met. This is used to determine when the parameter
122
+ is ready to be finalized. A ParametrizedList counts as a single dependency for the purposes
123
+ of this counter.
124
+ """
125
+
126
+ def __init__(self, model: InferenceModel, parent_container: LayerContainer) -> None:
127
+ """
128
+ Direct constructor. This should not be called from client code.
129
+
130
+ Args:
131
+ model (InferenceModel): Inference model that will be used to shard and transform the
132
+ parameter in `finalize`.
133
+ parent_container (LayerContainer): The parent container that this parameter is a member
134
+ of. We will build a weakref to this container to call the finalization callback.
135
+ """
136
+ self.inference_model = model
137
+ self.completed_components = 0
138
+ self.parent_container = weakref.ref(parent_container)
139
+
140
+ @abstractmethod
141
+ def finalize(self) -> torch.Tensor:
142
+ """
143
+ Finalize the parameter after all of its source parameters have been set. This method
144
+ will be automatically called when all inputs have been set. It should return the Tensor
145
+ with all transformations performed on it.
146
+ """
147
+ pass
148
+
149
+ def complete_component(self) -> None:
150
+ """
151
+ Mark a component as completed. This should be called by the relevant setter of a direct
152
+ property or a ParametrizedList. This method will automatically call `finalize` when all
153
+ dependencies have been met and then call the finalization callback on the parent container.
154
+
155
+ Once the finalization callback has been called, the parameter will be replaced with the
156
+ finalized Tensor on the parent container, and this instance will be destroyed.
157
+ """
158
+ self.completed_components += 1
159
+
160
+ if self.completed_components != self.n_dependencies:
161
+ return
162
+
163
+ finalized_param = self.finalize()
164
+ self.parent_container().finalization_callback(self, finalized_param)
165
+
166
+
167
+ class ParametrizedList:
168
+ """
169
+ A ParametrizedList is a list of parameters that are dependencies
170
+ of a `ParameterBase` but may vary in length depending on the model
171
+ configuration (rather than architecture). For example, a MoE layer
172
+ may have a different number of experts depending on the size of the model.
173
+
174
+ This class is used to manage these lists and provide integer indexing
175
+ of a single component rather than accessing names directly. For example,
176
+ it tends to be more natural to access the 8th expert with `experts[8]`
177
+ rather than a name like `expert_8`, especially as an attribute.
178
+
179
+ To inherit from this class, set static variables `name` and `count_attr`.
180
+
181
+ ```python
182
+ class MyParametrizedList(ParametrizedList):
183
+ count_attr: str = "my_list_count"
184
+ ```
185
+
186
+ In the above example, `my_list_count` should be an accessible attribute
187
+ of the inference model (i.e. via `self.inference_model.my_list_count`).
188
+
189
+ NOTE: There are some APIs in which this type cannot be used as if it is
190
+ just a list of Tensors. For example, `torch.cat(param_list)` will not work.
191
+ However, you can make it compatible with a tuple wrapper:
192
+ `torch.cat(tuple(param_list))`
193
+ """
194
+
195
+ n_params: int
196
+ """
197
+ Number of params this list contains.
198
+ """
199
+
200
+ param: ParameterBase
201
+ """
202
+ WeakRef to the owning parameter.
203
+ """
204
+
205
+ def __init__(self, param: ParameterBase, n_params: int) -> None:
206
+ """
207
+ Constructor. Should not be called from client code.
208
+
209
+ Args:
210
+ param (ParameterBase): The owning parameter.
211
+ n_params (int): The number of parameters this list contains. This should be the value of the `count_attr` attribute on the inference model.
212
+ """
213
+ self.n_params = n_params
214
+ self.set_params = 0
215
+ self.param = weakref.ref(param)
216
+ self._params = [None] * n_params
217
+
218
+ def __getitem__(self, index):
219
+ return self._params[index]
220
+
221
+ def __setitem__(self, index, value):
222
+ if self._params[index] is not None:
223
+ raise ValueError("Cannot set a parameter twice.")
224
+
225
+ self._params[index] = value
226
+ self.set_params += 1
227
+
228
+ if self.set_params != self.n_params:
229
+ return
230
+
231
+ self.param().complete_component()
232
+
233
+ def __iter__(self):
234
+ return iter(self._params)
235
+
236
+
237
+ def ParamList(attr: str):
238
+ """
239
+ Helper to create a subclass of ParametrizedList with the desired `count_attr`.
240
+
241
+ In this manner, we can annotate the type of a Parameter dependency with the
242
+ following:
243
+
244
+ ```python
245
+ class CustomParameter(ParameterBase):
246
+ dependency_list: ParamList("dependencies_count_name")
247
+ ```
248
+
249
+ where "dependencies_count_name" is the name of the attribute on the inference model.
250
+ """
251
+
252
+ class ParametrizedListInstance(ParametrizedList):
253
+ count_attr: str = attr
254
+
255
+ return ParametrizedListInstance
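As a usage sketch of the machinery in this file: a concrete parameter declares its dependencies as annotations and implements `finalize`, which fires automatically once every dependency has been set (for example via `LayerContainer.set_dependency`). The `ToyQKVParameter` class below is hypothetical and only illustrates the pattern; real implementations live under `common_parameters/` and typically defer the transformation to the inference model.

```python
import torch

from deepspeed.inference.v2.model_implementations.parameter_base import ParameterBase

class ToyQKVParameter(ParameterBase):
    """Hypothetical parameter with two Tensor dependencies that are fused on finalize."""
    q_weight: torch.Tensor
    kv_weight: torch.Tensor

    def finalize(self) -> torch.Tensor:
        # A real implementation would usually hand the fused tensor to the model,
        # e.g. `return self.inference_model.transform_qkv_param(fused)`.
        fused = torch.cat((self.q_weight, self.kv_weight), dim=0)
        return fused
```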
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (323 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/__pycache__/ds_module.cpython-310.pyc ADDED
Binary file (2.52 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/__pycache__/heuristics.cpython-310.pyc ADDED
Binary file (6.68 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/__pycache__/module_registry.cpython-310.pyc ADDED
Binary file (2.38 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (634 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/moe_config.cpython-310.pyc ADDED
Binary file (883 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/configs/__pycache__/unembed_config.cpython-310.pyc ADDED
Binary file (794 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/__init__.py ADDED
@@ -0,0 +1,13 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ # Imports for registering ops
7
+ from .attention import *
8
+ from .linear import *
9
+ from .post_norm import *
10
+ from .pre_norm import *
11
+ from .embedding import *
12
+ from .unembed import *
13
+ from .moe import *
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (354 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/embedding/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .ragged_embedding import DSRaggedEmbedding
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/embedding/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (282 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/embedding/__pycache__/ragged_embedding.cpython-310.pyc ADDED
Binary file (2.76 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/embedding/ragged_embedding.py ADDED
@@ -0,0 +1,77 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Any, Dict, Optional
7
+
8
+ import torch
9
+
10
+ from deepspeed.accelerator import get_accelerator
11
+ from ....allocator import empty_from
12
+ from ....inference_utils import DtypeEnum
13
+ from ....kernels.ragged_ops import RaggedEmbeddingKernel
14
+ from ....ragged import RaggedBatchWrapper
15
+ from ...interfaces import DSEmbeddingBase, DSEmbeddingRegistry
16
+ from ...configs import DSEmbeddingsConfig
17
+
18
+
19
+ @DSEmbeddingRegistry.register_module
20
+ class DSRaggedEmbedding(DSEmbeddingBase):
21
+
22
+ @staticmethod
23
+ def name():
24
+ return 'ragged_embedding'
25
+
26
+ @staticmethod
27
+ def supports_config(config: DSEmbeddingsConfig) -> bool:
28
+
29
+ if DtypeEnum(config.residual_dtype) not in [DtypeEnum.fp16, DtypeEnum.bf16, DtypeEnum.fp32]:
30
+ return False
31
+
32
+ if config.use_token_type:
33
+ return False
34
+
35
+ if config.output_normalization is not None:
36
+ return False
37
+
38
+ try:
39
+ _ = RaggedEmbeddingKernel(config.residual_dtype, torch.int32, config.embedding_dim)
40
+ except ValueError:
41
+ return False
42
+
43
+ return True
44
+
45
+ def __init__(self, config: DSEmbeddingsConfig, implementation_config: Dict[str, Any]) -> None:
46
+ super().__init__(config, implementation_config)
47
+
48
+ self.embed_offset = self._config.positional_offset
49
+
50
+ # TODO(cmikeh2): How do we want to avoid the int32 vs int64 issue?
51
+ self._ragged_embed = RaggedEmbeddingKernel(self._config.residual_dtype, torch.int32,
52
+ self._config.embedding_dim)
53
+
54
+ self._output = torch.empty((self._config.max_tokens, self._config.embedding_dim),
55
+ dtype=self._config.residual_dtype,
56
+ device=get_accelerator().current_device())
57
+
58
+ @property
59
+ def output(self) -> torch.Tensor:
60
+ return self._output
61
+
62
+ def forward(self,
63
+ ragged_batch: RaggedBatchWrapper,
64
+ word_embeddings: torch.Tensor,
65
+ position_embeddings: Optional[torch.Tensor] = None) -> torch.Tensor:
66
+ """
67
+ Parameters:
68
+ ragged_batch (RaggedBatchWrapper): The input ids and associated ragged batch metadata.
69
+ word_embeddings (torch.Tensor): The word embedding table
70
+ """
71
+ output = empty_from(self._output, (ragged_batch.tensor_toks, self._config.embedding_dim))
72
+ self._ragged_embed(output,
73
+ ragged_batch,
74
+ word_embeddings,
75
+ position_embed_weight=position_embeddings,
76
+ position_embed_offset=self.embed_offset)
77
+ return output
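The `empty_from(self._output, ...)` pattern above (and in the modules that follow) reuses a single preallocated, max-sized buffer and slices out a view matching the current ragged batch, avoiding per-step allocations. Below is a minimal sketch of the idea, with a hypothetical `empty_from_view` helper standing in for DeepSpeed's `empty_from`.

```python
import torch

def empty_from_view(buffer: torch.Tensor, shape) -> torch.Tensor:
    # Return a view over the first elements of the persistent buffer with the requested shape.
    numel = 1
    for s in shape:
        numel *= s
    return buffer.flatten()[:numel].view(shape)

max_tokens, embed_dim = 2048, 4096
output_buffer = torch.empty((max_tokens, embed_dim), dtype=torch.float16)

# A batch with only 17 live tokens writes into a [17, embed_dim] view of the same storage.
step_out = empty_from_view(output_buffer, (17, embed_dim))
assert step_out.data_ptr() == output_buffer.data_ptr()
```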
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/post_norm/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .cuda_post_ln import DSPostLNCUDAModule
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/post_norm/cuda_post_ln.py ADDED
@@ -0,0 +1,56 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Any, Dict, Tuple
7
+
8
+ import torch
9
+
10
+ from deepspeed.accelerator import get_accelerator
11
+ from ...interfaces import DSPostNormBase, DSPostNormRegistry
12
+ from ...configs import DSNormConfig
13
+ from ....kernels.core_ops.cuda_layer_norm.cuda_post_ln import CUDAFPPostLN
14
+ from ....allocator import empty_from
15
+ from ....inference_parameter import InferenceParameter
16
+
17
+
18
+ @DSPostNormRegistry.register_module
19
+ class DSPostLNCUDAModule(DSPostNormBase):
20
+
21
+ @staticmethod
22
+ def name():
23
+ return 'cuda_post_ln'
24
+
25
+ @staticmethod
26
+ def supports_config(config: DSNormConfig):
27
+ if len(set([config.residual_dtype, config.input_dtype, config.output_dtype])) != 1:
28
+ return False
29
+
30
+ try:
31
+ _ = CUDAFPPostLN(config.channels, config.residual_dtype)
32
+ except ValueError:
33
+ return False
34
+ return True
35
+
36
+ def __init__(self, config: DSNormConfig, implementation_config: Dict[str, Any]):
37
+ super().__init__(config, implementation_config)
38
+ self._fp_post_ln = CUDAFPPostLN(self._config.channels, self._config.residual_dtype, epsilon=self._config.eps)
39
+
40
+ self._output = torch.empty((config.max_tokens, config.channels),
41
+ dtype=config.output_dtype,
42
+ device=get_accelerator().current_device())
43
+
44
+ def transform_param(self, param: torch.Tensor) -> InferenceParameter:
45
+ param = param.to(self._config.input_dtype)
46
+ return InferenceParameter.initialize(param)
47
+
48
+ def forward(self, residual: torch.Tensor, hidden_in: torch.Tensor, gamma: torch.Tensor,
49
+ beta: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
50
+ """
51
+ Since the CUDA FP only supports all data types being the same, we will alias the residual
52
+ with our output.
53
+ """
54
+ self._residual_output = empty_from(self._output, residual.shape)
55
+ self._fp_post_ln(residual, residual, hidden_in, gamma, beta)
56
+ return residual, residual
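For reference, the fused `CUDAFPPostLN` call above corresponds (up to in-place aliasing and numerics) to the standard post-layer-norm computation, in which the block output is added to the residual before normalization and the result also becomes the new residual. A hedged eager-mode sketch of that math, assuming the usual post-LN formulation:

```python
import torch
import torch.nn.functional as F

def post_ln_reference(residual: torch.Tensor, hidden_in: torch.Tensor,
                      gamma: torch.Tensor, beta: torch.Tensor, eps: float = 1e-5) -> torch.Tensor:
    # Post-LN: add the block output to the residual, then layer-normalize the sum.
    summed = residual + hidden_in
    return F.layer_norm(summed, (summed.shape[-1],), weight=gamma, bias=beta, eps=eps)

tokens, channels = 8, 64
residual = torch.randn(tokens, channels)
hidden_in = torch.randn(tokens, channels)
gamma, beta = torch.ones(channels), torch.zeros(channels)
out = post_ln_reference(residual, hidden_in, gamma, beta)  # also reused as the next residual
```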
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (331 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/__pycache__/cuda_pre_ln.cpython-310.pyc ADDED
Binary file (2.92 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/__pycache__/cuda_pre_rms.cpython-310.pyc ADDED
Binary file (3.03 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/cuda_pre_ln.py ADDED
@@ -0,0 +1,69 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Any, Dict, Optional, Tuple
7
+
8
+ import torch
9
+
10
+ from deepspeed.accelerator import get_accelerator
11
+ from ...interfaces import DSPreNormBase, DSPreNormRegistry
12
+ from ...configs import DSNormConfig, NormTypeEnum
13
+ from ....kernels.core_ops.cuda_layer_norm.cuda_pre_ln import CUDAFPPreLN
14
+ from ....kernels.core_ops.cuda_layer_norm.cuda_ln import CUDAFPLN
15
+ from ....allocator import empty_from
16
+ from ....inference_parameter import InferenceParameter
17
+
18
+
19
+ @DSPreNormRegistry.register_module
20
+ class DSPreLNCUDAModule(DSPreNormBase):
21
+
22
+ @staticmethod
23
+ def name():
24
+ return 'cuda_pre_ln'
25
+
26
+ @staticmethod
27
+ def supports_config(config: DSNormConfig):
28
+ type = NormTypeEnum(config.type)
29
+ if type != NormTypeEnum.LayerNorm:
30
+ return False
31
+
32
+ if len(set([config.residual_dtype, config.input_dtype, config.output_dtype])) != 1:
33
+ return False
34
+
35
+ try:
36
+ _ = CUDAFPPreLN(config.channels, config.residual_dtype)
37
+ except ValueError:
38
+ return False
39
+ return True
40
+
41
+ def __init__(self, config: DSNormConfig, implementation_config: Dict[str, Any]):
42
+ super().__init__(config, implementation_config)
43
+ self._fp_pre_ln = CUDAFPPreLN(self._config.channels, self._config.residual_dtype, epsilon=self._config.eps)
44
+ self._fp_ln = CUDAFPLN(self._config.channels, self._config.residual_dtype, epsilon=self._config.eps)
45
+
46
+ # Buffers for the hidden output (residual is updated in-place)
47
+ self._hidden_output = torch.empty((config.max_tokens, config.channels),
48
+ dtype=config.output_dtype,
49
+ device=get_accelerator().current_device())
50
+
51
+ def transform_param(self, param: torch.Tensor) -> InferenceParameter:
52
+ param = param.to(self._config.input_dtype)
53
+ return InferenceParameter.initialize(param)
54
+
55
+ def forward(self, residual: torch.Tensor, hidden_in: Optional[torch.Tensor], gamma: torch.Tensor,
56
+ beta: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
57
+ """
58
+ Since the CUDA FP only supports all data types being the same, we will alias the residual
59
+ with our output.
60
+
61
+ If hidden_in is None, that means we do not need to perform the residual add and will
62
+ only return the hidden output modified.
63
+ """
64
+ hidden_out = empty_from(self._hidden_output, residual.shape)
65
+ if hidden_in is None:
66
+ self._fp_ln(hidden_out, residual, gamma, beta)
67
+ else:
68
+ self._fp_pre_ln(residual, hidden_out, residual, hidden_in, gamma, beta)
69
+ return residual, hidden_out
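By contrast with the post-LN module above, the pre-LN path first folds `hidden_in` into the residual and then normalizes a separate hidden output, returning both. A hedged eager-mode sketch of what `CUDAFPPreLN` / `CUDAFPLN` stand in for here, assuming the standard pre-LN formulation:

```python
import torch
import torch.nn.functional as F

def pre_ln_reference(residual, hidden_in, gamma, beta, eps: float = 1e-5):
    # residual_out accumulates the previous block's output; hidden_out feeds the next block.
    residual_out = residual if hidden_in is None else residual + hidden_in
    hidden_out = F.layer_norm(residual_out, (residual_out.shape[-1],),
                              weight=gamma, bias=beta, eps=eps)
    return residual_out, hidden_out

tokens, channels = 8, 64
res, hid = pre_ln_reference(torch.randn(tokens, channels), torch.randn(tokens, channels),
                            torch.ones(channels), torch.zeros(channels))
```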
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/pre_norm/cuda_pre_rms.py ADDED
@@ -0,0 +1,79 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Any, Dict, Optional, Tuple
7
+
8
+ import torch
9
+
10
+ from deepspeed.accelerator import get_accelerator
11
+ from ...interfaces import DSPreNormBase, DSPreNormRegistry
12
+ from ...configs import DSNormConfig, NormTypeEnum
13
+ from ....kernels.core_ops import CUDARMSNorm, CUDARMSPreNorm
14
+ from ....allocator import empty_from
15
+ from ....inference_parameter import InferenceParameter
16
+
17
+
18
+ @DSPreNormRegistry.register_module
19
+ class DSPreRMSCUDAModule(DSPreNormBase):
20
+
21
+ @staticmethod
22
+ def name():
23
+ return 'cuda_pre_rms'
24
+
25
+ @staticmethod
26
+ def supports_config(config: DSNormConfig):
27
+ type = NormTypeEnum(config.type)
28
+ if type != NormTypeEnum.RMSNorm:
29
+ return False
30
+
31
+ if len(set([config.residual_dtype, config.input_dtype, config.output_dtype])) != 1:
32
+ return False
33
+
34
+ try:
35
+ # Only need to check one since the support matrix for the two rms kernels is the same
36
+ _ = CUDARMSPreNorm(config.channels, config.residual_dtype)
37
+ except ValueError:
38
+ return False
39
+ return True
40
+
41
+ def __init__(self, config: DSNormConfig, implementation_config: Dict[str, Any]):
42
+ super().__init__(config, implementation_config)
43
+ self._fp_rms = CUDARMSNorm(self._config.channels, self._config.residual_dtype, epsilon=self._config.eps)
44
+ self._fp_rms_pre = CUDARMSPreNorm(self._config.channels, self._config.residual_dtype, epsilon=self._config.eps)
45
+
46
+ # Buffers for both the hidden and residual outputs
47
+ self._hidden_output = torch.empty((config.max_tokens, config.channels),
48
+ dtype=config.output_dtype,
49
+ device=get_accelerator().current_device())
50
+ self._residual_output = torch.empty((config.max_tokens, config.channels),
51
+ dtype=config.output_dtype,
52
+ device=get_accelerator().current_device())
53
+
54
+ def transform_param(self, param: torch.Tensor) -> InferenceParameter:
55
+ param = param.to(self._config.input_dtype)
56
+ return InferenceParameter.initialize(param)
57
+
58
+ def forward(self,
59
+ residual: torch.Tensor,
60
+ hidden_in: Optional[torch.Tensor],
61
+ gamma: torch.Tensor,
62
+ beta: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, torch.Tensor]:
63
+ """
64
+ Since the CUDA FP only supports all data types being the same, we will alias the residual
65
+ with our output.
66
+
67
+ If hidden_in is None, that means we do not need to perform the residual add and will
68
+ only return the hidden output modified.
69
+ """
70
+ assert beta is None, "Beta is not supported for RMSNorm"
71
+
72
+ hidden_out = empty_from(self._hidden_output, residual.shape)
73
+ if hidden_in is None:
74
+ self._fp_rms(hidden_out, residual, gamma)
75
+ residual_out = residual
76
+ else:
77
+ residual_out = empty_from(self._residual_output, residual.shape)
78
+ self._fp_rms_pre(residual_out, hidden_out, residual, hidden_in, gamma)
79
+ return residual_out, hidden_out
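The RMS variant follows the same pre-norm structure but replaces layer norm with RMS normalization (no mean subtraction and no beta). A hedged eager-mode sketch of the math the CUDA kernels above are assumed to implement; the epsilon default is illustrative.

```python
import torch

def rms_norm_reference(x: torch.Tensor, gamma: torch.Tensor, eps: float = 1e-5) -> torch.Tensor:
    # RMSNorm: scale by the reciprocal root-mean-square over channels, then by gamma.
    rms = torch.sqrt(x.pow(2).mean(dim=-1, keepdim=True) + eps)
    return (x / rms) * gamma

def pre_rms_reference(residual, hidden_in, gamma, eps: float = 1e-5):
    residual_out = residual if hidden_in is None else residual + hidden_in
    return residual_out, rms_norm_reference(residual_out, gamma, eps)

res, hid = pre_rms_reference(torch.randn(4, 64), torch.randn(4, 64), torch.ones(64))
```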
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/unembed/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .ragged_unembed import DSRaggedUnembed
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/unembed/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (276 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/implementations/unembed/ragged_unembed.py ADDED
@@ -0,0 +1,120 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from typing import Any, Dict, Optional
7
+
8
+ import torch
9
+
10
+ from deepspeed.accelerator import get_accelerator
11
+ from ....allocator import empty_from
12
+ from ....inference_utils import DtypeEnum, ActivationType
13
+ from ....kernels.core_ops import CUDAFPLN, BlasLibLinear, CUDARMSNorm, CUDABiasActivation
14
+ from ....kernels.ragged_ops import RaggedLogitsGather
15
+ from ....ragged import RaggedBatchWrapper
16
+ from ...interfaces import DSUnembedBase, DSUnembedRegistry
17
+ from ...configs import DSUnembedConfig
18
+
19
+
20
+ @DSUnembedRegistry.register_module
21
+ class DSRaggedUnembed(DSUnembedBase):
22
+ """
23
+ Ragged unembedding implementation. This implementation will gather only the last token
24
+ of each sequence in the ragged inflight batch and calculate the logits only for those rows.
25
+ """
26
+
27
+ @staticmethod
28
+ def name():
29
+ return 'ragged_unembed'
30
+
31
+ @staticmethod
32
+ def supports_config(config: DSUnembedConfig):
33
+
34
+ if DtypeEnum(config.dtype) not in [DtypeEnum.fp16, DtypeEnum.bf16, DtypeEnum.fp32]:
35
+ return False
36
+
37
+ try:
38
+ _ = RaggedLogitsGather(config.model_dim, config.dtype)
39
+ except ValueError:
40
+ return False
41
+
42
+ if config.norm_type == 'rms_norm':
43
+ try:
44
+ _ = CUDARMSNorm(config.model_dim, config.dtype)
45
+ except ValueError:
46
+ return False
47
+ elif config.norm_type == 'layer_norm':
48
+ try:
49
+ _ = CUDAFPLN(config.model_dim, config.dtype)
50
+ except ValueError:
51
+ return False
52
+
53
+ return True
54
+
55
+ def __init__(self, config: DSUnembedConfig, implementation_config: Dict[str, Any]) -> None:
56
+ super().__init__(config, implementation_config)
57
+
58
+ self._logits_gather = RaggedLogitsGather(config.model_dim, self._config.dtype)
59
+
60
+ if self._config.norm_type == 'layer_norm':
61
+ self._norm = CUDAFPLN(self._config.model_dim, self._config.dtype)
62
+ elif self._config.norm_type == 'rms_norm':
63
+ self._norm = CUDARMSNorm(self._config.model_dim, self._config.dtype)
64
+ else:
65
+ self._norm = None
66
+
67
+ self._linear = BlasLibLinear(self._config.dtype)
68
+ # Here the activation kernel is being used to apply bias, hence the identity activation type!
69
+ self._act_fn = CUDABiasActivation(self._config.vocab_size, self._config.dtype, ActivationType.IDENTITY)
70
+
71
+ self._intermediate = torch.empty((self._config.max_sequences, self._config.model_dim),
72
+ dtype=self._config.dtype,
73
+ device=get_accelerator().current_device())
74
+
75
+ self._output = torch.empty((self._config.max_sequences, self._config.vocab_size),
76
+ dtype=self._config.dtype,
77
+ device=get_accelerator().current_device())
78
+
79
+ @property
80
+ def output(self) -> torch.Tensor:
81
+ return self._output
82
+
83
+ def forward(self,
84
+ hidden_states: torch.Tensor,
85
+ vocab_embedding: torch.Tensor,
86
+ ragged_metadata: RaggedBatchWrapper,
87
+ bias: Optional[torch.Tensor] = None,
88
+ gamma: Optional[torch.Tensor] = None,
89
+ beta: Optional[torch.Tensor] = None) -> torch.Tensor:
90
+ """
91
+ Return final model logits.
92
+
93
+ Args:
94
+ hidden_states (torch.Tensor): The hidden states from the model. This is the output of the
95
+ final layer of the model.
96
+ vocab_embedding (torch.Tensor): The vocab embedding table.
97
+ raged_metadata (RaggedBatchWrapper): The ragged batch metadata.
98
+ gamma (Optional[torch.Tensor]): The gamma tensor for normalization.
99
+ beta (Optional[torch.Tensor]): The beta tensor for normalization.
100
+ """
101
+
102
+ cut_down_hidden_states = empty_from(self._intermediate,
103
+ (ragged_metadata.current_sequences, self._config.model_dim))
104
+ self._logits_gather(cut_down_hidden_states, hidden_states, ragged_metadata)
105
+
106
+ if self._config.norm_type == 'rms_norm':
107
+ if gamma is None:
108
+ raise ValueError('RMS Normalization enabled but gamma not provided.')
109
+ self._norm(cut_down_hidden_states, cut_down_hidden_states, gamma)
110
+ elif self._config.norm_type == 'layer_norm':
111
+ if gamma is None or beta is None:
112
+ raise ValueError('Normalization enabled but gamma and/or beta not provided.')
113
+ self._norm(cut_down_hidden_states, cut_down_hidden_states, gamma, beta)
114
+
115
+ output = empty_from(self._output, (ragged_metadata.current_sequences, self._config.vocab_size))
116
+ self._linear(output, cut_down_hidden_states, vocab_embedding)
117
+ if bias is not None:
118
+ self._act_fn(output, bias)
119
+
120
+ return output
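
Taken together, the forward pass above gathers only the final token of every in-flight sequence, optionally applies the configured normalization, projects the gathered rows against the vocabulary embedding, and adds an optional logits bias. The following dense pure-PyTorch approximation is included only to make the shapes easy to check; the `seq_lens` argument and the epsilon are assumptions standing in for the ragged batch metadata and the kernel internals, not part of the DeepSpeed API.

import torch

def ragged_unembed_ref(hidden_states: torch.Tensor,
                       vocab_embedding: torch.Tensor,
                       seq_lens: torch.Tensor,
                       gamma: torch.Tensor = None,
                       bias: torch.Tensor = None) -> torch.Tensor:
    # hidden_states: [total_tokens, model_dim], all sequences concatenated back to back.
    # seq_lens: [num_seqs], number of tokens currently held by each sequence.
    last_token_idx = torch.cumsum(seq_lens, dim=0) - 1
    last_hidden = hidden_states[last_token_idx]            # [num_seqs, model_dim]
    if gamma is not None:
        # RMS-normalize the gathered rows before the projection (rms_norm path).
        rms = torch.rsqrt(last_hidden.pow(2).mean(dim=-1, keepdim=True) + 1e-6)
        last_hidden = last_hidden * rms * gamma
    logits = last_hidden @ vocab_embedding.t()             # [num_seqs, vocab_size]
    if bias is not None:
        logits = logits + bias
    return logits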
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__init__.py ADDED
@@ -0,0 +1,12 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from .attention_base import DSSelfAttentionRegistry, DSSelfAttentionBase
+ from .embedding_base import DSEmbeddingRegistry, DSEmbeddingBase
+ from .linear_base import DSLinearRegistry, DSLinearBase
+ from .moe_base import DSMoERegistry, DSMoEBase
+ from .post_norm_base import DSPostNormRegistry, DSPostNormBase
+ from .pre_norm_base import DSPreNormRegistry, DSPreNormBase
+ from .unembed_base import DSUnembedRegistry, DSUnembedBase
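
Each interface module exports a matching base class and registry; implementations such as DSRaggedUnembed above attach themselves with the registry's register_module decorator and advertise their capabilities through name() and supports_config(). The toy sketch below is included purely for orientation; the ModuleRegistry class and its instantiate method are invented for illustration and do not mirror DeepSpeed's actual module_registry code.

from typing import Any, Dict, Type

class ModuleRegistry:
    # Toy illustration of a decorator-based implementation registry.
    registry: Dict[str, Type] = {}

    @classmethod
    def register_module(cls, impl: Type) -> Type:
        # Store the implementation under the name it advertises.
        cls.registry[impl.name()] = impl
        return impl

    @classmethod
    def instantiate(cls, name: str, config: Any, implementation_config: Dict[str, Any] = None):
        # Look up an implementation by name and check it accepts the given config.
        impl = cls.registry[name]
        if not impl.supports_config(config):
            raise ValueError(f"{name} does not support the given config")
        return impl(config, implementation_config or {})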
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (765 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/attention_base.cpython-310.pyc ADDED
Binary file (4.51 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/embedding_base.cpython-310.pyc ADDED
Binary file (3.88 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/modules/interfaces/__pycache__/linear_base.cpython-310.pyc ADDED
Binary file (3.06 kB).