applied-ai-018 committed
Commit d8c89f0 · verified · 1 Parent(s): 3456c50

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +1 -0
  2. venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__init__.py +8 -0
  3. venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/__init__.cpython-310.pyc +0 -0
  4. venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/huggingface_engine.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/in_memory_engine.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/base_engine.py +41 -0
  7. venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/huggingface_engine.py +124 -0
  8. venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/in_memory_engine.py +40 -0
  9. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/__pycache__/__init__.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/__pycache__/ds_kernel.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/configs.h +96 -0
  12. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/kernel_matmul.cuh +272 -0
  13. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/ptx_mma.cuh +137 -0
  14. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/utils_core.cuh +246 -0
  15. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/utils_gmem.cuh +86 -0
  16. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/utils_paralleldequant.cuh +127 -0
  17. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/weight_prepacking.h +209 -0
  18. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/__init__.py +7 -0
  19. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/__pycache__/__init__.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/cutlass_ops.cpp +19 -0
  21. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/__init__.py +6 -0
  22. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/__pycache__/__init__.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/__pycache__/mixed_gemm.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/mixed_gemm.cu +93 -0
  25. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/mixed_gemm.py +64 -0
  26. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/mixed_gemm_api.h +57 -0
  27. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/__init__.py +7 -0
  28. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/__pycache__/__init__.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/__pycache__/mixed_moe_gemm.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/__pycache__/moe_gemm.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/mixed_moe_gemm.py +67 -0
  32. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/moe_gemm.cu +175 -0
  33. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/moe_gemm.h +24 -0
  34. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/moe_gemm.py +60 -0
  35. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/moe_gemm_api.h +64 -0
  36. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/shared_resources/weight_variant.h +11 -0
  37. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/activation_type.h +17 -0
  38. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/conversion_utils.h +640 -0
  39. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/ds_kernel_utils.h +58 -0
  40. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/memory_access_utils.h +1115 -0
  41. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/reduction_utils.h +778 -0
  42. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__init__.py +6 -0
  43. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__pycache__/__init__.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__pycache__/blocked_flash.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/attention_atom.h +39 -0
  46. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.cpp +101 -0
  47. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.h +16 -0
  48. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.py +107 -0
  49. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/flash.h +74 -0
  50. venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/__init__.py +6 -0
.gitattributes CHANGED
@@ -141,3 +141,4 @@ venv/lib/python3.10/site-packages/numpy/core/_multiarray_umath.cpython-310-x86_6
  venv/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/numpy.libs/libopenblas64_p-r0-0cf96a72.3.23.dev.so filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/scipy.libs/libopenblasp-r0-24bff013.3.26.dev.so filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/scipy/special/cython_special.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__init__.py ADDED
@@ -0,0 +1,8 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from .base_engine import CheckpointEngineBase
+ from .in_memory_engine import InMemoryModelEngine
+ from .huggingface_engine import HuggingFaceCheckpointEngine
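For reference, the package `__init__` above makes all three engines importable from `deepspeed.inference.v2.checkpoint`; a minimal sketch of that import surface (the model id below is only a placeholder):

```python
# Minimal sketch of the public import surface re-exported above.
# Assumes a DeepSpeed install providing deepspeed.inference.v2; "gpt2" is a
# placeholder model id.
from deepspeed.inference.v2.checkpoint import (
    CheckpointEngineBase,         # abstract interface
    HuggingFaceCheckpointEngine,  # loads from a HF Hub id or local directory
    InMemoryModelEngine,          # wraps an already-instantiated nn.Module
)

engine = HuggingFaceCheckpointEngine("gpt2")
for name, tensor in engine.parameters():
    print(name, tuple(tensor.shape))
```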
venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (387 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/huggingface_engine.cpython-310.pyc ADDED
Binary file (3.96 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/in_memory_engine.cpython-310.pyc ADDED
Binary file (1.96 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/base_engine.py ADDED
@@ -0,0 +1,41 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from abc import ABC, abstractmethod
+ from typing import Iterable, Tuple
+
+ import torch
+
+ #from .huggingface_engine import HuggingFaceCheckpointEngine
+
+ MEGATRON = 'megatron'
+ HUGGINGFACE = 'huggingface'
+
+
+ class CheckpointEngineBase(ABC):
+     """
+     Abstract interface for checkpoint engines to implement.
+
+     There is no ``__init__`` method here by design, since the creation of the checkpoint
+     engine will happen outside the policy/engine code. The tradeoff being made here is
+     that we will write different frontends for different checkpoint engines, but these
+     frontends can be tailored to the specific checkpoint engine/model source needs.
+     """
+
+     @abstractmethod
+     def parameters(self) -> Iterable[Tuple[str, torch.Tensor]]:
+         """
+         This method should create a generator of tuples of the form (name, parameter) for
+         all parameters in the model. The name should be the fully qualified name of the
+         parameter, and the parameter should be a torch.Tensor.
+
+         The expected use of a checkpoint engine is the following:
+         ```python
+         for name, parameter in checkpoint_engine.parameters():
+             container_map.map_param(name, parameter)
+         ```
+         For a concrete use example, see ``InferenceV2Policy``.
+         """
+         ...
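To make the contract above concrete, here is a hypothetical engine (not part of this diff) that implements `parameters()` over a single `torch.load`-able state-dict file:

```python
# Hypothetical example (not part of this diff): a checkpoint engine that
# streams (name, tensor) pairs out of a single torch state-dict file.
from typing import Iterable, Tuple

import torch

from deepspeed.inference.v2.checkpoint import CheckpointEngineBase


class StateDictCheckpointEngine(CheckpointEngineBase):

    def __init__(self, path: str) -> None:
        super().__init__()
        self._path = path

    def parameters(self) -> Iterable[Tuple[str, torch.Tensor]]:
        # Load once on CPU, then yield each fully qualified parameter name
        # together with its tensor, matching the interface documented above.
        state_dict = torch.load(self._path, map_location="cpu")
        for name, tensor in state_dict.items():
            yield name, tensor
```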
venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/huggingface_engine.py ADDED
@@ -0,0 +1,124 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import os
+ import json
+ import torch
+ from .base_engine import CheckpointEngineBase
+ from typing import Iterable, Tuple
+ from functools import partial
+
+ from ..logging import inference_logger
+
+
+ class HuggingFaceCheckpointEngine(CheckpointEngineBase):
+
+     def __init__(self, model_name_or_path: str, auth_token: str = None) -> None:
+         super().__init__()
+         from transformers import AutoConfig, GenerationConfig
+
+         self.model_name_or_path = model_name_or_path
+         self.auth_token = auth_token
+         self.model_config = AutoConfig.from_pretrained(self.model_name_or_path)
+         # Define this property here so we can use it in the model implementation
+         if not hasattr(self.model_config, "max_seq_length"):
+             if hasattr(self.model_config, "max_position_embeddings"):
+                 self.model_config.max_seq_length = self.model_config.max_position_embeddings
+             else:
+                 generation_config = GenerationConfig.from_pretrained(self.model_name_or_path)
+                 self.model_config.max_seq_length = generation_config.max_length
+         self._local_checkpoint_dir = None
+         self._all_ckpt_paths = self._fetch_checkpoint_files()
+
+     def _fetch_checkpoint_files(self):
+         """
+         Fetch the checkpoint files from the HuggingFace Hub.
+         """
+         # TODO(jeff): for models like llama-2 the user will have to provide an auth `token`,
+         # currently coming from the ckpt engine init but maybe a catch all kwargs for other
+         # snapshot download parameters would be more flexible.
+
+         from huggingface_hub import snapshot_download, list_repo_tree
+
+         def model_has_safetensors(model_name_or_path: str) -> bool:
+             if os.path.isdir(model_name_or_path):
+                 file_list = os.listdir(model_name_or_path)
+             else:
+                 file_list = [rf.path for rf in list_repo_tree(model_name_or_path)]
+             for f in file_list:
+                 if f.endswith(".safetensors"):
+                     return True
+             return False
+
+         if os.path.isdir(self.model_name_or_path):
+             self._local_checkpoint_dir = self.model_name_or_path
+         else:
+             # We need to download the checkpoint files from HF
+             if model_has_safetensors(self.model_name_or_path):
+                 # Prioritize downloading safetensors if they are available
+                 allow_patterns = ["*.safetensors", "*.json"]
+             else:
+                 # Fallback to bin files when safetensors are not present
+                 allow_patterns = ["*.bin", "*.json", "*.pt"]
+             self._local_checkpoint_dir = snapshot_download(self.model_name_or_path,
+                                                            allow_patterns=allow_patterns,
+                                                            revision=None,
+                                                            token=self.auth_token)
+
+         assert os.path.isdir(
+             self._local_checkpoint_dir
+         ), f"Checkpoint dir {self._local_checkpoint_dir} is not a directory, cannot load checkpoint."
+
+         # Set the appropriate file names based on whether we have safetensors or not
+         if model_has_safetensors(self._local_checkpoint_dir):
+             from safetensors.torch import load_file
+             model_param_json_fname = "model.safetensors.index.json"
+             model_file_fname = "model.safetensors"
+             self._checkpoint_load_fn = load_file
+         else:
+             model_param_json_fname = "pytorch_model.bin.index.json"
+             model_file_fname = "pytorch_model.bin"
+             self._checkpoint_load_fn = partial(torch.load, map_location="cpu")
+
+         model_param_json = os.path.join(self._local_checkpoint_dir, model_param_json_fname)
+
+         if not os.path.isfile(model_param_json):
+             # We don't need any json as all such HF models will have pytorch_model.bin
+             all_checkpoint_files = [os.path.join(self._local_checkpoint_dir, model_file_fname)]
+         else:
+             param_map = json.load(open(model_param_json, "r"))
+
+             # weight_map -> { "lm_head.weight": "pytorch_model-00002-of-00002.bin", ... }
+             weight_map = param_map["weight_map"]
+
+             # unique set of all checkpoint files
+             all_checkpoint_files = set(weight_map.values())
+
+             # get absolute path of all unique checkpoint files
+             all_checkpoint_files = [os.path.join(self._local_checkpoint_dir, f) for f in all_checkpoint_files]
+
+         return all_checkpoint_files
+
+     def parameters(self) -> Iterable[Tuple[str, torch.Tensor]]:
+         """
+         Generator of model parameters (satisfies the CheckpointEngineBase interface).
+         """
+         for checkpoint in self._all_ckpt_paths:
+             inference_logger().info(f"Loading checkpoint: {checkpoint}")
+             checkpoint_sd = self._checkpoint_load_fn(checkpoint)
+             param_keys = list(checkpoint_sd.keys())
+             for param_name in param_keys:
+                 param = checkpoint_sd[param_name]
+                 yield param_name, param
+
+             del checkpoint_sd
+
+
+ if __name__ == "__main__":
+     # To test, add your auth_token here and run `python huggingface_engine.py`
+     engine = HuggingFaceCheckpointEngine(model_name_or_path="meta-llama/Llama-2-7b-hf",
+                                          auth_token="hf_xxxxxxxxxxxxxxxxx")
+     for name, param in engine.parameters():
+         print(name, param.shape)
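The shard resolution in `_fetch_checkpoint_files` boils down to reading the `*.index.json` weight map and de-duplicating its values; a standalone sketch of that step, with a hypothetical helper name:

```python
# Standalone sketch of the shard-resolution step in _fetch_checkpoint_files():
# the index json maps parameter names to shard files, and the engine only
# needs the unique set of shard paths. resolve_shards is a hypothetical helper.
import json
import os


def resolve_shards(checkpoint_dir: str,
                   index_name: str = "model.safetensors.index.json",
                   single_file: str = "model.safetensors"):
    index_path = os.path.join(checkpoint_dir, index_name)
    if not os.path.isfile(index_path):
        # Un-sharded checkpoint: a single weights file.
        return [os.path.join(checkpoint_dir, single_file)]
    with open(index_path, "r") as f:
        # weight_map -> {"lm_head.weight": "model-00002-of-00002.safetensors", ...}
        weight_map = json.load(f)["weight_map"]
    return [os.path.join(checkpoint_dir, shard) for shard in sorted(set(weight_map.values()))]
```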
venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/in_memory_engine.py ADDED
@@ -0,0 +1,40 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from typing import Iterable, Tuple
+ import torch
+
+ from .base_engine import CheckpointEngineBase
+
+
+ class InMemoryModelEngine(CheckpointEngineBase):
+     """
+     This "checkpoint" engine uses the existing interface to enable loading parameters into an
+     inference model from a model already instantiated in memory. In general, this is not the
+     recommended way to use the inference engine, and should only be used when absolutely necessary.
+
+     The primary limitation of this approach is that the model must be fully instantiated in memory.
+     In a tensor parallel scenario, this means that the model may be replicated many times in host
+     memory. Currently, it is also recommended to only use this approach for models held in host memory.
+
+     In order to free the memory held by this copy of the model, we delete the model in the first call
+     to `parameters`, so it is not safe to make this call twice.
+     """
+
+     def __init__(self, model: torch.nn.Module) -> None:
+         """
+         Create virtual checkpoint engine for the provided module.
+
+         Args:
+             model (torch.nn.Module): Model to load parameters from.
+         """
+         super().__init__()
+         self.model = model
+
+     def parameters(self) -> Iterable[Tuple[str, torch.Tensor]]:
+         for name, parameter in self.model.named_parameters():
+             yield name, parameter
+
+         del self.model
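Since `parameters()` deletes `self.model` once the generator is exhausted, the engine is single-use; a short usage sketch with a stand-in module:

```python
# Usage sketch: wrap an already-instantiated module and consume it exactly once.
# parameters() deletes the wrapped model when the generator finishes, so a
# second iteration would fail.
import torch

from deepspeed.inference.v2.checkpoint import InMemoryModelEngine

model = torch.nn.Sequential(torch.nn.Linear(16, 32), torch.nn.Linear(32, 8))
engine = InMemoryModelEngine(model)

for name, parameter in engine.parameters():
    print(name, tuple(parameter.shape))
```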
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (244 Bytes).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/__pycache__/ds_kernel.cpython-310.pyc ADDED
Binary file (1.29 kB).
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/configs.h ADDED
@@ -0,0 +1,96 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ // This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112
7
+
8
+ #ifndef CONFIGS_H
9
+ #define CONFIGS_H
10
+
11
+ // #define DEBUG_MODE
12
+ #define PIPELINE_LEVEL_GMEM 2
13
+ #define PIPELINE_LEVEL_SMEM 2 // only support 2
14
+
15
+ /************************ Hardware Parameters ************************/
16
+ #define WARP_SIZE 32
17
+ #define REG_BIT_WIDTH 32
18
+ // mma: M=16 K=16 N=8
19
+ #define MMA_8 8
20
+ #define MMA_16 16
21
+ // for memory access
22
+ #define THREAD_OPT_ACCESS_BIT_WIDTH_128 128 // LDS.128, cp_async.128, ...
23
+ #define BIT_WIDTH_PER_HALF 16 // Half precision: FP16
24
+
25
+ /******************** Register Allocation For GEMM ********************/
26
+ #define REG_PER_THREAD_C_TENSOR_16_16 8 // 8 for FP32 Accumulation
27
+ /********************** Memory Padding Parameters **********************/
28
+ // Eliminating bank-conflict
29
+ #define PADDING_BYTES_16 16 // Padding 16 bytes each column
30
+ #define PADDING_SHARED_MEM_FOR_B_8 \
31
+ 8 // Padding 8 half each column, during CopyFromGlobalToShared() for B
32
+ #define PADDING_SHARED_MEM_FOR_C_4 \
33
+ 4 // Padding 4 float each column, during StoreToSharedMemoryFromRegister() for C
34
+ /************************* WARP Tiling part-1 *************************/
35
+ #define WARP_ROW_MMA_TENSORS 4
36
+ #define WARP_M (WARP_ROW_MMA_TENSORS * MMA_16) // 64
37
+ #define WARP_K_MMA_TENSORS 4
38
+ #define WARP_K (WARP_K_MMA_TENSORS * MMA_16) // 64
39
+ template <int BLOCK_ROW_WARPS_, int BLOCK_COL_WARPS_, int WARP_COL_MMA_TENSORS_>
40
+ struct TilingConfig {
41
+ // Depending on "n" dimension of the GEMM
42
+ static constexpr int BLOCK_ROW_WARPS = BLOCK_ROW_WARPS_;
43
+ static constexpr int BLOCK_COL_WARPS = BLOCK_COL_WARPS_;
44
+ static constexpr int WARP_COL_MMA_TENSORS = WARP_COL_MMA_TENSORS_;
45
+ /************************* WARP Tiling part-2 *************************/
46
+ static constexpr int WARP_N = WARP_COL_MMA_TENSORS * MMA_8;
47
+ /*************************Thread Block Tiling *************************/
48
+ static constexpr int TILE_M = WARP_M * BLOCK_ROW_WARPS;
49
+ static constexpr int TILE_N = MMA_8 * WARP_COL_MMA_TENSORS * BLOCK_COL_WARPS;
50
+ static constexpr int TILE_K = WARP_K;
51
+ /********************** #Thread per Thread Block **********************/
52
+ static constexpr int BLOCK_WARPS = BLOCK_ROW_WARPS * BLOCK_COL_WARPS;
53
+ static constexpr int BLOCK_THREADS = BLOCK_WARPS * WARP_SIZE;
54
+ /******************************* Others *******************************/
55
+ static constexpr int SMEM_SIZE_B_TILE = TILE_N * (TILE_K + PADDING_BYTES_16) * 2 *
56
+ PIPELINE_LEVEL_GMEM; // sizeof(half)=2, doubleBuffer=2
57
+ static constexpr int SMEM_SIZE_C_TILE =
58
+ TILE_N * (TILE_M + PADDING_BYTES_16) * 4; // sizeof(float)=4
59
+ };
60
+
61
+ /************************ General Config for Quant-LLM **********************/
62
+ #define WEIGHT_FRAG1_BIT_WIDTH 2
63
+ #define WEIGHT_FRAG2_BIT_WIDTH 4
64
+ #define WEIGHT_BIT_WIDTH (WEIGHT_FRAG1_BIT_WIDTH + WEIGHT_FRAG2_BIT_WIDTH) // 6
65
+ // #define QUANT_GROUP_SIZE_DIVIDED_BY_64 4 //
66
+ // QuantGroupSize: 4*64 = 256
67
+ /*************************** 64*64 Weights of A WARP *************************/
68
+ #define WEIGHT_PER_UNIT (WARP_M * WARP_K) // 64*64
69
+ #define SMEM_SIZE_IN_BYTES_PER_WARP_A1 \
70
+ (WEIGHT_PER_UNIT * WEIGHT_FRAG1_BIT_WIDTH / \
71
+ 8) // 1024 Bytes #doubleBuffer not taken into consideration
72
+ #define SMEM_SIZE_IN_BYTES_PER_WARP_A2 \
73
+ (WEIGHT_PER_UNIT * WEIGHT_FRAG2_BIT_WIDTH / \
74
+ 8) // 2048 Bytes #doubleBuffer not taken into consideration
75
+ #define SMEM_SIZE_A1_TILE \
76
+ (SMEM_SIZE_IN_BYTES_PER_WARP_A1 * 4 * \
77
+ PIPELINE_LEVEL_GMEM) // #WARP=4, #Triple-Buffer for 3-level pipeline for A = 12 KB; double
78
+ // buffer for 2-level pipeline A= 8 KB.
79
+ #define SMEM_SIZE_A2_TILE \
80
+ (SMEM_SIZE_IN_BYTES_PER_WARP_A2 * 4 * \
81
+ PIPELINE_LEVEL_GMEM) // #WARP=4, #Triple-Buffer for 3-level pipeline for A = 24 KB; double
82
+ // buffer for 2-level pipeline A= 16 KB.
83
+ /******************** Global Memory Layout For QUANTIZED DATA ******************/
84
+ #define NUM_INT4_PER_UNIT_2BIT_FRAG (WEIGHT_PER_UNIT * WEIGHT_FRAG1_BIT_WIDTH / 128) // 64
85
+ #define NUM_INT4_PER_UNIT_4BIT_FRAG (WEIGHT_PER_UNIT * WEIGHT_FRAG2_BIT_WIDTH / 128) // 128
86
+ /******************** Register Allocation For QUANTIZED DATA ******************/
87
+ #define WEIGHT_PER_THREAD (WEIGHT_PER_UNIT / WARP_SIZE) // 128
88
+ #define REG_PER_THREAD_2BIT_FRAG (WEIGHT_PER_THREAD / REG_BIT_WIDTH * 2) // 8
89
+ #define REG_PER_THREAD_4BIT_FRAG (WEIGHT_PER_THREAD / REG_BIT_WIDTH * 4) // 16
90
+ /******************** Register Allocation For QUANT Scales ******************/
91
+ #define WARP_REG_QUANT_SCALE 4 // 8 rows per thread -> 8 FP16 scales -> 4 registers
92
+ #define WARP_REG_QUANT_SCALE_DISTRIBUTED \
93
+ 1 // T0-T3, T4-T7, ..., T28-T31 share the same scales, using shfl to get all the scales for
94
+ // each thread
95
+
96
+ #endif // CONFIGS_H
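The `TilingConfig` values and the A-fragment shared-memory sizes are all simple arithmetic over the macros above; a small Python sketch reproducing that arithmetic (the (4, 1, 8) instantiation is only an illustrative choice of template parameters):

```python
# Reproduces the constexpr arithmetic in configs.h: tile shapes, threads per
# block, and the B-tile / A-fragment shared-memory sizes.
WARP_SIZE = 32
MMA_8, MMA_16 = 8, 16
WARP_ROW_MMA_TENSORS = WARP_K_MMA_TENSORS = 4
WARP_M = WARP_ROW_MMA_TENSORS * MMA_16   # 64
WARP_K = WARP_K_MMA_TENSORS * MMA_16     # 64
PIPELINE_LEVEL_GMEM = 2
PADDING_BYTES_16 = 16


def tiling_config(block_row_warps, block_col_warps, warp_col_mma_tensors):
    tile_m = WARP_M * block_row_warps
    tile_n = MMA_8 * warp_col_mma_tensors * block_col_warps
    tile_k = WARP_K
    block_threads = block_row_warps * block_col_warps * WARP_SIZE
    smem_b_tile = tile_n * (tile_k + PADDING_BYTES_16) * 2 * PIPELINE_LEVEL_GMEM  # sizeof(half)=2
    return tile_m, tile_n, tile_k, block_threads, smem_b_tile


WEIGHT_PER_UNIT = WARP_M * WARP_K            # 64*64 weights per warp
SMEM_PER_WARP_A1 = WEIGHT_PER_UNIT * 2 // 8  # 1024 bytes (2-bit fragment)
SMEM_PER_WARP_A2 = WEIGHT_PER_UNIT * 4 // 8  # 2048 bytes (4-bit fragment)

print(tiling_config(4, 1, 8), SMEM_PER_WARP_A1, SMEM_PER_WARP_A2)
```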
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/kernel_matmul.cuh ADDED
@@ -0,0 +1,272 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ // This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112
7
+
8
+ #ifndef DEEPSPEED_CUDA_LINEAR_KERNEL_MATMUL_CUH
9
+ #define DEEPSPEED_CUDA_LINEAR_KERNEL_MATMUL_CUH
10
+
11
+ #include "configs.h"
12
+ #include "utils_core.cuh"
13
+ #include "utils_gmem.cuh"
14
+
15
+ /*
16
+ * C = A*B
17
+ * A: row major with ahead-of-time layout transformation, FP6
18
+ * B: col major, FP16
19
+ * C: col major, FP16
20
+ */
21
+ template <typename TilingConfig, typename OutputDataType>
22
+ __global__ void QUANT_GEMM_Kernel(const uint4* Weight1,
23
+ const uint4* Weight2,
24
+ const half* Scales,
25
+ const half* B,
26
+ OutputDataType* C,
27
+ const size_t M_Global,
28
+ const size_t N_Global,
29
+ const size_t K_Global,
30
+ int Split_K)
31
+ {
32
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 && __CUDA_ARCH__ < 900
33
+
34
+ #ifdef DEBUG_MODE
35
+ assert(K_Global % TilingConfig::TILE_K == 0);
36
+ assert(M_Global % TilingConfig::TILE_M == 0);
37
+ assert(gridDim.y == Split_K * (M_Global / TilingConfig::TILE_M));
38
+ #endif
39
+ extern __shared__ __align__(128)
40
+ half smem[]; // Dynamic shared memory for FP16 A tiles, 128 Bytes aligned
41
+ half(*smem_array)[WARP_K + PADDING_SHARED_MEM_FOR_B_8] =
42
+ reinterpret_cast<half(*)[WARP_K + PADDING_SHARED_MEM_FOR_B_8]>(
43
+ smem +
44
+ (SMEM_SIZE_A1_TILE + SMEM_SIZE_A2_TILE) / 2); // Dynamic shared memory for FP16 B tiles
45
+ __shared__ half QuantScales[64 * TilingConfig::BLOCK_WARPS]; // static shared memory for
46
+ // quantization scales, 64 row per
47
+ // warp * 4 warps = 512 Bytes
48
+ // Thread Block Mapping, considering SplitK
49
+ const size_t BatchID = blockIdx.y / (M_Global / TilingConfig::TILE_M);
50
+ const size_t x = blockIdx.x; // Output Block ID: (BlockID_Row = y; BlockID_Col = x )
51
+ const size_t y =
52
+ blockIdx.y %
53
+ (M_Global / TilingConfig::TILE_M); // Output Block ID: (BlockID_Row = y; BlockID_Col = x )
54
+ const size_t Tile_Start_M = y * TilingConfig::TILE_M;
55
+ const size_t Tile_Start_N = x * TilingConfig::TILE_N;
56
+ const size_t NumColumnToCopy = (N_Global - Tile_Start_N) < TilingConfig::TILE_N
57
+ ? (N_Global - Tile_Start_N)
58
+ : TilingConfig::TILE_N;
59
+ const size_t NumBlock_K = K_Global / TilingConfig::TILE_K;
60
+ const size_t AverageNumBlock_K = NumBlock_K / Split_K;
61
+ const size_t ExtraNumBlock_K = NumBlock_K - AverageNumBlock_K * Split_K;
62
+ size_t NumIter = AverageNumBlock_K;
63
+ if (BatchID < ExtraNumBlock_K) NumIter++;
64
+ size_t StartBlockID_K = AverageNumBlock_K * BatchID;
65
+ if (BatchID < ExtraNumBlock_K)
66
+ StartBlockID_K += BatchID;
67
+ else
68
+ StartBlockID_K += ExtraNumBlock_K;
69
+ // Warp ID.
70
+ const int warpId = threadIdx.x / WARP_SIZE;
71
+ int WARP_i =
72
+ warpId / TilingConfig::BLOCK_COL_WARPS; // WARP_i: row number; WARP_j: column number
73
+ // int WARP_j = warpId % TilingConfig::BLOCK_COL_WARPS;
74
+ // Global Memory Address for Matrix A (Weight)
75
+ // ///////////////////////////////////////////////////////////////////////// StartPTR for each
76
+ // ThreadBlock(TB)
77
+ const uint4* TB_StartGPTR_A1 =
78
+ Weight1 + (y * TilingConfig::BLOCK_ROW_WARPS) * NumBlock_K * NUM_INT4_PER_UNIT_2BIT_FRAG;
79
+ const uint4* TB_StartGPTR_A2 =
80
+ Weight2 + (y * TilingConfig::BLOCK_ROW_WARPS) * NumBlock_K * NUM_INT4_PER_UNIT_4BIT_FRAG;
81
+ // StartPTR for each WARP.
82
+ const uint4* WARP_StartGPTR_A1 =
83
+ TB_StartGPTR_A1 + WARP_i * NumBlock_K * NUM_INT4_PER_UNIT_2BIT_FRAG;
84
+ const uint4* WARP_StartGPTR_A2 =
85
+ TB_StartGPTR_A2 + WARP_i * NumBlock_K * NUM_INT4_PER_UNIT_4BIT_FRAG;
86
+ // StartPTR for each WARP, considering SplitK
87
+ const size_t WARP_Start_UnitID_K = StartBlockID_K;
88
+ WARP_StartGPTR_A1 += WARP_Start_UnitID_K * NUM_INT4_PER_UNIT_2BIT_FRAG;
89
+ WARP_StartGPTR_A2 += WARP_Start_UnitID_K * NUM_INT4_PER_UNIT_4BIT_FRAG;
90
+ // Copying A tile from Global to Shared, using double-buffer
91
+ // ////////////////////////////////////////////////////////// StartSPTR for each ThreadBlock
92
+ uint32_t* AFrag_2BIT_SPTR = reinterpret_cast<uint32_t*>(smem);
93
+ uint32_t* AFrag_4BIT_SPTR =
94
+ AFrag_2BIT_SPTR +
95
+ SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 4 * TilingConfig::BLOCK_WARPS *
96
+ PIPELINE_LEVEL_GMEM; // 8 buffers including double buffers, 12 for trible buffers
97
+ // StartSPTR for each WARP
98
+ AFrag_2BIT_SPTR += warpId * SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 4;
99
+ AFrag_4BIT_SPTR += warpId * SMEM_SIZE_IN_BYTES_PER_WARP_A2 / 4;
100
+ // Pre-fetch of A tile
101
+ for (int i = 0; i < PIPELINE_LEVEL_GMEM - 1; i++) {
102
+ CopyFromGlobalToShared_A<SMEM_SIZE_IN_BYTES_PER_WARP_A1>(
103
+ AFrag_2BIT_SPTR + i * SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 4 * 4, WARP_StartGPTR_A1);
104
+ CopyFromGlobalToShared_A<SMEM_SIZE_IN_BYTES_PER_WARP_A2>(
105
+ AFrag_4BIT_SPTR + i * SMEM_SIZE_IN_BYTES_PER_WARP_A2 / 4 * 4, WARP_StartGPTR_A2);
106
+ WARP_StartGPTR_A1 += SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 16;
107
+ WARP_StartGPTR_A2 += SMEM_SIZE_IN_BYTES_PER_WARP_A2 / 16;
108
+ }
109
+ // Global Memory Address for Matrix A (QuantScale)
110
+ // /////////////////////////////////////////////////////////////////////
111
+ const half* TB_StartGPTR_A_Scale = Scales + (y * TilingConfig::BLOCK_ROW_WARPS) * 64;
112
+ const half* WARP_StartGPTR_A_Scales = TB_StartGPTR_A_Scale + WARP_i * 64;
113
+ CopyFromGlobalToShared_Scales(QuantScales + WARP_i * 64, WARP_StartGPTR_A_Scales);
114
+ // Copying B tile from Global to Shared, considering SplitK
115
+ // /////////////////////////////////////////////////////////////
116
+ const half* BTile_GPTR = B + Tile_Start_N * K_Global + StartBlockID_K * TilingConfig::TILE_K;
117
+ for (int i = 0; i < PIPELINE_LEVEL_GMEM - 1; i++) {
118
+ CopyFromGlobalToShared<TilingConfig::TILE_N, TilingConfig::BLOCK_WARPS>(
119
+ smem_array + i * TilingConfig::TILE_N, BTile_GPTR, K_Global, NumColumnToCopy);
120
+ BTile_GPTR += TilingConfig::TILE_K;
121
+ }
122
+ // Register Allocation for A, B, and C, Initialized to Zeros
123
+ // /////////////////////////////////////////////////////////////////////
124
+ constexpr int NumRegSets_a =
125
+ WARP_ROW_MMA_TENSORS; // 1 set = 4 registers, containing a 16*16 MMA block
126
+ constexpr int NumRegSets_b = (TilingConfig::WARP_COL_MMA_TENSORS == 1)
127
+ ? 1
128
+ : TilingConfig::WARP_COL_MMA_TENSORS /
129
+ 2; // 1 set = 4 registers, containing a 16*16 MMA block
130
+ #ifdef PIPELINE_LEVEL_SMEM
131
+ uint32_t a[NumRegSets_a * PIPELINE_LEVEL_SMEM]
132
+ [4]; // double/Trible buffer is used // Registers to store decompressed FP6
133
+ uint32_t b[NumRegSets_b * PIPELINE_LEVEL_SMEM]
134
+ [4]; // double/Triple buffer is used // Register to store FP16 B matrix (a slice)
135
+ #endif
136
+ float c[NumRegSets_a * NumRegSets_b][REG_PER_THREAD_C_TENSOR_16_16];
137
+ for (int i = 0; i < NumRegSets_a * NumRegSets_b; i++)
138
+ for (int j = 0; j < REG_PER_THREAD_C_TENSOR_16_16; j++) c[i][j] = 0.0f;
139
+ //
140
+ cp_async_wait_all();
141
+ __syncthreads();
142
+
143
+ /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
144
+ uint32_t Scales_RPTR[4]; // 4 Registers per thread for Quantization Scales
145
+ ExtractFromSharedToReg_Scales(Scales_RPTR, QuantScales + WARP_i * 64);
146
+ #ifdef PIPELINE_LEVEL_SMEM
147
+ // Initializing the Software Pipeline: writing registers.
148
+ // ////////////////////////////////////////////////////////////////////////////////////////////////
149
+ initialize_mma_slice<TilingConfig>(
150
+ a, b, AFrag_2BIT_SPTR, AFrag_4BIT_SPTR, smem_array, Scales_RPTR);
151
+ #endif
152
+ // The outer loop.
153
+ // /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
154
+ #pragma unroll(1)
155
+ for (size_t tile_id_k = 0; tile_id_k < NumIter; tile_id_k++) {
156
+ // Triple-Buffer for A Tile
157
+ uint32_t* __restrict__ read_SPTR_Frag1 =
158
+ AFrag_2BIT_SPTR + ((tile_id_k + 0) % PIPELINE_LEVEL_GMEM) *
159
+ SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 4 *
160
+ 4; // 1024 (1)*4: 4 WARPs; (2)/4: int*+1 = char*+16
161
+ uint32_t* __restrict__ read_SPTR_Frag2 =
162
+ AFrag_4BIT_SPTR + ((tile_id_k + 0) % PIPELINE_LEVEL_GMEM) *
163
+ SMEM_SIZE_IN_BYTES_PER_WARP_A2 / 4 *
164
+ 4; // 2048 (1)*4: 4 WARPs; (2)/4: int*+1 = char*+16
165
+ #ifdef PIPELINE_LEVEL_SMEM
166
+ uint32_t* __restrict__ read2_SPTR_Frag1 =
167
+ AFrag_2BIT_SPTR +
168
+ ((tile_id_k + 1) % PIPELINE_LEVEL_GMEM) * SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 4 * 4;
169
+ uint32_t* __restrict__ read2_SPTR_Frag2 =
170
+ AFrag_4BIT_SPTR +
171
+ ((tile_id_k + 1) % PIPELINE_LEVEL_GMEM) * SMEM_SIZE_IN_BYTES_PER_WARP_A2 / 4 * 4;
172
+ #endif
173
+ uint32_t* __restrict__ write_SPTR_Frag1 =
174
+ AFrag_2BIT_SPTR + ((tile_id_k + (PIPELINE_LEVEL_GMEM - 1)) % PIPELINE_LEVEL_GMEM) *
175
+ SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 4 *
176
+ 4; // 1024 (1)*4: 4 WARPs; (2)/4: int*+1 = char*+16
177
+ uint32_t* __restrict__ write_SPTR_Frag2 =
178
+ AFrag_4BIT_SPTR + ((tile_id_k + (PIPELINE_LEVEL_GMEM - 1)) % PIPELINE_LEVEL_GMEM) *
179
+ SMEM_SIZE_IN_BYTES_PER_WARP_A2 / 4 *
180
+ 4; // 2048 (1)*4: 4 WARPs; (2)/4: int*+1 = char*+16
181
+ // Triple-Buffer for B Tile
182
+ half __restrict__(*read_SPTR)[WARP_K + PADDING_SHARED_MEM_FOR_B_8] =
183
+ smem_array + ((tile_id_k + 0) % PIPELINE_LEVEL_GMEM) * TilingConfig::TILE_N;
184
+ #ifdef PIPELINE_LEVEL_SMEM
185
+ half __restrict__(*read2_SPTR)[WARP_K + PADDING_SHARED_MEM_FOR_B_8] =
186
+ smem_array + ((tile_id_k + 1) % PIPELINE_LEVEL_GMEM) * TilingConfig::TILE_N;
187
+ #endif
188
+ half __restrict__(*write_SPTR)[WARP_K + PADDING_SHARED_MEM_FOR_B_8] =
189
+ smem_array +
190
+ ((tile_id_k + (PIPELINE_LEVEL_GMEM - 1)) % PIPELINE_LEVEL_GMEM) * TilingConfig::TILE_N;
191
+ //
192
+ bool GlobalCopy = (tile_id_k + PIPELINE_LEVEL_GMEM - 1) < NumIter;
193
+ // Copying A tile from Global to Register, Bypassing L1, using double-buffer
194
+ CopyFromGlobalToShared_A<SMEM_SIZE_IN_BYTES_PER_WARP_A1>(
195
+ write_SPTR_Frag1, WARP_StartGPTR_A1, GlobalCopy);
196
+ CopyFromGlobalToShared_A<SMEM_SIZE_IN_BYTES_PER_WARP_A2>(
197
+ write_SPTR_Frag2, WARP_StartGPTR_A2, GlobalCopy);
198
+ // copying B tile from GlobalMemory to SharedMemory
199
+ CopyFromGlobalToShared<TilingConfig::TILE_N, TilingConfig::BLOCK_WARPS>(
200
+ write_SPTR, BTile_GPTR, K_Global, NumColumnToCopy, GlobalCopy);
201
+ cp_async_group_commit();
202
+ #ifdef PIPELINE_LEVEL_SMEM
203
+ core_mma_slice<TilingConfig>(c,
204
+ a,
205
+ b,
206
+ read_SPTR_Frag1,
207
+ read_SPTR_Frag2,
208
+ read_SPTR,
209
+ Scales_RPTR,
210
+ 1); // read_SPTR_Frag1, read_SPTR_Frag2 are different for each
211
+ // WARP; read_SPTR is shared among WARPs
212
+ core_mma_slice<TilingConfig>(
213
+ c, a, b, read_SPTR_Frag1, read_SPTR_Frag2, read_SPTR, Scales_RPTR, 2);
214
+ core_mma_slice<TilingConfig>(
215
+ c, a, b, read_SPTR_Frag1, read_SPTR_Frag2, read_SPTR, Scales_RPTR, 3);
216
+ // Barriers and Synchronizations
217
+ cp_async_wait_group<PIPELINE_LEVEL_GMEM - 2>();
218
+ __syncthreads();
219
+ core_mma_slice<TilingConfig>(
220
+ c, a, b, read2_SPTR_Frag1, read2_SPTR_Frag2, read2_SPTR, Scales_RPTR, 0);
221
+ // Updating global PTRs
222
+ WARP_StartGPTR_A1 +=
223
+ SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 16; // 4KB/16=256 (1)/16: int4*+1 = char*+16
224
+ WARP_StartGPTR_A2 +=
225
+ SMEM_SIZE_IN_BYTES_PER_WARP_A2 / 16; // 8KB/16=512 (1)/16: int4*+1 = char*+16
226
+ BTile_GPTR += TilingConfig::TILE_K;
227
+ #else
228
+ PipelinedCoreLoop<TilingConfig>(
229
+ c,
230
+ read_SPTR,
231
+ read_SPTR_Frag1,
232
+ read_SPTR_Frag2,
233
+ Scales_RPTR); // read_SPTR_Frag1, read_SPTR_Frag2 are different for each WARP;
234
+ // read_SPTR is shared among WARPs
235
+ // Updating global PTRs
236
+ WARP_StartGPTR_A1 +=
237
+ SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 16; // 4KB/16=256 (1)/16: int4*+1 = char*+16
238
+ WARP_StartGPTR_A2 +=
239
+ SMEM_SIZE_IN_BYTES_PER_WARP_A2 / 16; // 8KB/16=512 (1)/16: int4*+1 = char*+16
240
+ BTile_GPTR += TilingConfig::TILE_K;
241
+ // Barriers and Synchronizations
242
+ cp_async_wait_group<PIPELINE_LEVEL_GMEM - 2>();
243
+ __syncthreads();
244
+ #endif
245
+ }
246
+ /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
247
+ /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
248
+ // Store the C fragments to shared memory.
249
+ float(*smem_CFrag)[TilingConfig::TILE_M + PADDING_SHARED_MEM_FOR_C_4] =
250
+ reinterpret_cast<float(*)[TilingConfig::TILE_M + PADDING_SHARED_MEM_FOR_C_4]>(smem);
251
+ StoreToSharedMemoryFromRegister<TilingConfig>(smem_CFrag, c);
252
+ __syncthreads();
253
+ // Now that shared memory contains all the D tiles, stream them to global memory.
254
+ OutputDataType* BlockGlobalPTR =
255
+ C + BatchID * (M_Global * N_Global) + Tile_Start_M + Tile_Start_N * M_Global;
256
+ for (size_t i = warpId; i < NumColumnToCopy; i += TilingConfig::BLOCK_WARPS) // i-th column
257
+ #pragma unroll
258
+ for (size_t j = threadIdx.x % WARP_SIZE; j < TilingConfig::TILE_M;
259
+ j += WARP_SIZE) // j-th row
260
+ {
261
+ if constexpr (std::is_same<OutputDataType, half>::value)
262
+ BlockGlobalPTR[j + i * M_Global] = __float2half_rn(smem_CFrag[i][j]);
263
+ else
264
+ BlockGlobalPTR[j + i * M_Global] = smem_CFrag[i][j];
265
+ }
266
+
267
+ #else
268
+ #warning "The FP6 functions are only available on Ampere GPUs."
269
+ #endif
270
+ }
271
+
272
+ #endif
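The Split-K bookkeeping at the top of `QUANT_GEMM_Kernel` hands the remainder K-blocks to the lowest-numbered partitions; a short sketch that mirrors the `NumIter`/`StartBlockID_K` computation:

```python
# Mirrors the Split-K bookkeeping in QUANT_GEMM_Kernel: K-blocks are divided
# as evenly as possible across Split_K partitions and the remainder goes to
# the first partitions (batch_id is the partition index, BatchID in the kernel).
def split_k_range(k_global: int, tile_k: int, split_k: int, batch_id: int):
    num_block_k = k_global // tile_k
    average = num_block_k // split_k
    extra = num_block_k - average * split_k
    num_iter = average + (1 if batch_id < extra else 0)
    start_block_k = average * batch_id + (batch_id if batch_id < extra else extra)
    return start_block_k, num_iter


# Example: 20 K-blocks over Split_K=3 -> iteration counts 7, 7, 6.
for b in range(3):
    print(b, split_k_range(20 * 64, 64, 3, b))
```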
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/ptx_mma.cuh ADDED
@@ -0,0 +1,137 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ // This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112
7
+
8
+ #ifndef DEEPSPEED_CUDA_LINEAR_PTX_MMA_CUH
9
+ #define DEEPSPEED_CUDA_LINEAR_PTX_MMA_CUH
10
+
11
+ #include <cuda.h>
12
+ #include <cuda_fp16.h>
13
+ #include <cuda_runtime.h>
14
+
15
+ #include <assert.h>
16
+ #include "configs.h"
17
+
18
+ #ifdef PIPELINE_LEVEL_SMEM
19
+ template <typename TilingConfig>
20
+ __device__ __forceinline__ void B_FromSharedToReg(
21
+ uint32_t __restrict__ Reg[][4],
22
+ half __restrict__ (*read_SPTR)[WARP_K + PADDING_SHARED_MEM_FOR_B_8],
23
+ int slice_id)
24
+ {
25
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
26
+ #ifdef DEBUG_MODE
27
+ static_assert((TilingConfig::WARP_COL_MMA_TENSORS == 1) ||
28
+ (TilingConfig::WARP_COL_MMA_TENSORS % 2 == 0));
29
+ #endif
30
+
31
+ const int warpId = threadIdx.x / WARP_SIZE;
32
+ int lane_id = threadIdx.x % WARP_SIZE;
33
+ int WARP_j = warpId % TilingConfig::BLOCK_COL_WARPS;
34
+ int warp_start_col = TilingConfig::WARP_COL_MMA_TENSORS * MMA_8 *
35
+ WARP_j; // each warp may start from reading warp_start_col'th column of
36
+ // the B tile in shared memory
37
+ #ifdef DEBUG_MODE
38
+ assert(warp_start_col == 0);
39
+ #endif
40
+
41
+ int col = (lane_id % 8) + (lane_id / 16) * 8;
42
+ int row = (lane_id % 16) / 8 * 8;
43
+ uint32_t smem_local_ptr = static_cast<uint32_t>(
44
+ __cvta_generic_to_shared(&read_SPTR[warp_start_col + col][slice_id * MMA_16 + row]));
45
+ if (TilingConfig::WARP_COL_MMA_TENSORS == 1) {
46
+ asm volatile("ldmatrix.sync.aligned.x2.m8n8.shared.b16 {%0, %1}, [%2];\n"
47
+ : "=r"(Reg[0][0]), "=r"(Reg[0][1])
48
+ : "r"(smem_local_ptr));
49
+ } else {
50
+ #pragma unroll
51
+ for (int i = 0; i < TilingConfig::WARP_COL_MMA_TENSORS / 2; i++) {
52
+ asm volatile("ldmatrix.sync.aligned.x4.m8n8.shared.b16 {%0, %1, %2, %3}, [%4];\n"
53
+ : "=r"(Reg[i][0]), "=r"(Reg[i][1]), "=r"(Reg[i][2]), "=r"(Reg[i][3])
54
+ : "r"(smem_local_ptr));
55
+ smem_local_ptr += 16 * (WARP_K + PADDING_SHARED_MEM_FOR_B_8) * sizeof(half);
56
+ }
57
+ }
58
+ #else
59
+ #warning "The matrix load functions are only supported on Ampere and newer architectures"
60
+ #endif
61
+ }
62
+ #else
63
+ // Debug: Whether ldmatrix.trans is required???
64
+ // B is in column-major
65
+ template <typename TilingConfig>
66
+ __device__ __forceinline__ void B_FromSharedToReg(
67
+ uint32_t __restrict__ Reg[][4],
68
+ half __restrict__ (*read_SPTR)[WARP_K + PADDING_SHARED_MEM_FOR_B_8],
69
+ int k_offset)
70
+ {
71
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
72
+ #ifdef DEBUG_MODE
73
+ static_assert((TilingConfig::WARP_COL_MMA_TENSORS == 1) ||
74
+ (TilingConfig::WARP_COL_MMA_TENSORS % 2 == 0));
75
+ #endif
76
+
77
+ const int warpId = threadIdx.x / WARP_SIZE;
78
+ int lane_id = threadIdx.x % WARP_SIZE;
79
+ int WARP_j = warpId % TilingConfig::BLOCK_COL_WARPS;
80
+ int warp_start_col = TilingConfig::WARP_COL_MMA_TENSORS * MMA_8 *
81
+ WARP_j; // each warp may start from reading warp_start_col'th column of
82
+ // the B tile in shared memory
83
+ #ifdef DEBUG_MODE
84
+ assert(warp_start_col == 0);
85
+ #endif
86
+
87
+ int col = (lane_id % 8) + (lane_id / 16) * 8;
88
+ int row = (lane_id % 16) / 8 * 8;
89
+ uint32_t smem_local_ptr = static_cast<uint32_t>(
90
+ __cvta_generic_to_shared(&read_SPTR[warp_start_col + col][k_offset + row]));
91
+ if (TilingConfig::WARP_COL_MMA_TENSORS == 1) {
92
+ asm volatile("ldmatrix.sync.aligned.x2.m8n8.shared.b16 {%0, %1}, [%2];\n"
93
+ : "=r"(Reg[0][0]), "=r"(Reg[0][1])
94
+ : "r"(smem_local_ptr));
95
+ } else {
96
+ #pragma unroll
97
+ for (int i = 0; i < TilingConfig::WARP_COL_MMA_TENSORS / 2; i++) {
98
+ asm volatile("ldmatrix.sync.aligned.x4.m8n8.shared.b16 {%0, %1, %2, %3}, [%4];\n"
99
+ : "=r"(Reg[i][0]), "=r"(Reg[i][1]), "=r"(Reg[i][2]), "=r"(Reg[i][3])
100
+ : "r"(smem_local_ptr));
101
+ smem_local_ptr += 16 * (WARP_K + PADDING_SHARED_MEM_FOR_B_8) * sizeof(half);
102
+ }
103
+ }
104
+ #else
105
+ #warning "The matrix load functions are only supported on Ampere and newer architectures"
106
+ #endif
107
+ }
108
+ #endif
109
+
110
+ __device__ __forceinline__ void MMA_FP16_M16N8K16(uint32_t __restrict__ c[],
111
+ uint32_t __restrict__* a,
112
+ uint32_t __restrict__* b)
113
+ {
114
+ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
115
+ asm volatile(
116
+ "mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32"
117
+ "{ %0, %1, %2, %3},"
118
+ "{ %4, %5, %6, %7 },"
119
+ "{ %8, %9 },"
120
+ "{ %10, %11, %12, %13 };"
121
+ : "=r"(c[0]), "=r"(c[1]), "=r"(c[2]), "=r"(c[3])
122
+ : "r"(a[0]),
123
+ "r"(a[1]),
124
+ "r"(a[2]),
125
+ "r"(a[3]),
126
+ "r"(b[0]),
127
+ "r"(b[1]),
128
+ "r"(c[0]),
129
+ "r"(c[1]),
130
+ "r"(c[2]),
131
+ "r"(c[3]));
132
+ #else
133
+ #warning "The mma functions are only implemented for Ampere and newer architectures"
134
+ #endif
135
+ }
136
+
137
+ #endif
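Each lane in `B_FromSharedToReg` derives the shared-memory element behind its `ldmatrix` address from its lane id; a sketch of that lane-to-(column, row) mapping:

```python
# Lane -> (column, row) mapping used to form the ldmatrix shared-memory address
# in B_FromSharedToReg: each group of 8 lanes supplies the base address of one
# 8x8 half-precision tile of the B slice.
def ldmatrix_b_coords(lane_id: int):
    col = (lane_id % 8) + (lane_id // 16) * 8
    row = ((lane_id % 16) // 8) * 8
    return col, row


for lane in (0, 7, 8, 15, 16, 31):
    print(lane, ldmatrix_b_coords(lane))
```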
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/utils_core.cuh ADDED
@@ -0,0 +1,246 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ // This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112
7
+
8
+ #ifndef DEEPSPEED_CUDA_LINEAR_UTILS_CORE_CUH
9
+ #define DEEPSPEED_CUDA_LINEAR_UTILS_CORE_CUH
10
+
11
+ #include <assert.h>
12
+
13
+ #include "configs.h"
14
+ #include "ptx_mma.cuh"
15
+ #include "utils_paralleldequant.cuh"
16
+
17
+ #ifdef PIPELINE_LEVEL_SMEM
18
+ template <int NUM_INT_PER_THREAD>
19
+ __device__ __forceinline__ void CopyFromSharedToRegister_AFrag(uint32_t Reg[],
20
+ uint32_t* SPTR,
21
+ int slice_id)
22
+ {
23
+ SPTR += slice_id * (NUM_INT_PER_THREAD * WARP_SIZE);
24
+ int lane_id = threadIdx.x % WARP_SIZE;
25
+ #pragma unroll
26
+ for (int i = 0; i < NUM_INT_PER_THREAD; i++) { Reg[i] = SPTR[lane_id + i * WARP_SIZE]; }
27
+ }
28
+
29
+ template <typename TilingConfig>
30
+ __device__ __forceinline__ void initialize_mma_slice(
31
+ uint32_t (*a)[4],
32
+ uint32_t (*b)[4],
33
+ uint32_t* __restrict__ A1_SPTR_read,
34
+ uint32_t* __restrict__ A2_SPTR_read,
35
+ half __restrict__ (*B_SPTR_read)[WARP_K + PADDING_SHARED_MEM_FOR_B_8],
36
+ uint32_t* RPTR_Scales)
37
+ {
38
+ // Writing registers
39
+ // Registers to store FP6 fragments for a slice (64*16) of A matrix => 32 FP6 per thread => 6
40
+ // register per thread;
41
+ uint32_t a_1[2]; // NO double buffer
42
+ uint32_t a_2[4]; // NO double buffer
43
+ CopyFromSharedToRegister_AFrag<2>(a_1, A1_SPTR_read, 0);
44
+ CopyFromSharedToRegister_AFrag<4>(a_2, A2_SPTR_read, 0);
45
+ Dequant_32FP6_4Way(a, a_1, a_2, RPTR_Scales); // SIMT Dequant: dequantizing FP6 to FP16 at
46
+ // register level, dequantizing a slice each time
47
+ B_FromSharedToReg<TilingConfig>(b, B_SPTR_read, 0); // Loading B from shared to registers
48
+ }
49
+
50
+ template <typename TilingConfig>
51
+ __device__ __forceinline__ void core_mma_slice(
52
+ float c[][REG_PER_THREAD_C_TENSOR_16_16],
53
+ uint32_t (*a)[4],
54
+ uint32_t (*b)[4],
55
+ uint32_t* __restrict__ A1_SPTR_read,
56
+ uint32_t* __restrict__ A2_SPTR_read,
57
+ half __restrict__ (*B_SPTR_read)[WARP_K + PADDING_SHARED_MEM_FOR_B_8],
58
+ uint32_t* RPTR_Scales,
59
+ int slice_id) // writing slice[slice_id] to registers, k=0 -> slice_id=1 for prefetching
60
+ {
61
+ #ifdef DEBUG_MODE
62
+ assert(
63
+ (TilingConfig::WARP_COL_MMA_TENSORS == 1) ||
64
+ (TilingConfig::WARP_COL_MMA_TENSORS % 2 ==
65
+ 0)); // if WARP_COL_MMA_TENSORS == 1, B tile in registers is padded to a 16*16 MMA block
66
+ #endif
67
+ const int NumRegSets_a =
68
+ WARP_ROW_MMA_TENSORS; // 1 set = 4 registers, containing a 16*16 MMA block
69
+ const int NumRegSets_b = (TilingConfig::WARP_COL_MMA_TENSORS == 1)
70
+ ? 1
71
+ : TilingConfig::WARP_COL_MMA_TENSORS /
72
+ 2; // 1 set = 4 registers, containing a 16*16 MMA block
73
+ uint32_t(*c_uint_ptr)[REG_PER_THREAD_C_TENSOR_16_16] =
74
+ reinterpret_cast<uint32_t(*)[REG_PER_THREAD_C_TENSOR_16_16]>(
75
+ c); // Registers for accumulated FP32 results
76
+
77
+ // Setting RPTRs for double buffers
78
+ uint32_t(*a_read)[4] = a;
79
+ uint32_t(*a_write)[4] = a;
80
+ uint32_t(*b_read)[4] = b;
81
+ uint32_t(*b_write)[4] = b;
82
+ if (slice_id % 2 == 1) {
83
+ b_write += NumRegSets_b;
84
+ a_write += NumRegSets_a;
85
+ } else {
86
+ b_read += NumRegSets_b;
87
+ a_read += NumRegSets_a;
88
+ }
89
+
90
+ // Reading registers and issuing core tensor core computations (a slice of A and B tile in shared
91
+ // memory)
92
+ #pragma unroll
93
+ for (int i = 0; i < WARP_ROW_MMA_TENSORS; i++) {
94
+ if (TilingConfig::WARP_COL_MMA_TENSORS == 1) {
95
+ MMA_FP16_M16N8K16(c_uint_ptr[i], a_read[i], b_read[0]);
96
+ } else {
97
+ #pragma unroll
98
+ for (int j = 0; j < TilingConfig::WARP_COL_MMA_TENSORS / 2; j++) {
99
+ MMA_FP16_M16N8K16(c_uint_ptr[i + j * WARP_ROW_MMA_TENSORS], a_read[i], b_read[j]);
100
+ MMA_FP16_M16N8K16(c_uint_ptr[i + j * WARP_ROW_MMA_TENSORS] + 4,
101
+ a_read[i],
102
+ b_read[j] + 2); // c+4; b+2
103
+ }
104
+ }
105
+ }
106
+
107
+ // Writing registers
108
+ // Registers to store FP6 fragments for a slice (64*16) of A matrix => 32 FP6 per thread => 6
109
+ // register per thread;
110
+ uint32_t a_1[2]; // NO double buffer
111
+ uint32_t a_2[4]; // NO double buffer
112
+ CopyFromSharedToRegister_AFrag<2>(a_1, A1_SPTR_read, slice_id);
113
+ CopyFromSharedToRegister_AFrag<4>(a_2, A2_SPTR_read, slice_id);
114
+ Dequant_32FP6_4Way(
115
+ a_write, a_1, a_2, RPTR_Scales); // SIMT Dequant: dequantizing FP6 to FP16 at register
116
+ // level, dequantizing a slice each time
117
+ B_FromSharedToReg<TilingConfig>(
118
+ b_write, B_SPTR_read, slice_id); // Loading B from shared to registers
119
+ }
120
+
121
+ #else
122
+ // Old version with naive pipeline design
123
+ template <int NUM_INT_PER_THREAD>
124
+ __device__ __forceinline__ void CopyFromSharedToRegister_AFrag(uint32_t Reg[], uint32_t* SPTR)
125
+ {
126
+ int lane_id = threadIdx.x % WARP_SIZE;
127
+ #pragma unroll
128
+ for (int i = 0; i < NUM_INT_PER_THREAD; i++) { Reg[i] = SPTR[lane_id + i * WARP_SIZE]; }
129
+ }
130
+ template <typename TilingConfig>
131
+ __device__ __forceinline__ void PipelinedCoreLoop(
132
+ float c[][REG_PER_THREAD_C_TENSOR_16_16],
133
+ half __restrict__ (*read_SPTR)[WARP_K + PADDING_SHARED_MEM_FOR_B_8],
134
+ uint32_t* __restrict__ read_SPTR_Frag1,
135
+ uint32_t* __restrict__ read_SPTR_Frag2,
136
+ uint32_t* RPTR_Scales)
137
+ {
138
+ #ifdef DEBUG_MODE
139
+ assert(
140
+ (TilingConfig::WARP_COL_MMA_TENSORS == 1) ||
141
+ (TilingConfig::WARP_COL_MMA_TENSORS % 2 ==
142
+ 0)); // if WARP_COL_MMA_TENSORS == 1, B tile in registers is padded to a 16*16 MMA block
143
+ #endif
144
+ const int NumRegSets_a =
145
+ WARP_ROW_MMA_TENSORS; // 1 set = 4 registers, containing a 16*16 MMA block
146
+ const int NumRegSets_b = (TilingConfig::WARP_COL_MMA_TENSORS == 1)
147
+ ? 1
148
+ : TilingConfig::WARP_COL_MMA_TENSORS /
149
+ 2; // 1 set = 4 registers, containing a 16*16 MMA block
150
+
151
+ // Registers to store FP32 results
152
+ uint32_t(*c_uint_ptr)[REG_PER_THREAD_C_TENSOR_16_16] =
153
+ reinterpret_cast<uint32_t(*)[REG_PER_THREAD_C_TENSOR_16_16]>(c);
154
+ // Registers to store FP6 fragments for a slice (64*16) of A matrix => 32 FP6 per thread => 6
155
+ // register per thread;
156
+ uint32_t a_1[2 * 2]; // double buffer is used
157
+ uint32_t a_2[4 * 2]; // double buffer is used
158
+ // Registers to store decompressed FP6
159
+ uint32_t a[NumRegSets_a * 1][4]; // No double buffer
160
+ // Register to store FP16 B matrix (a slice)
161
+ uint32_t b[NumRegSets_b * 2][4]; // double buffer is used
162
+
163
+ // Overlapped Smem and TC pipeline: pre-loading from shared to registers
164
+ CopyFromSharedToRegister_AFrag<2>(a_1, read_SPTR_Frag1);
165
+ CopyFromSharedToRegister_AFrag<4>(a_2, read_SPTR_Frag2);
166
+ B_FromSharedToReg<TilingConfig>(b, read_SPTR, 0);
167
+
168
+ #pragma unroll
169
+ for (int k = 0; k < WARP_K_MMA_TENSORS; k++) {
170
+ uint32_t(*b_read)[4] = b;
171
+ uint32_t(*b_write)[4] = b;
172
+ uint32_t* a_1_read = a_1;
173
+ uint32_t* a_1_write = a_1;
174
+ uint32_t* a_2_read = a_2;
175
+ uint32_t* a_2_write = a_2;
176
+ if (k % 2 == 0) {
177
+ b_write += NumRegSets_b;
178
+ a_1_write += 2;
179
+ a_2_write += 4;
180
+ } else {
181
+ b_read += NumRegSets_b;
182
+ a_1_read += 2;
183
+ a_2_read += 4;
184
+ }
185
+ // data loading
186
+ if (k + 1 < WARP_K_MMA_TENSORS) {
187
+ // updating SPTR for fragment1 and fragment2
188
+ read_SPTR_Frag1 += 2 * WARP_SIZE;
189
+ read_SPTR_Frag2 += 4 * WARP_SIZE;
190
+ CopyFromSharedToRegister_AFrag<2>(a_1_write, read_SPTR_Frag1);
191
+ CopyFromSharedToRegister_AFrag<4>(a_2_write, read_SPTR_Frag2);
192
+ B_FromSharedToReg<TilingConfig>(b_write, read_SPTR, (k + 1) * MMA_16);
193
+ }
194
+ // SIMT Dequant + Tensor Core computations
195
+ Dequant_32FP6_4Way(
196
+ a, a_1_read, a_2_read, RPTR_Scales); // Dequantizing FP6 to FP16 at register level,
197
+ // dequantizing a slice each time
198
+ #pragma unroll
199
+ for (int i = 0; i < WARP_ROW_MMA_TENSORS; i++) {
200
+ if (TilingConfig::WARP_COL_MMA_TENSORS == 1)
201
+ MMA_FP16_M16N8K16(c_uint_ptr[i], a[i], b_read[0]);
202
+ else {
203
+ #pragma unroll
204
+ for (int j = 0; j < TilingConfig::WARP_COL_MMA_TENSORS / 2; j++) {
205
+ MMA_FP16_M16N8K16(c_uint_ptr[i + j * WARP_ROW_MMA_TENSORS], a[i], b_read[j]);
206
+ MMA_FP16_M16N8K16(c_uint_ptr[i + j * WARP_ROW_MMA_TENSORS] + 4,
207
+ a[i],
208
+ b_read[j] + 2); // c+4; b+2
209
+ }
210
+ }
211
+ }
212
+ }
213
+ }
214
+ #endif // #ifdef PIPELINE_LEVEL_SMEM
215
+
216
+ template <typename TilingConfig>
217
+ __device__ __forceinline__ void StoreToSharedMemoryFromRegister(
218
+ float (*smem_CFrag)[TilingConfig::TILE_M + PADDING_SHARED_MEM_FOR_C_4],
219
+ float c[][REG_PER_THREAD_C_TENSOR_16_16])
220
+ {
221
+ const int lane_id = threadIdx.x % WARP_SIZE;
222
+ const int warpId = threadIdx.x / WARP_SIZE;
223
+ int warp_row_offset = warpId * (MMA_16 * WARP_ROW_MMA_TENSORS);
224
+ #pragma unroll
225
+ for (int i = 0; i < WARP_ROW_MMA_TENSORS; i++) {
226
+ #pragma unroll
227
+ for (int j = 0; j < TilingConfig::WARP_COL_MMA_TENSORS;
228
+ j++) { // Dealing with one 16*8 Tensor
229
+ int RegSetID = i + (j / 2) * WARP_ROW_MMA_TENSORS;
230
+ int RegOffset = (j % 2) * (REG_PER_THREAD_C_TENSOR_16_16 / 2);
231
+ int Tensor_row_offset = warp_row_offset + i * MMA_16;
232
+ int Tensor_col_offset = j * MMA_8;
233
+ #pragma unroll
234
+ for (int r = 0; r < REG_PER_THREAD_C_TENSOR_16_16 / 2; r++) {
235
+ int row_offset = lane_id / 4;
236
+ if (r >= 2) row_offset += 8;
237
+ int col_offset = (lane_id % 4) * 2;
238
+ if (r % 2 == 1) col_offset += 1;
239
+ smem_CFrag[Tensor_col_offset + col_offset][Tensor_row_offset + row_offset] =
240
+ c[RegSetID][r + RegOffset];
241
+ }
242
+ }
243
+ }
244
+ }
245
+
246
+ #endif
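`core_mma_slice` ping-pongs between the two halves of the `a`/`b` register arrays based on the parity of `slice_id`, so one half feeds the MMAs while the other is refilled; a sketch of the rotation:

```python
# Sketch of the register double-buffer rotation in core_mma_slice: depending on
# the parity of slice_id, one half of the a/b register arrays is read by the
# MMAs while the other half is rewritten with the next slice from shared memory.
def buffer_halves(slice_id: int, num_reg_sets: int):
    if slice_id % 2 == 1:
        read_offset, write_offset = 0, num_reg_sets
    else:
        read_offset, write_offset = num_reg_sets, 0
    return read_offset, write_offset


# The outer K loop issues slices 1, 2, 3 and then 0 (the prefetched slice of the
# next tile), so reads and writes ping-pong between the two halves every call.
for slice_id in (1, 2, 3, 0):
    print(slice_id, buffer_halves(slice_id, 4))
```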
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/utils_gmem.cuh ADDED
@@ -0,0 +1,86 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ // This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112
7
+
8
+ #ifndef DEEPSPEED_CUDA_LINEAR_UTILS_GMEM_CUH
9
+ #define DEEPSPEED_CUDA_LINEAR_UTILS_GMEM_CUH
10
+
11
+ #include <assert.h>
12
+ #include "configs.h"
13
+ #include "ptx_cp.async.cuh"
14
+
15
+ /*
16
+ * Copying A1/A2 from global memory to shared memory.
17
+ * Usually 1024 or 2048 Bytes
18
+ */
19
+ template <int SMEM_SIZE_IN_BYTES_PER_WARP>
20
+ __device__ __forceinline__ void CopyFromGlobalToShared_A(uint32_t* SPTR,
21
+ const uint4* GPTR,
22
+ bool pred_guard = true)
23
+ {
24
+ #ifdef DEBUG_MODE
25
+ static_assert(SMEM_SIZE_IN_BYTES_PER_WARP / WARP_SIZE % 16 == 0);
26
+ #endif
27
+ int lane_id = threadIdx.x % WARP_SIZE;
28
+ half* SPTR_HALF = reinterpret_cast<half*>(SPTR);
29
+ const half* GPTR_HALF = reinterpret_cast<const half*>(GPTR);
30
+ SPTR_HALF += lane_id * 8;
31
+ GPTR_HALF += lane_id * 8;
32
+ #pragma unroll
33
+ for (int i = 0; i < SMEM_SIZE_IN_BYTES_PER_WARP / WARP_SIZE / 16; i++) {
34
+ cp_async<16>(SPTR_HALF, GPTR_HALF, pred_guard);
35
+ SPTR_HALF += 256; // Forward 512 Bytes
36
+ GPTR_HALF += 256; // Forward 512 Bytes
37
+ }
38
+ }
39
+
40
+ /*
41
+ * Copying 64 Quant Scales (FP16) from global memory to shared memory.
42
+ */
43
+ __device__ __forceinline__ void CopyFromGlobalToShared_Scales(half* SPTR_QuantScales,
44
+ const half* GPTR_A_Scales)
45
+ {
46
+ int lane_id = threadIdx.x % WARP_SIZE;
47
+ int Offset_Shared = lane_id * 2;
48
+ int Offset_Global = lane_id / 4 + (lane_id % 4) * 16;
49
+ for (int i = 0; i < 2; i++)
50
+ SPTR_QuantScales[Offset_Shared + i] = GPTR_A_Scales[Offset_Global + i * 8];
51
+ }
52
+
53
+ /*
54
+ * (1) Copying X rows * 64 columns of FP16 values, originally in row major
55
+ * (2) Copying 64 rows * X columns of FP16 values, originally in column major
56
+ * 16 Bytes per thread -> 512 Bytes per WARP = 4 line per WARP = 1 line per 8 Threads
57
+ */
58
+ template <int MaxNumOfLinesToCopy, int BLOCK_WARPS>
59
+ __device__ __forceinline__ void CopyFromGlobalToShared(
60
+ half __restrict__ (*SharedPTR)[WARP_K + PADDING_SHARED_MEM_FOR_B_8],
61
+ const half* GlobalPTR,
62
+ const int GlobalStride,
63
+ const int NumOfLinesLeft, // To support arbitrary N dimensions.
64
+ bool Pred = true)
65
+ {
66
+ // static parameters: 1 Group (8 Threads) can copy 1 line (64 FP16) each time
67
+ const int NumOfThreads = BLOCK_WARPS * WARP_SIZE;
68
+ const int NumOfGroups = NumOfThreads / 8;
69
+ const int MaxIteration = (MaxNumOfLinesToCopy - 1) / NumOfGroups + 1;
70
+ // runtime variables
71
+ const int line_id = threadIdx.x / 8;
72
+ const int line_offset = (threadIdx.x % 8) * 8;
73
+ // PTR for source global memory and target shared memory
74
+ GlobalPTR += line_id * GlobalStride + line_offset;
75
+ SharedPTR += line_id;
76
+ #pragma unroll
77
+ for (int i = 0; i < MaxIteration; i++) {
78
+ bool AsyncCopyPred = (line_id + i * NumOfGroups) < NumOfLinesLeft && Pred;
79
+ cp_async<16>(&(*SharedPTR)[line_offset], GlobalPTR, AsyncCopyPred);
80
+ //
81
+ GlobalPTR += NumOfGroups * GlobalStride;
82
+ SharedPTR += NumOfGroups;
83
+ }
84
+ }
85
+
86
+ #endif
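In `CopyFromGlobalToShared`, eight threads cooperate on one 64-half line (16 bytes per thread via `cp.async`) and thread groups stride down the tile; a sketch of that mapping and the iteration count:

```python
# Thread mapping for CopyFromGlobalToShared: one group of 8 threads copies one
# 64-half line per iteration (16 bytes per thread via cp.async), and groups
# advance by NumOfGroups lines until MaxNumOfLinesToCopy is covered.
def copy_plan(block_warps: int, max_lines: int, warp_size: int = 32):
    num_threads = block_warps * warp_size
    num_groups = num_threads // 8
    max_iterations = (max_lines - 1) // num_groups + 1
    return num_groups, max_iterations


def thread_slot(tid: int):
    line_id = tid // 8            # which line this thread helps copy
    line_offset = (tid % 8) * 8   # which 8-half chunk within the line
    return line_id, line_offset


print(copy_plan(block_warps=4, max_lines=64))           # (16 groups, 4 iterations)
print(thread_slot(0), thread_slot(9), thread_slot(127))
```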
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/utils_paralleldequant.cuh ADDED
@@ -0,0 +1,127 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ // This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112
7
+
8
+ #ifndef DEEPSPEED_CUDA_LINEAR_UTILS_PARALLELDEQUANT_CUH
9
+ #define DEEPSPEED_CUDA_LINEAR_UTILS_PARALLELDEQUANT_CUH
10
+
11
+ #include <cuda.h>
12
+ #include <cuda_fp16.h>
13
+ #include <cuda_runtime.h>
14
+
15
+ /*
16
+ * Input: R1
17
+ * Outputs: R1, R2
18
+ * Note: Simplified Exponent calculation is applied.
19
+ */
20
+ __device__ __forceinline__ void FP6_FP16_Cast_4Way(u_int32_t* R1, u_int32_t* R2)
21
+ {
22
+ *R2 = *R1 & 0x80808080;
23
+ *R1 = *R1 >> 2;
24
+ *R1 = *R1 & 0x1f1f1f1f;
25
+ *R2 = *R2 | *R1;
26
+ *R1 = *R2 & 0x9f009f00;
27
+ *R2 = *R2 & 0x009f009f;
28
+ *R2 = *R2 << 8;
29
+ }
30
+
31
+ /*
32
+ * Input: R1
33
+ * Outputs: R1, R2
34
+ * Note: Simplified Exponent calculation is NOT applied.
35
+ */
36
+ __device__ __forceinline__ void FP6_FP16_Cast_4Way_Naive(u_int32_t* R1, u_int32_t* R2)
37
+ {
38
+ //*R2 = *R1 & 0x80808080;
39
+ *R2 = *R1 & 0xc0c0c0c0;
40
+ *R1 = *R1 >> 2;
41
+ //*R1 = *R1 & 0x1f1f1f1f;
42
+ *R1 = *R1 & 0x0f0f0f0f;
43
+ *R2 = *R2 | *R1;
44
+ //
45
+ //*R1 = *R2 & 0x9f009f00;
46
+ //*R2 = *R2 & 0x009f009f;
47
+ *R1 = *R2 & 0xcf00cf00;
48
+ if (!(*R1 & 0x40000000) && (*R1 & 0x0c000000)) *R1 = *R1 | 0x30000000;
49
+ if (!(*R1 & 0x00004000) && (*R1 & 0x00000c00)) *R1 = *R1 | 0x00003000;
50
+ *R2 = *R2 & 0x00cf00cf;
51
+ if (!(*R2 & 0x00400000) && (*R2 & 0x000c0000)) *R2 = *R2 | 0x00300000;
52
+ if (!(*R2 & 0x00000040) && (*R2 & 0x0000000c)) *R2 = *R2 | 0x00000030;
53
+ //
54
+ *R2 = *R2 << 8;
55
+ //*R1 = 0x3c003c00;
56
+ //*R2 = 0x3c003c00;
57
+ }
58
+
59
+ __device__ __forceinline__ u_int32_t MultScale(u_int32_t PackedFP16Pair, half Scale)
60
+ {
61
+ half* FP16_1 = reinterpret_cast<half*>(&PackedFP16Pair);
62
+ half* FP16_2 = FP16_1 + 1;
63
+ uint32_t output;
64
+ half* output_half_ptr = reinterpret_cast<half*>(&output);
65
+ output_half_ptr[0] = __hmul(__hmul(*FP16_1, __float2half(4096.0f)), Scale);
66
+ output_half_ptr[1] = __hmul(__hmul(*FP16_2, __float2half(4096.0f)), Scale);
67
+ return output;
68
+ }
69
+
70
+ __device__ __forceinline__ void Dequant_32FP6_4Way(u_int32_t __restrict__ Reg[][4],
71
+ u_int32_t __restrict__* read_RPTR_Frag1,
72
+ u_int32_t __restrict__* read_RPTR_Frag2,
73
+ u_int32_t* Scales)
74
+ {
75
+ u_int32_t* OutputRegs = reinterpret_cast<u_int32_t*>(Reg);
76
+ u_int32_t* Frag1_PTR = read_RPTR_Frag1;
77
+ u_int32_t* Frag2_PTR = read_RPTR_Frag2;
78
+ half* Scale_RPTR = reinterpret_cast<half*>(Scales);
79
+ u_int32_t Packed_FP6 = 0;
80
+ u_int32_t tmp = 0;
81
+ // Dequantizing 32 FP6, each Loop dequantizing 4 FP6
82
+ #pragma unroll(8)
83
+ for (int i = 0; i < 8; i++) {
84
+ // Frag1
85
+ Packed_FP6 = (*Frag1_PTR) & 0xc0c0c0c0;
86
+ if (i % 4 == 3)
87
+ Frag1_PTR++;
88
+ else
89
+ (*Frag1_PTR) = (*Frag1_PTR) << 2;
90
+ // Frag2
91
+ tmp = (*Frag2_PTR) & 0xf0f0f0f0;
92
+ tmp = tmp >> 2;
93
+ if (i % 2 == 1)
94
+ Frag2_PTR++;
95
+ else
96
+ (*Frag2_PTR) = (*Frag2_PTR) << 4;
97
+ // Packed_FP6
98
+ Packed_FP6 = Packed_FP6 | tmp;
99
+ //
100
+ FP6_FP16_Cast_4Way(&Packed_FP6, &tmp);
101
+ //
102
+ *OutputRegs = MultScale(Packed_FP6, Scale_RPTR[0]); // Multiply by FP16 scales
103
+ OutputRegs += 1;
104
+ *OutputRegs = MultScale(tmp, Scale_RPTR[1]); // Multiply by FP16 scales
105
+ OutputRegs += 1;
106
+ // Updating offset for FP16 scales for every two iterations
107
+ if (i % 2 == 1) Scale_RPTR += 2;
108
+ }
109
+ }
110
+
111
+ /*
112
+ * Extracting the FP16 quantization scales from shared memory into registers
+ * (broadcast across each group of four lanes with __shfl_sync).
113
+ */
114
+ __device__ __forceinline__ void ExtractFromSharedToReg_Scales(uint32_t* Scales,
115
+ half* WARP_SPTR_Scales)
116
+ {
117
+ int lane_id = threadIdx.x % WARP_SIZE;
118
+ uint32_t* SPTR_uint = reinterpret_cast<uint32_t*>(WARP_SPTR_Scales);
119
+ uint32_t tmpReg = SPTR_uint[lane_id];
120
+ #pragma unroll
121
+ for (int i = 0; i < 4; i++) {
122
+ // T __shfl_sync(unsigned mask, T var, int srcLane, int width=warpSize);
123
+ Scales[i] = __shfl_sync(0xffffffff, tmpReg, i, 4);
124
+ }
125
+ }
126
+
127
+ #endif
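
The constant 4096 inside MultScale() reflects the exponent-bias gap between FP6 (E3M2, bias 3) and FP16 (bias 15): once the FP6 exponent/mantissa bits are repositioned inside an FP16, the value is short by a factor of 2^(15-3) = 2^12, which the kernel folds into the per-channel scale. Below is a small Python check of that identity, assuming a standard sign/exp3/mant2 layout for the 6-bit code; it is a reference decode, not the kernel's exact bit path.

import struct

def fp6_e3m2_value(code):                         # direct E3M2 decode, bias 3
    s, e, m = (code >> 5) & 1, (code >> 2) & 7, code & 3
    mag = 2.0 ** (1 - 3) * (m / 4.0) if e == 0 else 2.0 ** (e - 3) * (1 + m / 4.0)
    return -mag if s else mag

def fp6_bits_as_fp16(code):                       # same bits placed in FP16 fields
    s, e, m = (code >> 5) & 1, (code >> 2) & 7, code & 3
    bits = (s << 15) | (e << 10) | (m << 8)
    return struct.unpack('<e', struct.pack('<H', bits))[0]

for code in range(64):
    assert abs(fp6_e3m2_value(code) - fp6_bits_as_fp16(code) * 4096.0) < 1e-12
print("FP16 reinterpretation * 2**12 reproduces every E3M2 code")
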
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/weight_prepacking.h ADDED
@@ -0,0 +1,209 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ // This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112
7
+
8
+ #ifndef DEEPSPEED_CUDA_LINEAR_WEIGHT_PREPACKING_H
9
+ #define DEEPSPEED_CUDA_LINEAR_WEIGHT_PREPACKING_H
10
+
11
+ #include <assert.h>
12
+ #include <stdio.h>
13
+ #include <vector>
14
+
15
+ using namespace std;
16
+
17
+ void Padding_8_FP6_To_8_Bytes(unsigned char Padded_FP6[],
18
+ unsigned char* FP6_Array) // padding 0 to the lowest bit location
19
+ {
20
+ Padded_FP6[0] = FP6_Array[0] & 0xfc;
21
+ Padded_FP6[1] = (FP6_Array[0] << 6) | ((FP6_Array[1] >> 2) & 0xfc);
22
+ Padded_FP6[2] = (FP6_Array[1] << 4) | ((FP6_Array[2] >> 4) & 0xfc);
23
+ Padded_FP6[3] = FP6_Array[2] << 2;
24
+ Padded_FP6[4] = FP6_Array[3] & 0xfc;
25
+ Padded_FP6[5] = (FP6_Array[3] << 6) | ((FP6_Array[4] >> 2) & 0xfc);
26
+ Padded_FP6[6] = (FP6_Array[4] << 4) | ((FP6_Array[5] >> 4) & 0xfc);
27
+ Padded_FP6[7] = FP6_Array[5] << 2;
28
+ }
29
+
30
+ unsigned char Extract_2_Bits_From_4_PaddedFP6(unsigned char B1,
31
+ unsigned char B2,
32
+ unsigned char B3,
33
+ unsigned char B4)
34
+ {
35
+ unsigned char out;
36
+ out = (B1 & 0xc0) | ((B2 & 0xc0) >> 2) | ((B3 & 0xc0) >> 4) | ((B4 & 0xc0) >> 6);
37
+ return out;
38
+ }
39
+
40
+ unsigned char Extract_4_Bits_From_2_PaddedFP6(
41
+ unsigned char B1,
42
+ unsigned char
43
+ B2) // The highest two bits are already extracted by Extract_2_Bits_From_4_PaddedFP6();
44
+ {
45
+ unsigned char out;
46
+ out = ((B1 << 2) & 0xf0) | ((B2 >> 2) & 0x0f);
47
+ return out;
48
+ }
49
+
50
+ // dealing with 4 1*8 blocks of FP6
51
+ void Assign_32_FP6_To_4_Thread(vector<unsigned char> Seg_2bit[],
52
+ vector<unsigned char> Seg_4bit[],
53
+ unsigned char* PTR_1,
54
+ unsigned char* PTR_2,
55
+ unsigned char* PTR_3,
56
+ unsigned char* PTR_4)
57
+ {
58
+ unsigned char Padded_8_FP8[4][8];
59
+ Padding_8_FP6_To_8_Bytes(Padded_8_FP8[0], PTR_1);
60
+ Padding_8_FP6_To_8_Bytes(Padded_8_FP8[1], PTR_2);
61
+ Padding_8_FP6_To_8_Bytes(Padded_8_FP8[2], PTR_3);
62
+ Padding_8_FP6_To_8_Bytes(Padded_8_FP8[3], PTR_4);
63
+ //
64
+ unsigned char Seg1_Byte1_T[4];
65
+ unsigned char Seg1_Byte2_T[4];
66
+ unsigned char Seg2_Byte1_T[4];
67
+ unsigned char Seg2_Byte2_T[4];
68
+ unsigned char Seg2_Byte3_T[4];
69
+ unsigned char Seg2_Byte4_T[4];
70
+ for (int t = 0; t < 4; t++) {
71
+ Seg1_Byte1_T[t] = Extract_2_Bits_From_4_PaddedFP6(Padded_8_FP8[0][0 + t * 2],
72
+ Padded_8_FP8[0][1 + t * 2],
73
+ Padded_8_FP8[1][0 + t * 2],
74
+ Padded_8_FP8[1][1 + t * 2]);
75
+ Seg1_Byte2_T[t] = Extract_2_Bits_From_4_PaddedFP6(Padded_8_FP8[2][0 + t * 2],
76
+ Padded_8_FP8[2][1 + t * 2],
77
+ Padded_8_FP8[3][0 + t * 2],
78
+ Padded_8_FP8[3][1 + t * 2]);
79
+ Seg2_Byte1_T[t] =
80
+ Extract_4_Bits_From_2_PaddedFP6(Padded_8_FP8[0][0 + t * 2], Padded_8_FP8[0][1 + t * 2]);
81
+ Seg2_Byte2_T[t] =
82
+ Extract_4_Bits_From_2_PaddedFP6(Padded_8_FP8[1][0 + t * 2], Padded_8_FP8[1][1 + t * 2]);
83
+ Seg2_Byte3_T[t] =
84
+ Extract_4_Bits_From_2_PaddedFP6(Padded_8_FP8[2][0 + t * 2], Padded_8_FP8[2][1 + t * 2]);
85
+ Seg2_Byte4_T[t] =
86
+ Extract_4_Bits_From_2_PaddedFP6(Padded_8_FP8[3][0 + t * 2], Padded_8_FP8[3][1 + t * 2]);
87
+ }
88
+ //
89
+ for (int t = 0; t < 4; t++) {
90
+ Seg_2bit[t].push_back(Seg1_Byte1_T[t]);
91
+ Seg_2bit[t].push_back(Seg1_Byte2_T[t]);
92
+ Seg_4bit[t].push_back(Seg2_Byte1_T[t]);
93
+ Seg_4bit[t].push_back(Seg2_Byte2_T[t]);
94
+ Seg_4bit[t].push_back(Seg2_Byte3_T[t]);
95
+ Seg_4bit[t].push_back(Seg2_Byte4_T[t]);
96
+ }
97
+ return;
98
+ }
99
+
100
+ void BitInterleaving_2bit(unsigned char* PTR_4Bytes)
101
+ {
102
+ unsigned int* PTR_UINT = reinterpret_cast<unsigned int*>(PTR_4Bytes);
103
+ unsigned int input = *PTR_UINT;
104
+ //
105
+ // int order_2bit[16] = {1,5,9,13,3,7,11,15,2,6,10,14,4,8,12,16}; // pre-defined order for
106
+ // bit-interleaving in QuantLLM
107
+ int order_2bit[16] = {
108
+ 2, 6, 10, 14, 4, 8, 12, 16, 1, 5, 9, 13, 3, 7, 11, 15}; // pre-defined order for
109
+ // bit-interleaving in QuantLLM
110
+ unsigned int Frags_2bit[16]; // The highest 2 bits are used to store the extracted fragments.
111
+ for (int i = 0; i < 16; i++) Frags_2bit[i] = (input << 2 * (order_2bit[i] - 1)) & 0xc0000000;
112
+ //
113
+ unsigned int output = 0x00000000;
114
+ for (int i = 0; i < 16; i++) output |= (Frags_2bit[i] >> (i * 2));
115
+ //
116
+ *PTR_UINT = output;
117
+ }
118
+
119
+ void BitInterleaving_4bit(unsigned char* PTR_4Bytes)
120
+ {
121
+ unsigned int* PTR_UINT = reinterpret_cast<unsigned int*>(PTR_4Bytes);
122
+ unsigned int input = *PTR_UINT;
123
+ //
124
+ // int order_4bit[8] = {1,5,3,7,2,6,4,8}; // pre-defined order for bit-interleaving in QuantLLM
125
+ int order_4bit[8] = {
126
+ 2, 6, 4, 8, 1, 5, 3, 7}; // pre-defined order for bit-interleaving in QuantLLM
127
+ unsigned int Frags_4bit[8]; // The highest 4 bits are used to store the extracted fragments.
128
+ for (int i = 0; i < 8; i++) Frags_4bit[i] = (input << 4 * (order_4bit[i] - 1)) & 0xf0000000;
129
+ //
130
+ unsigned int output = 0x00000000;
131
+ for (int i = 0; i < 8; i++) output |= (Frags_4bit[i] >> (i * 4));
132
+ //
133
+ *PTR_UINT = output;
134
+ }
135
+
136
+ /*
137
+ * Inputs:
138
+ * (1) unsigned char Weight_6bit [M*K*6/8]
139
+ * Outputs:
140
+ * (1) unsigned char Weight_2bit [M*K*2/8]
141
+ * (2) unsigned char Weight_4bit [M*K*4/8]
142
+ *
143
+ * Assumption: Weight_6bit, Weight_2bit, Weight_4bit all stored continuously in row-major.
144
+ * 8 FP6 = 6 Bytes
145
+ * 8 FP4 = 4 Bytes
146
+ * 8 FP2 = 2 Bytes
147
+ */
148
+ void weight_matrix_prepacking(int* FP6Weights, size_t M, size_t K)
149
+ {
150
+ assert(M % 64 == 0);
151
+ assert(K % 64 == 0);
152
+ //
153
+ unsigned char* Weight_6bit = reinterpret_cast<unsigned char*>(FP6Weights);
154
+ unsigned char* Weight_2bit = Weight_6bit;
155
+ unsigned char* Weight_4bit = Weight_6bit + M * K * 2 / 8;
156
+ //
157
+ vector<unsigned char> A_Segment_2bit[32];
158
+ vector<unsigned char> A_Segment_4bit[32];
159
+ //
160
+ size_t BytesPerRow = K * 6 / 8;
161
+ // Pass-1: (1) 2+4 split; (2) assign weights to 32 threads.
162
+ for (size_t i = 0; i < M / 64; i++) //
163
+ {
164
+ for (size_t j = 0; j < K / 16; j++) {
165
+ for (size_t k = 0; k < 64 / 16; k++) {
166
+ size_t row = i * 64 + k * 16;
167
+ size_t col = j * 16;
168
+ unsigned char* StartPTR_1 = Weight_6bit + row * BytesPerRow + col * 6 / 8;
169
+ unsigned char* StartPTR_2 = StartPTR_1 + 8 * BytesPerRow;
170
+ unsigned char* StartPTR_3 = StartPTR_1 + 8 * 6 / 8;
171
+ unsigned char* StartPTR_4 = StartPTR_2 + 8 * 6 / 8;
172
+ // Dealing with each 16*16 block then...
173
+ for (int l = 0; l < 8; l++)
174
+ Assign_32_FP6_To_4_Thread(&A_Segment_2bit[l * 4],
175
+ &A_Segment_4bit[l * 4],
176
+ StartPTR_1 + l * BytesPerRow,
177
+ StartPTR_2 + l * BytesPerRow,
178
+ StartPTR_3 + l * BytesPerRow,
179
+ StartPTR_4 + l * BytesPerRow);
180
+ }
181
+ }
182
+ }
183
+ // Verifying the length of 2_bit segments and 4_bit segments
184
+ size_t BytesPerThread_2bit = M * K * 2 / 8 / 32;
185
+ size_t BytesPerThread_4bit = M * K * 4 / 8 / 32;
186
+ for (int i = 0; i < 32; i++) {
187
+ assert(A_Segment_2bit[i].size() == BytesPerThread_2bit);
188
+ assert(A_Segment_4bit[i].size() == BytesPerThread_4bit);
189
+ }
190
+ // Pass-2: Optimizing coalesced global memory access
191
+ for (size_t i = 0; i < BytesPerThread_2bit / 4; i++)
192
+ for (int t = 0; t < 32; t++)
193
+ for (int b = 0; b < 4; b++)
194
+ Weight_2bit[i * 128 + t * 4 + (3 - b)] =
195
+ A_Segment_2bit[t]
196
+ [i * 4 + b]; // why (3-b): special byte order within a register
197
+ for (size_t i = 0; i < BytesPerThread_4bit / 4; i++)
198
+ for (int t = 0; t < 32; t++)
199
+ for (int b = 0; b < 4; b++)
200
+ Weight_4bit[i * 128 + t * 4 + (3 - b)] =
201
+ A_Segment_4bit[t][i * 4 + b]; // why (3-b):special byte order within a register
202
+ // Pass-3: Bit-level interleaving
203
+ for (size_t i = 0; i < BytesPerThread_2bit * 32 / 4; i++)
204
+ BitInterleaving_2bit(Weight_2bit + 4 * i);
205
+ for (size_t i = 0; i < BytesPerThread_4bit * 32 / 4; i++)
206
+ BitInterleaving_4bit(Weight_4bit + 4 * i);
207
+ }
208
+
209
+ #endif
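
weight_matrix_prepacking() splits every 6-bit weight into a 2-bit stream (M*K*2/8 bytes) and a 4-bit stream (M*K*4/8 bytes) so each thread can fetch both with aligned 32-bit loads; Dequant_32FP6_4Way() later stitches the halves back together. Below is a hedged Python sketch of just that split/reassembly invariant; the function names are illustrative, not from the source.

def split_fp6(code):                     # 6-bit code -> (top 2 bits, low 4 bits)
    return code >> 4, code & 0xF

def reassemble_left_aligned(hi2, lo4):
    # Mirrors the kernel's byte view: bits 7-6 come from the 2-bit stream,
    # bits 5-2 from the 4-bit stream, zero padding in bits 1-0.
    return (hi2 << 6) | (lo4 << 2)

for code in range(64):
    hi2, lo4 = split_fp6(code)
    assert reassemble_left_aligned(hi2, lo4) == code << 2
print("2-bit + 4-bit streams reassemble every left-aligned FP6 code")
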
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/__init__.py ADDED
@@ -0,0 +1,7 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .mixed_gemm import *
7
+ from .moe_gemm import *
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (257 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/cutlass_ops.cpp ADDED
@@ -0,0 +1,19 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include <torch/extension.h>
7
+
8
+ #include "mixed_gemm.h"
9
+ #include "moe_gemm.h"
10
+
11
+ PYBIND11_MODULE(TORCH_EXTENSION_NAME, m)
12
+ {
13
+ // mixed_gemm.h
14
+ m.def("mixed_gemm", &mixed_gemm, "Mixed-precision GEMM");
15
+
16
+ // moe_gemm.h
17
+ m.def("moe_gemm", &moe_gemm, "MultiGEMM for MoE (16-bit weights)");
18
+ m.def("mixed_moe_gemm", &mixed_moe_gemm, "MultiGEMM for MoE (4-bit/8-bit weights)");
19
+ }
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/__init__.py ADDED
@@ -0,0 +1,6 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .mixed_gemm import *
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (248 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/__pycache__/mixed_gemm.cpython-310.pyc ADDED
Binary file (2.63 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/mixed_gemm.cu ADDED
@@ -0,0 +1,93 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include <c10/cuda/CUDAStream.h>
7
+ #include "mixed_gemm.h"
8
+ #include "mixed_gemm_api.h"
9
+ #include "weight_variant.h"
10
+
11
+ // Switch helpers inspired by
12
+ // https://github.com/NVIDIA/DALI/blob/main/include/dali/core/static_switch.h
13
+ // and https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Dispatch.h
14
+
15
+ #define ACT_DTYPE_SWITCH(COND, ...) \
16
+ [&] { \
17
+ if (COND) { \
18
+ using ActivationDtype = __half; \
19
+ return __VA_ARGS__(); \
20
+ } else { \
21
+ using ActivationDtype = __nv_bfloat16; \
22
+ return __VA_ARGS__(); \
23
+ } \
24
+ }()
25
+
26
+ #define WEIGHT_VARIANT_SWITCH(COND, ...) \
27
+ [&] { \
28
+ if (COND) { \
29
+ constexpr WeightVariant WVariant = WeightVariant::kFP8; \
30
+ return __VA_ARGS__(); \
31
+ } else { \
32
+ constexpr WeightVariant WVariant = WeightVariant::kFP4; \
33
+ return __VA_ARGS__(); \
34
+ } \
35
+ }()
36
+
37
+ void mixed_gemm(at::Tensor& output,
38
+ at::Tensor& hidden_states,
39
+ at::Tensor& weight,
40
+ at::Tensor& scales,
41
+ c10::optional<at::Tensor>& bias,
42
+ int num_bits,
43
+ int activation_raw)
44
+ {
45
+ TORCH_CHECK(output.dtype() == hidden_states.dtype(),
46
+ "Output and hidden states must have the same dtype");
47
+ TORCH_CHECK(num_bits == 4 || num_bits == 8, "Data width must be 4 or 8");
48
+ TORCH_CHECK(output.size(0) == hidden_states.size(0), "Token dimension mismatch");
49
+
50
+ int32_t m = output.size(0);
51
+ int32_t k = hidden_states.size(1);
52
+ int32_t n = weight.size(1);
53
+
54
+ TORCH_CHECK(weight.size(0) == k, "Weight dimension mismatch");
55
+
56
+ ACT_DTYPE_SWITCH(hidden_states.dtype() == torch::kFloat16, [&] {
57
+ WEIGHT_VARIANT_SWITCH(num_bits == 8, [&] {
58
+ fastertransformer::CutlassFpAIntBGemmRunner<ActivationDtype, WVariant> runner =
59
+ *MixedGemmContext<ActivationDtype, WVariant>::Instance().GeMM_Runner();
60
+
61
+ ActivationType activation_type = (ActivationType)activation_raw;
62
+ if (!bias.has_value() && activation_type == ActivationType::IDENTITY) {
63
+ runner.gemm((ActivationDtype*)hidden_states.data_ptr(),
64
+ (const char*)weight.data_ptr(),
65
+ (ActivationDtype*)scales.data_ptr(),
66
+ (ActivationDtype*)output.data_ptr(),
67
+ m,
68
+ n,
69
+ k,
70
+ nullptr,
71
+ 0,
72
+ at::cuda::getCurrentCUDAStream());
73
+ return;
74
+ } else {
75
+ ActivationDtype* bias_ptr = nullptr;
76
+ if (bias.has_value()) { bias_ptr = (ActivationDtype*)bias.value().data_ptr(); }
77
+ runner.gemm_bias_act((ActivationDtype*)hidden_states.data_ptr(),
78
+ (char*)weight.data_ptr(),
79
+ (ActivationDtype*)scales.data_ptr(),
80
+ bias_ptr,
81
+ (ActivationDtype*)output.data_ptr(),
82
+ m,
83
+ n,
84
+ k,
85
+ activation_type,
86
+ nullptr,
87
+ 0,
88
+ at::cuda::getCurrentCUDAStream());
89
+ return;
90
+ }
91
+ });
92
+ });
93
+ }
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/mixed_gemm.py ADDED
@@ -0,0 +1,64 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+
8
+ from ... import DSKernelBase
9
+ from ....inference_utils import ActivationType, DtypeEnum
10
+ from deepspeed.ops.op_builder import InferenceCutlassBuilder
11
+
12
+ from typing import Optional
13
+
14
+
15
+ class MixedGEMM(DSKernelBase):
16
+ """
17
+ CUTLASS implementation of mixed-precision GEMM (16-bit activations with 4-bit/8-bit quantized weights).
18
+ """
19
+
20
+ supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16]
21
+ supported_act_fns = [ActivationType.GELU, ActivationType.SILU, ActivationType.RELU, ActivationType.IDENTITY]
22
+
23
+ def __init__(self, fp_dtype: DtypeEnum, act_fn: ActivationType, num_bits: int) -> None:
24
+
25
+ if not isinstance(fp_dtype, DtypeEnum):
26
+ fp_dtype = DtypeEnum(fp_dtype)
27
+
28
+ if fp_dtype not in MixedGEMM.supported_dtypes:
29
+ raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format(
30
+ fp_dtype, MixedGEMM.supported_dtypes))
31
+
32
+ if act_fn not in MixedGEMM.supported_act_fns:
33
+ raise ValueError("Unsupported activation function: {}, supported_act_fns are {}".format(
34
+ act_fn, MixedGEMM.supported_act_fns))
35
+
36
+ if num_bits != 4 and num_bits != 8:
37
+ raise ValueError("Unsupported num_bits: {}, supported num_bits are 4 and 8".format(num_bits))
38
+
39
+ inf_module = InferenceCutlassBuilder().load()
40
+ self.num_bits = num_bits
41
+ self.kernel = inf_module.mixed_gemm
42
+ self.act_fn = act_fn
43
+
44
+ def __call__(self,
45
+ output: torch.Tensor,
46
+ hidden_states: torch.Tensor,
47
+ weights: torch.Tensor,
48
+ scales: torch.Tensor,
49
+ biases: Optional[torch.Tensor] = None) -> None:
50
+ """
51
+ Performs a mixed-precision GEMM. Note that the stride between token inputs must be uniform (the distance between byte 1 of token 0 and token 1 must be the same as the distance between byte 1 of token 1 and token 2).
52
+
53
+ Arguments:
54
+ output (torch.Tensor): The output of the MoE GEMM of shape [n_tokens, out_neurons].
55
+ hidden_states (torch.Tensor): The direct input for the MoE GEMM of shape [n_tokens, in_neurons].
56
+ weights (torch.Tensor): The weights of shape [in_neurons, out_neurons]. These weights must be contiguous.
57
+ scales (torch.Tensor): The scales of shape [out_neurons]. These scales must be contiguous.
58
+ biases (torch.Tensor): The biases of shape [out_neurons]. These biases must be contiguous.
59
+
60
+ Returns:
61
+ output
62
+ """
63
+ self.kernel(output, hidden_states, weights, scales, biases, self.num_bits, self.act_fn)
64
+ return output
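
A hedged usage sketch for the MixedGEMM wrapper above. It assumes the extension builds on a CUDA device, that the import path re-exported by the cutlass_ops package is available, and that the weight tensor has already been quantized/packed into the layout the CUTLASS runner expects; all sizes are made up.

import torch
from deepspeed.inference.v2.inference_utils import ActivationType, DtypeEnum
from deepspeed.inference.v2.kernels.cutlass_ops import MixedGEMM  # import path assumed

n_tokens, in_neurons, out_neurons = 128, 4096, 11008
gemm = MixedGEMM(DtypeEnum.fp16, ActivationType.IDENTITY, num_bits=8)

hidden_states = torch.randn(n_tokens, in_neurons, dtype=torch.float16, device="cuda")
weight = torch.empty(in_neurons, out_neurons, dtype=torch.int8, device="cuda")  # pre-packed placeholder
scales = torch.ones(out_neurons, dtype=torch.float16, device="cuda")
output = torch.empty(n_tokens, out_neurons, dtype=torch.float16, device="cuda")

gemm(output, hidden_states, weight, scales)  # bias omitted -> identity epilogue
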
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/mixed_gemm_api.h ADDED
@@ -0,0 +1,57 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "activation_type.h"
7
+ #include "weight_variant.h"
8
+
9
+ namespace fastertransformer {
10
+
11
+ template <typename T, WeightVariant V>
12
+ class CutlassFpAIntBGemmRunner {
13
+ public:
14
+ void gemm(const T* A,
15
+ const char* B,
16
+ const T* weight_scales,
17
+ T* C,
18
+ int m,
19
+ int n,
20
+ int k,
21
+ char* workspace_ptr,
22
+ const size_t workspace_bytes,
23
+ cudaStream_t stream);
24
+
25
+ void gemm_bias_act(const T* A,
26
+ const char* B,
27
+ const T* weight_scales,
28
+ const T* biases,
29
+ T* C,
30
+ int m,
31
+ int n,
32
+ int k,
33
+ ActivationType activation_type,
34
+ char* workspace_ptr,
35
+ const size_t workspace_bytes,
36
+ cudaStream_t stream);
37
+ };
38
+
39
+ } // namespace fastertransformer
40
+
41
+ template <typename T, WeightVariant V>
42
+ class MixedGemmContext {
43
+ public:
44
+ MixedGemmContext() { _runner = new fastertransformer::CutlassFpAIntBGemmRunner<T, V>(); }
45
+
46
+ virtual ~MixedGemmContext() { delete _runner; }
47
+
48
+ static MixedGemmContext& Instance()
49
+ {
50
+ static MixedGemmContext _ctx;
51
+ return _ctx;
52
+ }
53
+
54
+ fastertransformer::CutlassFpAIntBGemmRunner<T, V>* GeMM_Runner() const { return _runner; }
55
+
56
+ fastertransformer::CutlassFpAIntBGemmRunner<T, V>* _runner;
57
+ };
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/__init__.py ADDED
@@ -0,0 +1,7 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ from .mixed_moe_gemm import *
7
+ from .moe_gemm import *
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (270 Bytes). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/__pycache__/mixed_moe_gemm.cpython-310.pyc ADDED
Binary file (2.87 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/__pycache__/moe_gemm.cpython-310.pyc ADDED
Binary file (2.55 kB). View file
 
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/mixed_moe_gemm.py ADDED
@@ -0,0 +1,67 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+
8
+ from ... import DSKernelBase
9
+ from ....inference_utils import ActivationType, DtypeEnum
10
+ from deepspeed.ops.op_builder import InferenceCutlassBuilder
11
+
12
+ from typing import Optional
13
+
14
+
15
+ class MixedMoEGEMM(DSKernelBase):
16
+ """
17
+ CUTLASS implementation of MoE GEMM.
18
+ """
19
+
20
+ supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16]
21
+ supported_act_fns = [ActivationType.GELU, ActivationType.SILU, ActivationType.RELU, ActivationType.IDENTITY]
22
+
23
+ def __init__(self, fp_dtype: DtypeEnum, act_fn: ActivationType, num_bits: int) -> None:
24
+
25
+ if not isinstance(fp_dtype, DtypeEnum):
26
+ fp_dtype = DtypeEnum(fp_dtype)
27
+
28
+ if fp_dtype not in MixedMoEGEMM.supported_dtypes:
29
+ raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format(
30
+ fp_dtype, MixedMoEGEMM.supported_dtypes))
31
+
32
+ if act_fn not in MixedMoEGEMM.supported_act_fns:
33
+ raise ValueError("Unsupported activation function: {}, supported_act_fns are {}".format(
34
+ act_fn, MixedMoEGEMM.supported_act_fns))
35
+
36
+ if num_bits != 4 and num_bits != 8:
37
+ raise ValueError("Unsupported num_bits: {}, supported num_bits are 4 and 8".format(num_bits))
38
+
39
+ inf_module = InferenceCutlassBuilder().load()
40
+ self.num_bits = num_bits
41
+ self.kernel = inf_module.mixed_moe_gemm
42
+ self.act_fn = act_fn
43
+
44
+ def __call__(self,
45
+ ordered_output: torch.Tensor,
46
+ ordered_input: torch.Tensor,
47
+ weights: torch.Tensor,
48
+ scales: torch.Tensor,
49
+ total_rows_before_expert: torch.Tensor,
50
+ biases: Optional[torch.Tensor] = None) -> None:
51
+ """
52
+ Performs a MoE GEMM. Note that the stride between token inputs must be even (the distance between byte 1 of token 0 and token 1 must be the same as the distance between byte 1 of token 1 and token 2).
53
+
54
+ Arguments:
55
+ ordered_output (torch.Tensor): The output of the MoE GEMM of shape [n_tokens, out_neurons].
56
+ ordered_input (torch.Tensor): The direct input for the MoE GEMM of shape [n_tokens, in_neurons].
57
+ weights (torch.Tensor): The weights of shape [n_experts, in_neurons, out_neurons]. These weights must be contiguous.
58
+ scales (torch.Tensor): The scales of shape [n_experts, out_neurons]. These scales must be contiguous.
59
+ total_rows_before_expert (torch.Tensor): The total number of rows before each expert of shape [n_experts].
60
+ biases (torch.Tensor): The biases of shape [n_experts, out_neurons]. These biases must be contiguous.
61
+
62
+ Returns:
63
+ ordered_output
64
+ """
65
+ self.kernel(ordered_output, ordered_input, weights, scales, biases, total_rows_before_expert, self.num_bits,
66
+ self.act_fn)
67
+ return ordered_output
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/moe_gemm.cu ADDED
@@ -0,0 +1,175 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include <c10/cuda/CUDAStream.h>
7
+ #include "moe_gemm.h"
8
+ #include "moe_gemm_api.h"
9
+ #include "weight_variant.h"
10
+
11
+ // Switch helpers inspired by
12
+ // https://github.com/NVIDIA/DALI/blob/main/include/dali/core/static_switch.h
13
+ // and https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Dispatch.h
14
+
15
+ #define HIDDEN_DTYPE_SWITCH(COND, ...) \
16
+ [&] { \
17
+ if (COND) { \
18
+ using ActivationDtype = __half; \
19
+ constexpr WeightVariant WVariant = WeightVariant::kFP16; \
20
+ return __VA_ARGS__(); \
21
+ } else { \
22
+ using ActivationDtype = __nv_bfloat16; \
23
+ constexpr WeightVariant WVariant = WeightVariant::kBF16; \
24
+ return __VA_ARGS__(); \
25
+ } \
26
+ }()
27
+
28
+ void moe_gemm(at::Tensor& output,
29
+ at::Tensor& hidden_states,
30
+ at::Tensor& weight,
31
+ c10::optional<at::Tensor>& bias,
32
+ at::Tensor& total_rows_before_expert,
33
+ int activation_raw)
34
+ {
35
+ TORCH_CHECK(output.dtype() == hidden_states.dtype(),
36
+ "Output and hidden states must have the same dtype");
37
+ TORCH_CHECK(output.dtype() == weight.dtype(), "Output and weight must have the same dtype");
38
+
39
+ int64_t total_rows = hidden_states.size(0);
40
+ int64_t gemm_k = hidden_states.size(1);
41
+ int64_t gemm_n = weight.size(2);
42
+ int num_experts = weight.size(0);
43
+
44
+ TORCH_CHECK(total_rows == output.size(0), "Total rows dimension mismatch");
45
+ TORCH_CHECK(gemm_k == weight.size(1), "GEMM K dimension mismatch");
46
+ TORCH_CHECK(gemm_n == output.size(1), "GEMM N dimension mismatch");
47
+ TORCH_CHECK(num_experts == total_rows_before_expert.size(0), "Number of experts mismatch");
48
+
49
+ HIDDEN_DTYPE_SWITCH(hidden_states.dtype() == torch::kFloat16, [&] {
50
+ fastertransformer::MoeGemmRunner<ActivationDtype, WVariant> runner =
51
+ *MoeGemmContext<ActivationDtype, WVariant>::Instance().GeMM_Runner();
52
+
53
+ ActivationType activation_type = (ActivationType)activation_raw;
54
+ if (!bias.has_value() && activation_type == ActivationType::IDENTITY) {
55
+ runner.moe_gemm((ActivationDtype*)hidden_states.data_ptr(),
56
+ (char*)weight.data_ptr(),
57
+ nullptr,
58
+ (ActivationDtype*)output.data_ptr(),
59
+ (int64_t*)total_rows_before_expert.data_ptr(),
60
+ total_rows,
61
+ gemm_n,
62
+ gemm_k,
63
+ num_experts,
64
+ at::cuda::getCurrentCUDAStream());
65
+ return;
66
+ } else {
67
+ ActivationDtype* bias_ptr = nullptr;
68
+ if (bias.has_value()) {
69
+ bias_ptr = (ActivationDtype*)bias.value().data_ptr();
70
+ TORCH_CHECK(num_experts == bias.value().size(0), "Number of experts mismatch");
71
+ TORCH_CHECK(gemm_n == bias.value().size(1), "GEMM N dimension mismatch");
72
+ }
73
+ runner.moe_gemm_bias_act((ActivationDtype*)hidden_states.data_ptr(),
74
+ (char*)weight.data_ptr(),
75
+ nullptr,
76
+ bias_ptr,
77
+ (ActivationDtype*)output.data_ptr(),
78
+ (int64_t*)total_rows_before_expert.data_ptr(),
79
+ total_rows,
80
+ gemm_n,
81
+ gemm_k,
82
+ num_experts,
83
+ activation_type,
84
+ at::cuda::getCurrentCUDAStream());
85
+ return;
86
+ }
87
+ });
88
+ }
89
+
90
+ #define ACT_DTYPE_SWITCH(COND, ...) \
91
+ [&] { \
92
+ if (COND) { \
93
+ using ActivationDtype = __half; \
94
+ return __VA_ARGS__(); \
95
+ } else { \
96
+ using ActivationDtype = __nv_bfloat16; \
97
+ return __VA_ARGS__(); \
98
+ } \
99
+ }()
100
+
101
+ #define WEIGHT_VARIANT_SWITCH(COND, ...) \
102
+ [&] { \
103
+ if (COND) { \
104
+ constexpr WeightVariant WVariant = WeightVariant::kFP8; \
105
+ return __VA_ARGS__(); \
106
+ } else { \
107
+ constexpr WeightVariant WVariant = WeightVariant::kFP4; \
108
+ return __VA_ARGS__(); \
109
+ } \
110
+ }()
111
+
112
+ void mixed_moe_gemm(at::Tensor& output,
113
+ at::Tensor& hidden_states,
114
+ at::Tensor& weight,
115
+ at::Tensor& scales,
116
+ c10::optional<at::Tensor>& bias,
117
+ at::Tensor& total_rows_before_expert,
118
+ int num_bits,
119
+ int activation_raw)
120
+ {
121
+ TORCH_CHECK(output.dtype() == hidden_states.dtype(),
122
+ "Output and hidden states must have the same dtype");
123
+
124
+ int64_t total_rows = hidden_states.size(0);
125
+ int64_t gemm_k = hidden_states.size(1);
126
+ int64_t gemm_n = weight.size(2);
127
+ int num_experts = weight.size(0);
128
+
129
+ TORCH_CHECK(total_rows == output.size(0), "Total rows dimension mismatch");
130
+ TORCH_CHECK(gemm_k == weight.size(1), "GEMM K dimension mismatch");
131
+ TORCH_CHECK(gemm_n == output.size(1), "GEMM N dimension mismatch");
132
+ TORCH_CHECK(num_experts == total_rows_before_expert.size(0), "Number of experts mismatch");
133
+
134
+ ACT_DTYPE_SWITCH(hidden_states.dtype() == torch::kFloat16, [&] {
135
+ WEIGHT_VARIANT_SWITCH(num_bits == 8, [&] {
136
+ fastertransformer::MoeGemmRunner<ActivationDtype, WVariant> runner =
137
+ *MoeGemmContext<ActivationDtype, WVariant>::Instance().GeMM_Runner();
138
+
139
+ ActivationType activation_type = (ActivationType)activation_raw;
140
+ if (!bias.has_value() && activation_type == ActivationType::IDENTITY) {
141
+ runner.moe_gemm((ActivationDtype*)hidden_states.data_ptr(),
142
+ (char*)weight.data_ptr(),
143
+ (ActivationDtype*)scales.data_ptr(),
144
+ (ActivationDtype*)output.data_ptr(),
145
+ (int64_t*)total_rows_before_expert.data_ptr(),
146
+ total_rows,
147
+ gemm_n,
148
+ gemm_k,
149
+ num_experts,
150
+ at::cuda::getCurrentCUDAStream());
151
+ return;
152
+ } else {
153
+ ActivationDtype* bias_ptr = nullptr;
154
+ if (bias.has_value()) {
155
+ bias_ptr = (ActivationDtype*)bias.value().data_ptr();
156
+ TORCH_CHECK(num_experts == bias.value().size(0), "Number of experts mismatch");
157
+ TORCH_CHECK(gemm_n == bias.value().size(1), "GEMM N dimension mismatch");
158
+ }
159
+ runner.moe_gemm_bias_act((ActivationDtype*)hidden_states.data_ptr(),
160
+ (char*)weight.data_ptr(),
161
+ (ActivationDtype*)scales.data_ptr(),
162
+ bias_ptr,
163
+ (ActivationDtype*)output.data_ptr(),
164
+ (int64_t*)total_rows_before_expert.data_ptr(),
165
+ total_rows,
166
+ gemm_n,
167
+ gemm_k,
168
+ num_experts,
169
+ activation_type,
170
+ at::cuda::getCurrentCUDAStream());
171
+ return;
172
+ }
173
+ });
174
+ });
175
+ }
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/moe_gemm.h ADDED
@@ -0,0 +1,24 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include <torch/extension.h>
9
+
10
+ void moe_gemm(at::Tensor& output,
11
+ at::Tensor& hidden_states,
12
+ at::Tensor& weight,
13
+ c10::optional<at::Tensor>& bias,
14
+ at::Tensor& total_rows_before_expert,
15
+ int activation_raw);
16
+
17
+ void mixed_moe_gemm(at::Tensor& output,
18
+ at::Tensor& hidden_states,
19
+ at::Tensor& weight,
20
+ at::Tensor& scales,
21
+ c10::optional<at::Tensor>& bias,
22
+ at::Tensor& total_rows_before_expert,
23
+ int num_bits,
24
+ int activation_raw);
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/moe_gemm.py ADDED
@@ -0,0 +1,60 @@
1
+ # Copyright (c) Microsoft Corporation.
2
+ # SPDX-License-Identifier: Apache-2.0
3
+
4
+ # DeepSpeed Team
5
+
6
+ import torch
7
+
8
+ from ... import DSKernelBase
9
+ from ....inference_utils import ActivationType, DtypeEnum
10
+ from deepspeed.ops.op_builder import InferenceCutlassBuilder
11
+
12
+ from typing import Optional
13
+
14
+
15
+ class MoEGEMM(DSKernelBase):
16
+ """
17
+ CUTLASS implementation of MoE GEMM.
18
+ """
19
+
20
+ supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16]
21
+ supported_act_fns = [ActivationType.GELU, ActivationType.SILU, ActivationType.RELU, ActivationType.IDENTITY]
22
+
23
+ def __init__(self, fp_dtype: DtypeEnum, act_fn: ActivationType) -> None:
24
+
25
+ if not isinstance(fp_dtype, DtypeEnum):
26
+ fp_dtype = DtypeEnum(fp_dtype)
27
+
28
+ if fp_dtype not in MoEGEMM.supported_dtypes:
29
+ raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format(
30
+ fp_dtype, MoEGEMM.supported_dtypes))
31
+
32
+ if act_fn not in MoEGEMM.supported_act_fns:
33
+ raise ValueError("Unsupported activation function: {}, supported_act_fns are {}".format(
34
+ act_fn, MoEGEMM.supported_act_fns))
35
+
36
+ inf_module = InferenceCutlassBuilder().load()
37
+ self.kernel = inf_module.moe_gemm
38
+ self.act_fn = act_fn
39
+
40
+ def __call__(self,
41
+ ordered_output: torch.Tensor,
42
+ ordered_input: torch.Tensor,
43
+ weights: torch.Tensor,
44
+ total_rows_before_expert: torch.Tensor,
45
+ biases: Optional[torch.Tensor] = None) -> None:
46
+ """
47
+ Performs a MoE GEMM. Note that the stride between token inputs must be even (the distance between byte 1 of token 0 and token 1 must be the same as the distance between byte 1 of token 1 and token 2).
48
+
49
+ Arguments:
50
+ ordered_output (torch.Tensor): The output of the MoE GEMM of shape [n_tokens, out_neurons].
51
+ ordered_input (torch.Tensor): The direct input for the MoE GEMM of shape [n_tokens, in_neurons].
52
+ weights (torch.Tensor): The weights of shape [n_experts, in_neurons, out_neurons]. These weights must be contiguous.
53
+ total_rows_before_expert (torch.Tensor): The total number of rows before each expert of shape [n_experts].
54
+ biases (torch.Tensor): The biases of shape [n_experts, out_neurons]. These biases must be contiguous.
55
+
56
+ Returns:
57
+ ordered_output
58
+ """
59
+ self.kernel(ordered_output, ordered_input, weights, biases, total_rows_before_expert, self.act_fn)
60
+ return ordered_output
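
A hedged end-to-end sketch of the 16-bit MoEGEMM wrapper above on expert-sorted tokens. The import path, routing counts, and sizes are illustrative, and running it requires a CUDA device where the CUTLASS extension builds.

import torch
from deepspeed.inference.v2.inference_utils import ActivationType, DtypeEnum
from deepspeed.inference.v2.kernels.cutlass_ops import MoEGEMM  # import path assumed

n_experts, in_neurons, out_neurons = 4, 1024, 4096
tokens_per_expert = torch.tensor([3, 0, 5, 2], dtype=torch.int64, device="cuda")
n_tokens = int(tokens_per_expert.sum())

moe = MoEGEMM(DtypeEnum.fp16, ActivationType.GELU)

ordered_input = torch.randn(n_tokens, in_neurons, dtype=torch.float16, device="cuda")
weights = torch.randn(n_experts, in_neurons, out_neurons, dtype=torch.float16, device="cuda")
biases = torch.zeros(n_experts, out_neurons, dtype=torch.float16, device="cuda")
ordered_output = torch.empty(n_tokens, out_neurons, dtype=torch.float16, device="cuda")

moe(ordered_output, ordered_input, weights, torch.cumsum(tokens_per_expert, dim=0), biases)
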
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/moe_gemm_api.h ADDED
@@ -0,0 +1,64 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #include "activation_type.h"
7
+ #include "weight_variant.h"
8
+
9
+ namespace fastertransformer {
10
+
11
+ template <typename T, /*The type used for activations/scales/compute*/
12
+ WeightVariant V /* The type for the MoE weights */>
13
+ class MoeGemmRunner {
14
+ public:
15
+ MoeGemmRunner();
16
+
17
+ void moe_gemm_bias_act(const T* A,
18
+ const char* B,
19
+ const T* weight_scales,
20
+ const T* biases,
21
+ T* C,
22
+ int64_t* total_rows_before_expert,
23
+ int64_t total_rows,
24
+ int64_t gemm_n,
25
+ int64_t gemm_k,
26
+ int num_experts,
27
+ ActivationType activation_type,
28
+ cudaStream_t stream);
29
+
30
+ void moe_gemm(const T* A,
31
+ const char* B,
32
+ const T* weight_scales,
33
+ T* C,
34
+ int64_t* total_rows_before_expert,
35
+ int64_t total_rows,
36
+ int64_t gemm_n,
37
+ int64_t gemm_k,
38
+ int num_experts,
39
+ cudaStream_t stream);
40
+
41
+ private:
42
+ int sm_;
43
+ int multi_processor_count_;
44
+ };
45
+
46
+ } // namespace fastertransformer
47
+
48
+ template <typename T, WeightVariant V>
49
+ class MoeGemmContext {
50
+ public:
51
+ MoeGemmContext() { _runner = new fastertransformer::MoeGemmRunner<T, V>(); }
52
+
53
+ virtual ~MoeGemmContext() { delete _runner; }
54
+
55
+ static MoeGemmContext& Instance()
56
+ {
57
+ static MoeGemmContext _ctx;
58
+ return _ctx;
59
+ }
60
+
61
+ fastertransformer::MoeGemmRunner<T, V>* GeMM_Runner() const { return _runner; }
62
+
63
+ fastertransformer::MoeGemmRunner<T, V>* _runner;
64
+ };
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/shared_resources/weight_variant.h ADDED
@@ -0,0 +1,11 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ // Data structure that allows us to abstract internal CUTLASS datatypes/mappings
7
+ // to the DeepSpeed-Kernels repo.
8
+
9
+ #pragma once
10
+
11
+ enum WeightVariant { kFP16, kBF16, kFP8, kFP4 };
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/activation_type.h ADDED
@@ -0,0 +1,17 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ enum ActivationType {
9
+ GELU = 0,
10
+ RELU = 1,
11
+ SILU = 2,
12
+ GEGLU = 3,
13
+ ReGLU = 4,
14
+ SiGLU = 5,
15
+ IDENTITY = 6,
16
+ InvalidType = -1
17
+ };
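
The .cu entry points receive activation_raw as a plain int and cast it to this enum, so the Python wrappers only need to forward matching integer values. A tiny mirror for reference (hedged: it assumes the Python-side ActivationType in inference_utils uses the same numbering).

from enum import IntEnum

class ActivationTypeMirror(IntEnum):   # mirrors activation_type.h, for illustration only
    GELU = 0
    RELU = 1
    SILU = 2
    GEGLU = 3
    ReGLU = 4
    SiGLU = 5
    IDENTITY = 6

assert int(ActivationTypeMirror.IDENTITY) == 6   # value forwarded as activation_raw
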
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/conversion_utils.h ADDED
@@ -0,0 +1,640 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include "ds_kernel_utils.h"
9
+
10
+ #include <stdint.h>
11
+
12
+ #ifdef BF16_AVAILABLE
13
+ #include <cuda_bf16.h>
14
+ #endif
15
+
16
+ namespace conversion {
17
+
18
+ // Basic primitive for constructing conversions
19
+ template <typename TO, typename FROM>
20
+ DS_D_INLINE TO to(FROM val)
21
+ {
22
+ return to(val);
23
+ }
24
+
25
+ // Specializations
26
+
27
+ /********************* Identity Conversions *********************/
28
+ /*
29
+ Identity conversions are useful in templated functions where we might have
30
+ a fixed destination type. For example, I might have a kernel that accepts
31
+ __half, __nv_bfloat16, and float but always want to do the core computation
32
+ at floating point:
33
+
34
+ T mem_value = input[idx];
35
+ float compute_value = conversion::to<float, T>(mem_value);
36
+
37
+ In practice, we should be able to elide the second template parameter:
38
+ float compute_val = conversion::to<float>(mem_value);
39
+
40
+ In this case, we need an implementation to handle the T = float case
41
+
42
+ NOTE: The type inferencing system appears to be unable to handle inferring the first
43
+ template parameter, even in the trivial case.
44
+ */
45
+
46
+ // Floating point types
47
+ template <>
48
+ DS_D_INLINE double to(double val)
49
+ {
50
+ return val;
51
+ }
52
+ template <>
53
+ DS_D_INLINE float to(float val)
54
+ {
55
+ return val;
56
+ }
57
+ template <>
58
+ DS_D_INLINE __half to(__half val)
59
+ {
60
+ return val;
61
+ }
62
+ #ifdef BF16_AVAILABLE
63
+ template <>
64
+ DS_D_INLINE __nv_bfloat16 to(__nv_bfloat16 val)
65
+ {
66
+ return val;
67
+ }
68
+ #endif
69
+
70
+ // Integer types
71
+ template <>
72
+ DS_D_INLINE int8_t to(int8_t val)
73
+ {
74
+ return val;
75
+ }
76
+ template <>
77
+ DS_D_INLINE uint8_t to(uint8_t val)
78
+ {
79
+ return val;
80
+ }
81
+ template <>
82
+ DS_D_INLINE int16_t to(int16_t val)
83
+ {
84
+ return val;
85
+ }
86
+ template <>
87
+ DS_D_INLINE uint16_t to(uint16_t val)
88
+ {
89
+ return val;
90
+ }
91
+ template <>
92
+ DS_D_INLINE int32_t to(int32_t val)
93
+ {
94
+ return val;
95
+ }
96
+ template <>
97
+ DS_D_INLINE uint32_t to(uint32_t val)
98
+ {
99
+ return val;
100
+ }
101
+ template <>
102
+ DS_D_INLINE int64_t to(int64_t val)
103
+ {
104
+ return val;
105
+ }
106
+ template <>
107
+ DS_D_INLINE uint64_t to(uint64_t val)
108
+ {
109
+ return val;
110
+ }
111
+
112
+ // TODO: evaluate if we want bools
113
+
114
+ /********************* To Double Conversions *********************/
115
+
116
+ // * to double variants
117
+
118
+ // Would normally like to not use C cast, but this is an important enough conversion
119
+ // to keep
120
+ template <>
121
+ DS_D_INLINE double to(float val)
122
+ {
123
+ #ifdef PTX_AVAILABLE
124
+ double ret_val;
125
+ asm("cvt.rn.f64.f32 %0, %1;\n" : "=d"(ret_val) : "f"(val));
126
+ return ret_val;
127
+ #else
128
+ return double(val);
129
+ #endif
130
+ }
131
+ // Note: there is a CVT instruction for __half -> double, but there's no inline interface
132
+ // for passing a single half value
133
+ template <>
134
+ DS_D_INLINE double to(__half val)
135
+ {
136
+ return to<double>(__half2float(val));
137
+ }
138
+ template <>
139
+ DS_D_INLINE double to(int64_t val)
140
+ {
141
+ return __ll2double_rn(val);
142
+ }
143
+ template <>
144
+ DS_D_INLINE double to(int32_t val)
145
+ {
146
+ return __int2double_rn(val);
147
+ }
148
+ template <>
149
+ DS_D_INLINE double to(int16_t val)
150
+ {
151
+ return __int2double_rn(val);
152
+ }
153
+ template <>
154
+ DS_D_INLINE double to(int8_t val)
155
+ {
156
+ return __int2double_rn(val);
157
+ }
158
+ template <>
159
+ DS_D_INLINE double to(uint64_t val)
160
+ {
161
+ return __ull2double_rn(val);
162
+ }
163
+ template <>
164
+ DS_D_INLINE double to(uint32_t val)
165
+ {
166
+ return __uint2double_rn(val);
167
+ }
168
+ template <>
169
+ DS_D_INLINE double to(uint16_t val)
170
+ {
171
+ return __uint2double_rn(val);
172
+ }
173
+ template <>
174
+ DS_D_INLINE double to(uint8_t val)
175
+ {
176
+ return __uint2double_rn(val);
177
+ }
178
+
179
+ // Same applies here
180
+ #ifdef BF16_AVAILABLE
181
+ template <>
182
+ DS_D_INLINE double to(__nv_bfloat16 val)
183
+ {
184
+ return to<double>(__bfloat162float(val));
185
+ }
186
+ #endif
187
+
188
+ /********************* To Float Conversions *********************/
189
+
190
+ template <>
191
+ DS_D_INLINE float to(double val)
192
+ {
193
+ return __double2float_rn(val);
194
+ }
195
+ template <>
196
+ DS_D_INLINE float to(__half val)
197
+ {
198
+ return __half2float(val);
199
+ }
200
+ template <>
201
+ DS_D_INLINE float to(int64_t val)
202
+ {
203
+ return __ll2float_rn(val);
204
+ }
205
+ template <>
206
+ DS_D_INLINE float to(int32_t val)
207
+ {
208
+ return __int2float_rn(val);
209
+ }
210
+ template <>
211
+ DS_D_INLINE float to(int16_t val)
212
+ {
213
+ return __int2float_rn(val);
214
+ }
215
+ template <>
216
+ DS_D_INLINE float to(int8_t val)
217
+ {
218
+ return __int2float_rn(val);
219
+ }
220
+ template <>
221
+ DS_D_INLINE float to(uint64_t val)
222
+ {
223
+ return __ull2float_rn(val);
224
+ }
225
+ template <>
226
+ DS_D_INLINE float to(uint32_t val)
227
+ {
228
+ return __uint2float_rn(val);
229
+ }
230
+ template <>
231
+ DS_D_INLINE float to(uint16_t val)
232
+ {
233
+ return __uint2float_rn(val);
234
+ }
235
+ template <>
236
+ DS_D_INLINE float to(uint8_t val)
237
+ {
238
+ return __uint2float_rn(val);
239
+ }
240
+
241
+ #ifdef BF16_AVAILABLE
242
+ template <>
243
+ DS_D_INLINE float to(__nv_bfloat16 val)
244
+ {
245
+ return __bfloat162float(val);
246
+ }
247
+ #endif
248
+
249
+ /********************* To Float2 Conversions *********************/
250
+ template <>
251
+ DS_D_INLINE float2 to(__half2 val)
252
+ {
253
+ return __half22float2(val);
254
+ }
255
+
256
+ #ifdef BF16_AVAILABLE
257
+ template <>
258
+ DS_D_INLINE float2 to(__nv_bfloat162 val)
259
+ {
260
+ return __bfloat1622float2(val);
261
+ }
262
+ #endif
263
+
264
+ /********************* To Half Conversions *********************/
265
+ template <>
266
+ DS_D_INLINE __half to(double val)
267
+ {
268
+ #ifdef __HIP_PLATFORM_AMD__
269
+ float val_f = __double2float_rn(val);
270
+ return __float2half(val_f);
271
+ #else
272
+ return __double2half(val);
273
+ #endif
274
+ }
275
+ template <>
276
+ DS_D_INLINE __half to(float val)
277
+ {
278
+ return __float2half(val);
279
+ }
280
+ template <>
281
+ DS_D_INLINE __half to(int64_t val)
282
+ {
283
+ return __ll2half_rn(val);
284
+ }
285
+ template <>
286
+ DS_D_INLINE __half to(int32_t val)
287
+ {
288
+ return __int2half_rn(val);
289
+ }
290
+ template <>
291
+ DS_D_INLINE __half to(int16_t val)
292
+ {
293
+ return __short2half_rn(val);
294
+ }
295
+ template <>
296
+ DS_D_INLINE __half to(int8_t val)
297
+ {
298
+ return __int2half_rn(val);
299
+ }
300
+ template <>
301
+ DS_D_INLINE __half to(uint64_t val)
302
+ {
303
+ return __ull2half_rn(val);
304
+ }
305
+ template <>
306
+ DS_D_INLINE __half to(uint32_t val)
307
+ {
308
+ return __uint2half_rn(val);
309
+ }
310
+ template <>
311
+ DS_D_INLINE __half to(uint16_t val)
312
+ {
313
+ return __ushort2half_rn(val);
314
+ }
315
+ template <>
316
+ DS_D_INLINE __half to(uint8_t val)
317
+ {
318
+ return __uint2half_rn(val);
319
+ }
320
+
321
+ #ifdef BF16_AVAILABLE
322
+ // No direct conversion
323
+ template <>
324
+ DS_D_INLINE __half to(__nv_bfloat16 val)
325
+ {
326
+ return to<__half>(to<float>(val));
327
+ }
328
+ #endif
329
+
330
+ /********************* To Half2 Conversions *********************/
331
+ template <>
332
+ DS_D_INLINE __half2 to(float2 val)
333
+ {
334
+ return __float22half2_rn(val);
335
+ }
336
+ template <>
337
+ DS_D_INLINE __half2 to(float val)
338
+ {
339
+ return __float2half2_rn(val);
340
+ }
341
+
342
+ #ifdef BF16_AVAILABLE
343
+ // No direct conversion
344
+ template <>
345
+ DS_D_INLINE __half2 to(__nv_bfloat162 val)
346
+ {
347
+ return to<__half2>(to<float2>(val));
348
+ }
349
+ #endif
350
+
351
+ /********************* To BF16 Conversions *********************/
352
+ #ifdef BF16_AVAILABLE
353
+ template <>
354
+ DS_D_INLINE __nv_bfloat16 to(double val)
355
+ {
356
+ return __double2bfloat16(val);
357
+ }
358
+ template <>
359
+ DS_D_INLINE __nv_bfloat16 to(float val)
360
+ {
361
+ return __float2bfloat16(val);
362
+ }
363
+ template <>
364
+ DS_D_INLINE __nv_bfloat16 to(int64_t val)
365
+ {
366
+ return __ll2bfloat16_rn(val);
367
+ }
368
+ template <>
369
+ DS_D_INLINE __nv_bfloat16 to(int32_t val)
370
+ {
371
+ return __int2bfloat16_rn(val);
372
+ }
373
+ template <>
374
+ DS_D_INLINE __nv_bfloat16 to(int16_t val)
375
+ {
376
+ return __short2bfloat16_rn(val);
377
+ }
378
+ template <>
379
+ DS_D_INLINE __nv_bfloat16 to(int8_t val)
380
+ {
381
+ return __int2bfloat16_rn(val);
382
+ }
383
+ template <>
384
+ DS_D_INLINE __nv_bfloat16 to(uint64_t val)
385
+ {
386
+ return __ull2bfloat16_rn(val);
387
+ }
388
+ template <>
389
+ DS_D_INLINE __nv_bfloat16 to(uint32_t val)
390
+ {
391
+ return __uint2bfloat16_rn(val);
392
+ }
393
+ template <>
394
+ DS_D_INLINE __nv_bfloat16 to(uint16_t val)
395
+ {
396
+ return __ushort2bfloat16_rn(val);
397
+ }
398
+ template <>
399
+ DS_D_INLINE __nv_bfloat16 to(uint8_t val)
400
+ {
401
+ return __uint2bfloat16_rn(val);
402
+ }
403
+ #endif
404
+
405
+ /********************* To BF162 Conversions *********************/
406
+ #ifdef BF16_AVAILABLE
407
+ template <>
408
+ DS_D_INLINE __nv_bfloat162 to(float2 val)
409
+ {
410
+ return __float22bfloat162_rn(val);
411
+ }
412
+ template <>
413
+ DS_D_INLINE __nv_bfloat162 to(float val)
414
+ {
415
+ return __float2bfloat162_rn(val);
416
+ }
417
+ template <>
418
+ DS_D_INLINE __nv_bfloat162 to(__half2 val)
419
+ {
420
+ return to<__nv_bfloat162>(to<float2>(val));
421
+ }
422
+ #endif
423
+
424
+ /********************* To INT64_T Conversions *********************/
425
+ template <>
426
+ DS_D_INLINE int64_t to(double val)
427
+ {
428
+ return __double2ll_rn(val);
429
+ }
430
+ template <>
431
+ DS_D_INLINE int64_t to(float val)
432
+ {
433
+ return __float2ll_rn(val);
434
+ }
435
+ template <>
436
+ DS_D_INLINE int64_t to(__half val)
437
+ {
438
+ return __half2ll_rn(val);
439
+ }
440
+ // No direct support for integer casts at the C++ level and I don't feel they're so important
441
+ // to demand a PTX at this time
442
+
443
+ #ifdef BF16_AVAILABLE
444
+ template <>
445
+ DS_D_INLINE int64_t to(__nv_bfloat16 val)
446
+ {
447
+ return __bfloat162ll_rn(val);
448
+ }
449
+ #endif
450
+
451
+ /********************* To INT32_T Conversions *********************/
452
+ template <>
453
+ DS_D_INLINE int32_t to(double val)
454
+ {
455
+ return __double2int_rn(val);
456
+ }
457
+ template <>
458
+ DS_D_INLINE int32_t to(float val)
459
+ {
460
+ return __float2int_rn(val);
461
+ }
462
+ template <>
463
+ DS_D_INLINE int32_t to(__half val)
464
+ {
465
+ return __half2int_rn(val);
466
+ }
467
+ // No direct support for integer casts at the C++ level and I don't feel they're so important
468
+ // to demand a PTX at this time
469
+
470
+ #ifdef BF16_AVAILABLE
471
+ template <>
472
+ DS_D_INLINE int32_t to(__nv_bfloat16 val)
473
+ {
474
+ return __bfloat162int_rn(val);
475
+ }
476
+ #endif
477
+
478
+ /********************* To INT16_T Conversions *********************/
479
+ template <>
480
+ DS_D_INLINE int16_t to(double val)
481
+ {
482
+ return __double2int_rn(val);
483
+ }
484
+ template <>
485
+ DS_D_INLINE int16_t to(float val)
486
+ {
487
+ return __float2int_rn(val);
488
+ }
489
+ template <>
490
+ DS_D_INLINE int16_t to(__half val)
491
+ {
492
+ return __half2int_rn(val);
493
+ }
494
+ // No direct support for integer casts at the C++ level and I don't feel they're so important
495
+ // to demand a PTX at this time
496
+
497
+ #ifdef BF16_AVAILABLE
498
+ template <>
499
+ DS_D_INLINE int16_t to(__nv_bfloat16 val)
500
+ {
501
+ return __bfloat162int_rn(val);
502
+ }
503
+ #endif
504
+
505
+ /********************* To INT8_T Conversions *********************/
506
+ template <>
507
+ DS_D_INLINE int8_t to(double val)
508
+ {
509
+ return __double2int_rn(val);
510
+ }
511
+ template <>
512
+ DS_D_INLINE int8_t to(float val)
513
+ {
514
+ return __float2int_rn(val);
515
+ }
516
+ template <>
517
+ DS_D_INLINE int8_t to(__half val)
518
+ {
519
+ return __half2int_rn(val);
520
+ }
521
+ // No direct support for integer casts at the C++ level and I don't feel they're so important
522
+ // to demand a PTX at this time
523
+
524
+ #ifdef BF16_AVAILABLE
525
+ template <>
526
+ DS_D_INLINE int8_t to(__nv_bfloat16 val)
527
+ {
528
+ return __bfloat162int_rn(val);
529
+ }
530
+ #endif
531
+
532
+ /********************* To UINT64_T Conversions *********************/
533
+ template <>
534
+ DS_D_INLINE uint64_t to(double val)
535
+ {
536
+ return __double2ull_rn(val);
537
+ }
538
+ template <>
539
+ DS_D_INLINE uint64_t to(float val)
540
+ {
541
+ return __float2ull_rn(val);
542
+ }
543
+ template <>
544
+ DS_D_INLINE uint64_t to(__half val)
545
+ {
546
+ return __half2ull_rn(val);
547
+ }
548
+ // No direct support for integer casts at the C++ level and I don't feel they're so important
549
+ // to demand a PTX at this time
550
+
551
+ #ifdef BF16_AVAILABLE
552
+ template <>
553
+ DS_D_INLINE uint64_t to(__nv_bfloat16 val)
554
+ {
555
+ return __bfloat162ull_rn(val);
556
+ }
557
+ #endif
558
+
559
+ /********************* To UINT32_T Conversions *********************/
560
+ template <>
561
+ DS_D_INLINE uint32_t to(double val)
562
+ {
563
+ return __double2uint_rn(val);
564
+ }
565
+ template <>
566
+ DS_D_INLINE uint32_t to(float val)
567
+ {
568
+ return __float2uint_rn(val);
569
+ }
570
+ template <>
571
+ DS_D_INLINE uint32_t to(__half val)
572
+ {
573
+ return __half2uint_rn(val);
574
+ }
575
+ // No direct support for integer casts at the C++ level and I don't feel they're so important
576
+ // to demand a PTX at this time
577
+
578
+ #ifdef BF16_AVAILABLE
579
+ template <>
580
+ DS_D_INLINE uint32_t to(__nv_bfloat16 val)
581
+ {
582
+ return __bfloat162uint_rn(val);
583
+ }
584
+ #endif
585
+
586
+ /********************* To UINT16_T Conversions *********************/
587
+ template <>
588
+ DS_D_INLINE uint16_t to(double val)
589
+ {
590
+ return __double2uint_rn(val);
591
+ }
592
+ template <>
593
+ DS_D_INLINE uint16_t to(float val)
594
+ {
595
+ return __float2uint_rn(val);
596
+ }
597
+ template <>
598
+ DS_D_INLINE uint16_t to(__half val)
599
+ {
600
+ return __half2uint_rn(val);
601
+ }
602
+ // No direct support for integer casts at the C++ level and I don't feel they're so important
603
+ // to demand a PTX at this time
604
+
605
+ #ifdef BF16_AVAILABLE
606
+ template <>
607
+ DS_D_INLINE uint16_t to(__nv_bfloat16 val)
608
+ {
609
+ return __bfloat162uint_rn(val);
610
+ }
611
+ #endif
612
+
613
+ /********************* To UINT8_T Conversions *********************/
614
+ template <>
615
+ DS_D_INLINE uint8_t to(double val)
616
+ {
617
+ return __double2uint_rn(val);
618
+ }
619
+ template <>
620
+ DS_D_INLINE uint8_t to(float val)
621
+ {
622
+ return __float2uint_rn(val);
623
+ }
624
+ template <>
625
+ DS_D_INLINE uint8_t to(__half val)
626
+ {
627
+ return __half2uint_rn(val);
628
+ }
629
+ // No direct support for integer casts at the C++ level and I don't feel they're so important
630
+ // to demand a PTX at this time
631
+
632
+ #ifdef BF16_AVAILABLE
633
+ template <>
634
+ DS_D_INLINE uint8_t to(__nv_bfloat16 val)
635
+ {
636
+ return __bfloat162uint_rn(val);
637
+ }
638
+ #endif
639
+
640
+ } // namespace conversion
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/ds_kernel_utils.h ADDED
@@ -0,0 +1,58 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ /*
7
+ Centralized header file for preprocessor macros and constants
8
+ used throughout the codebase.
9
+ */
10
+
11
+ #pragma once
12
+
13
+ #include <cuda.h>
14
+ #include <cuda_fp16.h>
15
+
16
+ #ifdef BF16_AVAILABLE
17
+ #include <cuda_bf16.h>
18
+ #endif
19
+
20
+ #define DS_HD_INLINE __host__ __device__ __forceinline__
21
+ #define DS_D_INLINE __device__ __forceinline__
22
+
23
+ #ifdef __HIP_PLATFORM_AMD__
24
+
25
+ // constexpr variant of warpSize for templating
26
+ constexpr int hw_warp_size = 64;
27
+ #define HALF_PRECISION_AVAILABLE = 1
28
+ #include <hip/hip_cooperative_groups.h>
29
+ #include <hip/hip_fp16.h>
30
+
31
+ #else // !__HIP_PLATFORM_AMD__
32
+
33
+ // constexpr variant of warpSize for templating
34
+ constexpr int hw_warp_size = 32;
35
+
36
+ #if __CUDA_ARCH__ >= 530
37
+ #define HALF_PRECISION_AVAILABLE = 1
38
+ #define PTX_AVAILABLE
39
+ #endif // __CUDA_ARCH__ >= 530
40
+
41
+ #if __CUDA_ARCH__ >= 800
42
+ #define ASYNC_COPY_AVAILABLE
43
+ #endif // __CUDA_ARCH__ >= 800
44
+
45
+ #include <cooperative_groups.h>
46
+ #include <cuda_fp16.h>
47
+
48
+ #endif //__HIP_PLATFORM_AMD__
49
+
50
+ inline int next_pow2(const int val)
51
+ {
52
+ int rounded_val = val - 1;
53
+ rounded_val |= rounded_val >> 1;
54
+ rounded_val |= rounded_val >> 2;
55
+ rounded_val |= rounded_val >> 4;
56
+ rounded_val |= rounded_val >> 8;
57
+ return rounded_val + 1;
58
+ }
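`next_pow2` rounds its argument up to the nearest power of two by smearing the high bit downward; since the smear stops at `>> 8`, it is only meant for values below 2^16 (block sizes, warp counts, and similar). A small host-side sketch of the behavior (hypothetical `main`, compiled with nvcc so the CUDA includes resolve):

```cpp
// Sketch only: sanity checks for next_pow2 from ds_kernel_utils.h.
#include <cassert>

#include "ds_kernel_utils.h"

int main()
{
    assert(next_pow2(1) == 1);      // already a power of two
    assert(next_pow2(3) == 4);
    assert(next_pow2(1024) == 1024);
    assert(next_pow2(1025) == 2048);
    return 0;
}
```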
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/memory_access_utils.h ADDED
@@ -0,0 +1,1115 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include <cuda.h>
9
+ #include "ds_kernel_utils.h"
10
+
11
+ /////////////////////////////// Memory Access Utils ///////////////////////////////
12
+ namespace mem_access {
13
+
14
+ enum class LoadPolicy {
15
+ CacheAll, // Cache at all levels
16
+ CacheGlobal, // Cache at L2 only
17
+ CacheStreaming // Cache with evict first policy
18
+ };
19
+
20
+ enum class StorePolicy {
21
+ Writeback, // Cache in L1, write-back on eviction
22
+ CacheGlobal, // Bypass L1, write-back on eviction
23
+ CacheStreaming // Allocate cache line with evict first policy
24
+ };
25
+
26
+ template <int AccessSize, LoadPolicy policy = LoadPolicy::CacheAll>
27
+ __device__ __forceinline__ void load_global(void* dst, const void* src);
28
+
29
+ template <int AccessSize, LoadPolicy policy = LoadPolicy::CacheAll>
30
+ __device__ __forceinline__ void load_global(void* dst, const void* src, bool do_access);
31
+
32
+ // Shared accesses have no cache policy
33
+ template <int AccessSize>
34
+ __device__ __forceinline__ void load_shared(void* dst, const void* src);
35
+
36
+ template <int AccessSize>
37
+ __device__ __forceinline__ void load_shared(void* dst, const void* src, bool do_access);
38
+
39
+ template <int AccessSize, StorePolicy policy = StorePolicy::Writeback>
40
+ __device__ __forceinline__ void store_global(void* dst, const void* src);
41
+
42
+ // Shared accesses have no cache policy
43
+ template <int AccessSize>
44
+ __device__ __forceinline__ void store_shared(void* dst, const void* src);
45
+
46
+ #ifdef ASYNC_COPY_AVAILABLE
47
+ template <int AccessSize>
48
+ __device__ __forceinline__ void memcpy_async(void* shr, const void* gbl);
49
+
50
+ template <int AccessSize>
51
+ __device__ __forceinline__ void memcpy_async_nop(void* shr, const void* gbl, bool predicate);
52
+
53
+ template <int AccessSize>
54
+ __device__ __forceinline__ void memcpy_async_zero(void* shr, const void* gbl, bool predicate);
55
+
56
+ __device__ __forceinline__ void memcpy_async_fence();
57
+
58
+ template <int stages>
59
+ __device__ __forceinline__ void memcpy_async_wait();
60
+
61
+ template <int stages>
62
+ __device__ __forceinline__ void tail_complete_wait(int remaining_stages);
63
+ #endif
64
+
65
+ // Util for tracking pipeline buffers
66
+ // TODO: Evaluate whether this should also be guarded by ASYNC_COPY_AVAILABLE
67
+ template <int max>
68
+ class BufferTracker {
69
+ public:
70
+ int current_state;
71
+
72
+ __device__ __forceinline__ BufferTracker() : current_state(0) {}
73
+
74
+ __device__ __forceinline__ int get()
75
+ {
76
+ int return_val = current_state++;
77
+ current_state = (current_state == max ? 0 : current_state);
78
+ return return_val;
79
+ }
80
+ };
81
+
82
+ __device__ __forceinline__ uint32_t lane_id()
83
+ {
84
+ #ifdef PTX_AVAILABLE
85
+ unsigned int lane_id;
86
+ asm volatile("mov.u32 %0, %%laneid;" : "=r"(lane_id));
87
+ return lane_id;
88
+ #else
89
+ return threadIdx.x & (warpSize - 1); // Portable
90
+ #endif
91
+ }
92
+
93
+ /////////// Load Global ///////////
94
+ template <>
95
+ __device__ __forceinline__ void load_global<16>(void* dst, const void* src)
96
+ {
97
+ uint4* data = reinterpret_cast<uint4*>(dst);
98
+ #ifdef PTX_AVAILABLE
99
+ asm volatile("ld.global.ca.v4.u32 {%0, %1, %2, %3}, [%4];\n"
100
+ : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
101
+ : "l"(src));
102
+ #else
103
+ const uint4* src_cast = reinterpret_cast<const uint4*>(src);
104
+ data[0] = src_cast[0];
105
+ #endif
106
+ }
107
+
108
+ template <>
109
+ __device__ __forceinline__ void load_global<16>(void* dst, const void* src, bool do_access)
110
+ {
111
+ uint4* data = reinterpret_cast<uint4*>(dst);
112
+ #ifdef PTX_AVAILABLE
113
+ asm volatile(
114
+ "{\n"
115
+ "\t.reg .pred p;\n"
116
+ "\tsetp.ne.b32 p, %5, 0;\n"
117
+ "\tmov.b32 %0, 0;\n"
118
+ "\tmov.b32 %1, 0;\n"
119
+ "\tmov.b32 %2, 0;\n"
120
+ "\tmov.b32 %3, 0;\n"
121
+ "\t@p ld.global.v4.u32 {%0, %1, %2, %3}, [%4];\n"
122
+ "}\n"
123
+ : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
124
+ : "l"(src), "r"((int)do_access));
125
+ #else
126
+ const uint4* src_cast = reinterpret_cast<const uint4*>(src);
127
+ if (do_access) {
128
+ data[0] = src_cast[0];
129
+ } else {
130
+ data[0].x = 0;
131
+ data[0].y = 0;
132
+ data[0].z = 0;
133
+ data[0].w = 0;
134
+ }
135
+ #endif
136
+ }
137
+
138
+ template <>
139
+ __device__ __forceinline__ void load_global<16, LoadPolicy::CacheGlobal>(void* dst, const void* src)
140
+ {
141
+ uint4* data = reinterpret_cast<uint4*>(dst);
142
+ #ifdef PTX_AVAILABLE
143
+ asm volatile("ld.global.cg.v4.u32 {%0, %1, %2, %3}, [%4];\n"
144
+ : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
145
+ : "l"(src));
146
+ #else
147
+ const uint4* src_cast = reinterpret_cast<const uint4*>(src);
148
+ data[0] = src_cast[0];
149
+ #endif
150
+ }
151
+
152
+ template <>
153
+ __device__ __forceinline__ void load_global<16, LoadPolicy::CacheGlobal>(void* dst,
154
+ const void* src,
155
+ bool do_access)
156
+ {
157
+ uint4* data = reinterpret_cast<uint4*>(dst);
158
+ #ifdef PTX_AVAILABLE
159
+ asm volatile(
160
+ "{\n"
161
+ "\t.reg .pred p;\n"
162
+ "\tsetp.ne.b32 p, %5, 0;\n"
163
+ "\tmov.b32 %0, 0;\n"
164
+ "\tmov.b32 %1, 0;\n"
165
+ "\tmov.b32 %2, 0;\n"
166
+ "\tmov.b32 %3, 0;\n"
167
+ "\t@p ld.global.cg.v4.u32 {%0, %1, %2, %3}, [%4];\n"
168
+ "}\n"
169
+ : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
170
+ : "l"(src), "r"((int)do_access));
171
+ #else
172
+ const uint4* src_cast = reinterpret_cast<const uint4*>(src);
173
+ if (do_access) {
174
+ data[0] = src_cast[0];
175
+ } else {
176
+ data[0].x = 0;
177
+ data[0].y = 0;
178
+ data[0].z = 0;
179
+ data[0].w = 0;
180
+ }
181
+ #endif
182
+ }
183
+
184
+ template <>
185
+ __device__ __forceinline__ void load_global<16, LoadPolicy::CacheStreaming>(void* dst,
186
+ const void* src)
187
+ {
188
+ uint4* data = reinterpret_cast<uint4*>(dst);
189
+ #ifdef PTX_AVAILABLE
190
+ asm volatile("ld.global.cs.v4.u32 {%0, %1, %2, %3}, [%4];\n"
191
+ : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
192
+ : "l"(src));
193
+ #else
194
+ const uint4* src_cast = reinterpret_cast<const uint4*>(src);
195
+ data[0] = src_cast[0];
196
+ #endif
197
+ }
198
+
199
+ template <>
200
+ __device__ __forceinline__ void load_global<16, LoadPolicy::CacheStreaming>(void* dst,
201
+ const void* src,
202
+ bool do_access)
203
+ {
204
+ uint4* data = reinterpret_cast<uint4*>(dst);
205
+ #ifdef PTX_AVAILABLE
206
+ asm volatile(
207
+ "{\n"
208
+ "\t.reg .pred p;\n"
209
+ "\tsetp.ne.b32 p, %5, 0;\n"
210
+ "\tmov.b32 %0, 0;\n"
211
+ "\tmov.b32 %1, 0;\n"
212
+ "\tmov.b32 %2, 0;\n"
213
+ "\tmov.b32 %3, 0;\n"
214
+ "\t@p ld.global.cg.v4.u32 {%0, %1, %2, %3}, [%4];\n"
215
+ "}\n"
216
+ : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
217
+ : "l"(src), "r"((int)do_access));
218
+ #else
219
+ const uint4* src_cast = reinterpret_cast<const uint4*>(src);
220
+ if (do_access) {
221
+ data[0] = src_cast[0];
222
+ } else {
223
+ data[0].x = 0;
224
+ data[0].y = 0;
225
+ data[0].z = 0;
226
+ data[0].w = 0;
227
+ }
228
+ #endif
229
+ }
230
+
231
+ template <>
232
+ __device__ __forceinline__ void load_global<8>(void* dst, const void* src)
233
+ {
234
+ uint2* data = reinterpret_cast<uint2*>(dst);
235
+ #ifdef PTX_AVAILABLE
236
+ asm volatile("ld.global.ca.v2.u32 {%0, %1}, [%2];\n"
237
+ : "=r"(data[0].x), "=r"(data[0].y)
238
+ : "l"(src));
239
+ #else
240
+ const uint2* src_cast = reinterpret_cast<const uint2*>(src);
241
+ data[0] = src_cast[0];
242
+ #endif
243
+ }
244
+
245
+ template <>
246
+ __device__ __forceinline__ void load_global<8>(void* dst, const void* src, bool do_access)
247
+ {
248
+ uint2* data = reinterpret_cast<uint2*>(dst);
249
+ #ifdef PTX_AVAILABLE
250
+ asm volatile(
251
+ "{\n"
252
+ "\t.reg .pred p;\n"
253
+ "\tsetp.ne.b32 p, %3, 0;\n"
254
+ "\tmov.b32 %0, 0;\n"
255
+ "\tmov.b32 %1, 0;\n"
256
+ "\t@p ld.global.v2.u32 {%0, %1}, [%2];\n"
257
+ "}\n"
258
+ : "=r"(data[0].x), "=r"(data[0].y)
259
+ : "l"(src), "r"((int)do_access));
260
+ #else
261
+ const uint2* src_cast = reinterpret_cast<const uint2*>(src);
262
+ if (do_access) {
263
+ data[0] = src_cast[0];
264
+ } else {
265
+ data[0].x = 0;
266
+ data[0].y = 0;
267
+ }
268
+ #endif
269
+ }
270
+
271
+ template <>
272
+ __device__ __forceinline__ void load_global<8, LoadPolicy::CacheGlobal>(void* dst, const void* src)
273
+ {
274
+ uint2* data = reinterpret_cast<uint2*>(dst);
275
+ #ifdef PTX_AVAILABLE
276
+ asm volatile("ld.global.cg.v2.u32 {%0, %1}, [%2];\n"
277
+ : "=r"(data[0].x), "=r"(data[0].y)
278
+ : "l"(src));
279
+ #else
280
+ const uint2* src_cast = reinterpret_cast<const uint2*>(src);
281
+ data[0] = src_cast[0];
282
+ #endif
283
+ }
284
+
285
+ template <>
286
+ __device__ __forceinline__ void load_global<8, LoadPolicy::CacheGlobal>(void* dst,
287
+ const void* src,
288
+ bool do_access)
289
+ {
290
+ uint2* data = reinterpret_cast<uint2*>(dst);
291
+ #ifdef PTX_AVAILABLE
292
+ asm volatile(
293
+ "{\n"
294
+ "\t.reg .pred p;\n"
295
+ "\tsetp.ne.b32 p, %3, 0;\n"
296
+ "\tmov.b32 %0, 0;\n"
297
+ "\tmov.b32 %1, 0;\n"
298
+ "\t@p ld.global.cg.v2.u32 {%0, %1}, [%2];\n"
299
+ "}\n"
300
+ : "=r"(data[0].x), "=r"(data[0].y)
301
+ : "l"(src), "r"((int)do_access));
302
+ #else
303
+ const uint2* src_cast = reinterpret_cast<const uint2*>(src);
304
+ if (do_access) {
305
+ data[0] = src_cast[0];
306
+ } else {
307
+ data[0].x = 0;
308
+ data[0].y = 0;
309
+ }
310
+ #endif
311
+ }
312
+
313
+ template <>
314
+ __device__ __forceinline__ void load_global<8, LoadPolicy::CacheStreaming>(void* dst,
315
+ const void* src)
316
+ {
317
+ uint2* data = reinterpret_cast<uint2*>(dst);
318
+ #ifdef PTX_AVAILABLE
319
+ asm volatile("ld.global.cs.v2.u32 {%0, %1}, [%2];\n"
320
+ : "=r"(data[0].x), "=r"(data[0].y)
321
+ : "l"(src));
322
+ #else
323
+ const uint2* src_cast = reinterpret_cast<const uint2*>(src);
324
+ data[0] = src_cast[0];
325
+ #endif
326
+ }
327
+
328
+ template <>
329
+ __device__ __forceinline__ void load_global<8, LoadPolicy::CacheStreaming>(void* dst,
330
+ const void* src,
331
+ bool do_access)
332
+ {
333
+ uint2* data = reinterpret_cast<uint2*>(dst);
334
+ #ifdef PTX_AVAILABLE
335
+ asm volatile(
336
+ "{\n"
337
+ "\t.reg .pred p;\n"
338
+ "\tsetp.ne.b32 p, %3, 0;\n"
339
+ "\tmov.b32 %0, 0;\n"
340
+ "\tmov.b32 %1, 0;\n"
341
+ "\t@p ld.global.cs.v2.u32 {%0, %1}, [%2];\n"
342
+ "}\n"
343
+ : "=r"(data[0].x), "=r"(data[0].y)
344
+ : "l"(src), "r"((int)do_access));
345
+ #else
346
+ const uint2* src_cast = reinterpret_cast<const uint2*>(src);
347
+ if (do_access) {
348
+ data[0] = src_cast[0];
349
+ } else {
350
+ data[0].x = 0;
351
+ data[0].y = 0;
352
+ }
353
+ #endif
354
+ }
355
+
356
+ template <>
357
+ __device__ __forceinline__ void load_global<4>(void* dst, const void* src)
358
+ {
359
+ int32_t* data = reinterpret_cast<int32_t*>(dst);
360
+ #ifdef PTX_AVAILABLE
361
+ asm volatile("ld.global.ca.u32 {%0}, [%1];\n" : "=r"(*data) : "l"(src));
362
+ #else
363
+ const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
364
+ data[0] = src_cast[0];
365
+ #endif
366
+ }
367
+
368
+ template <>
369
+ __device__ __forceinline__ void load_global<4>(void* dst, const void* src, bool do_access)
370
+ {
371
+ int32_t* data = reinterpret_cast<int32_t*>(dst);
372
+ #ifdef PTX_AVAILABLE
373
+ asm volatile(
374
+ "{\n"
375
+ "\t.reg .pred p;\n"
376
+ "\tsetp.ne.b32 p, %2, 0;\n"
377
+ "\tmov.b32 %0, 0;\n"
378
+ "\t@p ld.global.u32 {%0}, [%1];\n"
379
+ "}\n"
380
+ : "=r"(data[0])
381
+ : "l"(src), "r"((int)do_access));
382
+ #else
383
+ const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
384
+ if (do_access) {
385
+ data[0] = src_cast[0];
386
+ } else {
387
+ data[0] = 0;
388
+ }
389
+ #endif
390
+ }
391
+
392
+ template <>
393
+ __device__ __forceinline__ void load_global<4, LoadPolicy::CacheGlobal>(void* dst, const void* src)
394
+ {
395
+ int32_t* data = reinterpret_cast<int32_t*>(dst);
396
+ #ifdef PTX_AVAILABLE
397
+ asm volatile("ld.global.cg.u32 {%0}, [%1];\n" : "=r"(*data) : "l"(src));
398
+ #else
399
+ const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
400
+ data[0] = src_cast[0];
401
+ #endif
402
+ }
403
+
404
+ template <>
405
+ __device__ __forceinline__ void load_global<4, LoadPolicy::CacheGlobal>(void* dst,
406
+ const void* src,
407
+ bool do_access)
408
+ {
409
+ int32_t* data = reinterpret_cast<int32_t*>(dst);
410
+ #ifdef PTX_AVAILABLE
411
+ asm volatile(
412
+ "{\n"
413
+ "\t.reg .pred p;\n"
414
+ "\tsetp.ne.b32 p, %2, 0;\n"
415
+ "\tmov.b32 %0, 0;\n"
416
+ "\t@p ld.global.cg.u32 {%0}, [%1];\n"
417
+ "}\n"
418
+ : "=r"(data[0])
419
+ : "l"(src), "r"((int)do_access));
420
+ #else
421
+ const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
422
+ if (do_access) {
423
+ data[0] = src_cast[0];
424
+ } else {
425
+ data[0] = 0;
426
+ }
427
+ #endif
428
+ }
429
+
430
+ template <>
431
+ __device__ __forceinline__ void load_global<4, LoadPolicy::CacheStreaming>(void* dst,
432
+ const void* src)
433
+ {
434
+ int32_t* data = reinterpret_cast<int32_t*>(dst);
435
+ #ifdef PTX_AVAILABLE
436
+ asm volatile("ld.global.cs.u32 {%0}, [%1];\n" : "=r"(*data) : "l"(src));
437
+ #else
438
+ const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
439
+ data[0] = src_cast[0];
440
+ #endif
441
+ }
442
+
443
+ template <>
444
+ __device__ __forceinline__ void load_global<4, LoadPolicy::CacheStreaming>(void* dst,
445
+ const void* src,
446
+ bool do_access)
447
+ {
448
+ int32_t* data = reinterpret_cast<int32_t*>(dst);
449
+ #ifdef PTX_AVAILABLE
450
+ asm volatile(
451
+ "{\n"
452
+ "\t.reg .pred p;\n"
453
+ "\tsetp.ne.b32 p, %2, 0;\n"
454
+ "\tmov.b32 %0, 0;\n"
455
+ "\t@p ld.global.cs.u32 {%0}, [%1];\n"
456
+ "}\n"
457
+ : "=r"(data[0])
458
+ : "l"(src), "r"((int)do_access));
459
+ #else
460
+ const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
461
+ if (do_access) {
462
+ data[0] = src_cast[0];
463
+ } else {
464
+ data[0] = 0;
465
+ }
466
+ #endif
467
+ }
468
+
469
+ template <>
470
+ __device__ __forceinline__ void load_global<2>(void* dst, const void* src)
471
+ {
472
+ int16_t* data = reinterpret_cast<int16_t*>(dst);
473
+ #ifdef PTX_AVAILABLE
474
+ asm volatile("ld.global.ca.u16 {%0}, [%1];\n" : "=h"(*data) : "l"(src));
475
+ #else
476
+ const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
477
+ data[0] = src_cast[0];
478
+ #endif
479
+ }
480
+
481
+ template <>
482
+ __device__ __forceinline__ void load_global<2>(void* dst, const void* src, bool do_access)
483
+ {
484
+ int16_t* data = reinterpret_cast<int16_t*>(dst);
485
+ #ifdef PTX_AVAILABLE
486
+ asm volatile(
487
+ "{\n"
488
+ "\t.reg .pred p;\n"
489
+ "\tsetp.ne.b32 p, %2, 0;\n"
490
+ "\tmov.u16 %0, 0;\n"
491
+ "\t@p ld.global.u16 {%0}, [%1];\n"
492
+ "}\n"
493
+ : "=h"(*data)
494
+ : "l"(src), "r"((int)do_access));
495
+ #else
496
+ const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
497
+ if (do_access) {
498
+ data[0] = src_cast[0];
499
+ } else {
500
+ data[0] = 0;
501
+ }
502
+ #endif
503
+ }
504
+
505
+ template <>
506
+ __device__ __forceinline__ void load_global<2, LoadPolicy::CacheGlobal>(void* dst, const void* src)
507
+ {
508
+ int16_t* data = reinterpret_cast<int16_t*>(dst);
509
+ #ifdef PTX_AVAILABLE
510
+ asm volatile("ld.global.cg.u16 {%0}, [%1];\n" : "=h"(*data) : "l"(src));
511
+ #else
512
+ const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
513
+ data[0] = src_cast[0];
514
+ #endif
515
+ }
516
+
517
+ template <>
518
+ __device__ __forceinline__ void load_global<2, LoadPolicy::CacheGlobal>(void* dst,
519
+ const void* src,
520
+ bool do_access)
521
+ {
522
+ int16_t* data = reinterpret_cast<int16_t*>(dst);
523
+ #ifdef PTX_AVAILABLE
524
+ asm volatile(
525
+ "{\n"
526
+ "\t.reg .pred p;\n"
527
+ "\tsetp.ne.b32 p, %2, 0;\n"
528
+ "\tmov.u16 %0, 0;\n"
529
+ "\t@p ld.global.cg.u16 {%0}, [%1];\n"
530
+ "}\n"
531
+ : "=h"(*data)
532
+ : "l"(src), "r"((int)do_access));
533
+ #else
534
+ const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
535
+ if (do_access) {
536
+ data[0] = src_cast[0];
537
+ } else {
538
+ data[0] = 0;
539
+ }
540
+ #endif
541
+ }
542
+
543
+ template <>
544
+ __device__ __forceinline__ void load_global<2, LoadPolicy::CacheStreaming>(void* dst,
545
+ const void* src)
546
+ {
547
+ int16_t* data = reinterpret_cast<int16_t*>(dst);
548
+ #ifdef PTX_AVAILABLE
549
+ asm volatile("ld.global.cs.u16 {%0}, [%1];\n" : "=h"(*data) : "l"(src));
550
+ #else
551
+ const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
552
+ data[0] = src_cast[0];
553
+ #endif
554
+ }
555
+
556
+ template <>
557
+ __device__ __forceinline__ void load_global<2, LoadPolicy::CacheStreaming>(void* dst,
558
+ const void* src,
559
+ bool do_access)
560
+ {
561
+ int16_t* data = reinterpret_cast<int16_t*>(dst);
562
+ #ifdef PTX_AVAILABLE
563
+ asm volatile(
564
+ "{\n"
565
+ "\t.reg .pred p;\n"
566
+ "\tsetp.ne.b32 p, %2, 0;\n"
567
+ "\tmov.u16 %0, 0;\n"
568
+ "\t@p ld.global.cs.u16 {%0}, [%1];\n"
569
+ "}\n"
570
+ : "=h"(*data)
571
+ : "l"(src), "r"((int)do_access));
572
+ #else
573
+ const int16_t* src_cast = reinterpret_cast<const int16_t*>(src);
574
+ if (do_access) {
575
+ data[0] = src_cast[0];
576
+ } else {
577
+ data[0] = 0;
578
+ }
579
+ #endif
580
+ }
581
+
582
+ /////////// Load Shared ///////////
583
+ namespace internal {
584
+
585
+ #ifdef PTX_AVAILABLE
586
+ __device__ __forceinline__ unsigned convert_to_shared(const void* ptr)
587
+ {
588
+ #if __CUDACC_VER_MAJOR__ >= 11
589
+ // In CUDA 11 we have a builtin intrinsic
590
+ return __cvta_generic_to_shared(ptr);
591
+ #else
592
+ unsigned ret_val;
593
+ asm volatile(
594
+ "{\n"
595
+ "\t.reg .u64 p1;\n"
596
+ "\tcvta.to.shared.u64 p1, %1\n"
597
+ "\tcvt.u32.u64 %0, p1;\n"
598
+ "}\n"
599
+ : "=r"(ret_val)
600
+ : "l"(ptr));
601
+ return ret_val;
602
+ #endif
603
+ }
604
+ #endif
605
+
606
+ } // namespace internal
607
+
608
+ template <>
609
+ __device__ __forceinline__ void load_shared<16>(void* dst, const void* src)
610
+ {
611
+ uint4* data = reinterpret_cast<uint4*>(dst);
612
+ #ifdef PTX_AVAILABLE
613
+ unsigned src_shr = internal::convert_to_shared(src);
614
+
615
+ asm volatile("ld.shared.v4.u32 {%0, %1, %2, %3}, [%4];\n"
616
+ : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
617
+ : "r"(src_shr));
618
+ #else
619
+ const uint4* src_cast = reinterpret_cast<const uint4*>(src);
620
+ data[0] = src_cast[0];
621
+ #endif
622
+ }
623
+
624
+ template <>
625
+ __device__ __forceinline__ void load_shared<16>(void* dst, const void* src, bool do_access)
626
+ {
627
+ uint4* data = reinterpret_cast<uint4*>(dst);
628
+ #ifdef PTX_AVAILABLE
629
+ unsigned src_shr = internal::convert_to_shared(src);
630
+
631
+ asm volatile(
632
+ "{\n"
633
+ "\t.reg .pred p;\n"
634
+ "\tsetp.ne.b32 p, %5, 0;\n"
635
+ "\tmov.b32 %0, 0;\n"
636
+ "\tmov.b32 %1, 0;\n"
637
+ "\tmov.b32 %2, 0;\n"
638
+ "\tmov.b32 %3, 0;\n"
639
+ "\t@p ld.shared.v4.u32 {%0, %1, %2, %3}, [%4];\n"
640
+ "}\n"
641
+ : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w)
642
+ : "r"(src_shr), "r"((int)do_access));
643
+ #else
644
+ const uint4* src_cast = reinterpret_cast<const uint4*>(src);
645
+ if (do_access) {
646
+ data[0] = src_cast[0];
647
+ } else {
648
+ data[0].x = 0;
649
+ data[0].y = 0;
650
+ data[0].z = 0;
651
+ data[0].w = 0;
652
+ }
653
+ #endif
654
+ }
655
+
656
+ template <>
657
+ __device__ __forceinline__ void load_shared<8>(void* dst, const void* src)
658
+ {
659
+ uint2* data = reinterpret_cast<uint2*>(dst);
660
+ #ifdef PTX_AVAILABLE
661
+ unsigned src_shr = internal::convert_to_shared(src);
662
+
663
+ asm volatile("ld.shared.v2.u32 {%0, %1}, [%2];\n"
664
+ : "=r"(data[0].x), "=r"(data[0].y)
665
+ : "r"(src_shr));
666
+ #else
667
+ const uint2* src_cast = reinterpret_cast<const uint2*>(src);
668
+ data[0] = src_cast[0];
669
+ #endif
670
+ }
671
+
672
+ template <>
673
+ __device__ __forceinline__ void load_shared<8>(void* dst, const void* src, bool do_access)
674
+ {
675
+ uint2* data = reinterpret_cast<uint2*>(dst);
676
+ #ifdef PTX_AVAILABLE
677
+ unsigned src_shr = internal::convert_to_shared(src);
678
+
679
+ asm volatile(
680
+ "{\n"
681
+ "\t.reg .pred p;\n"
682
+ "\tsetp.ne.b32 p, %3, 0;\n"
683
+ "\tmov.b32 %0, 0;\n"
684
+ "\tmov.b32 %1, 0;\n"
685
+ "\t@p ld.shared.v2.u32 {%0, %1}, [%2];\n"
686
+ "}\n"
687
+ : "=r"(data[0].x), "=r"(data[0].y)
688
+ : "r"(src_shr), "r"((int)do_access));
689
+ #else
690
+ const uint2* src_cast = reinterpret_cast<const uint2*>(src);
691
+ if (do_access) {
692
+ data[0] = src_cast[0];
693
+ } else {
694
+ data[0].x = 0;
695
+ data[0].y = 0;
696
+ }
697
+ #endif
698
+ }
699
+
700
+ template <>
701
+ __device__ __forceinline__ void load_shared<4>(void* dst, const void* src)
702
+ {
703
+ int32_t* data = reinterpret_cast<int32_t*>(dst);
704
+ #ifdef PTX_AVAILABLE
705
+ unsigned src_shr = internal::convert_to_shared(src);
706
+
707
+ asm volatile("ld.shared.u32 {%0}, [%1];\n" : "=r"(*data) : "r"(src_shr));
708
+ #else
709
+ const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
710
+ data[0] = src_cast[0];
711
+ #endif
712
+ }
713
+
714
+ template <>
715
+ __device__ __forceinline__ void load_shared<4>(void* dst, const void* src, bool do_access)
716
+ {
717
+ int32_t* data = reinterpret_cast<int32_t*>(dst);
718
+ #ifdef PTX_AVAILABLE
719
+ unsigned src_shr = internal::convert_to_shared(src);
720
+
721
+ asm volatile(
722
+ "{\n"
723
+ "\t.reg .pred p;\n"
724
+ "\tsetp.ne.b32 p, %2, 0;\n"
725
+ "\tmov.b32 %0, 0;\n"
726
+ "\t@p ld.shared.u32 %0, [%1];\n"
727
+ "}\n"
728
+ : "=r"(data[0])
729
+ : "r"(src_shr), "r"((int)do_access));
730
+ #else
731
+ const int32_t* src_cast = reinterpret_cast<const int32_t*>(src);
732
+ if (do_access) {
733
+ data[0] = src_cast[0];
734
+ } else {
735
+ data[0] = 0;
736
+ }
737
+ #endif
738
+ }
739
+
740
+ /////////// Store Global ///////////
741
+
742
+ template <>
743
+ __device__ __forceinline__ void store_global<16>(void* dst, const void* src)
744
+ {
745
+ const uint4* data = reinterpret_cast<const uint4*>(src);
746
+ #ifdef PTX_AVAILABLE
747
+ asm volatile("st.global.wb.v4.u32 [%0], {%1, %2, %3, %4};\n"
748
+ :
749
+ : "l"(dst), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w)
750
+ : "memory");
751
+ #else
752
+ uint4* dst_cast = reinterpret_cast<uint4*>(dst);
753
+ dst_cast[0] = data[0];
754
+ #endif
755
+ }
756
+
757
+ template <>
758
+ __device__ __forceinline__ void store_global<16, StorePolicy::CacheGlobal>(void* dst,
759
+ const void* src)
760
+ {
761
+ const uint4* data = reinterpret_cast<const uint4*>(src);
762
+ #ifdef PTX_AVAILABLE
763
+ asm volatile("st.global.cg.v4.u32 [%0], {%1, %2, %3, %4};\n"
764
+ :
765
+ : "l"(dst), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w)
766
+ : "memory");
767
+ #else
768
+ uint4* dst_cast = reinterpret_cast<uint4*>(dst);
769
+ dst_cast[0] = data[0];
770
+ #endif
771
+ }
772
+
773
+ template <>
774
+ __device__ __forceinline__ void store_global<16, StorePolicy::CacheStreaming>(void* dst,
775
+ const void* src)
776
+ {
777
+ const uint4* data = reinterpret_cast<const uint4*>(src);
778
+ #ifdef PTX_AVAILABLE
779
+ asm volatile("st.global.cs.v4.u32 [%0], {%1, %2, %3, %4};\n"
780
+ :
781
+ : "l"(dst), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w)
782
+ : "memory");
783
+ #else
784
+ uint4* dst_cast = reinterpret_cast<uint4*>(dst);
785
+ dst_cast[0] = data[0];
786
+ #endif
787
+ }
788
+
789
+ template <>
790
+ __device__ __forceinline__ void store_global<8>(void* dst, const void* src)
791
+ {
792
+ const uint2* data = reinterpret_cast<const uint2*>(src);
793
+ #ifdef PTX_AVAILABLE
794
+ asm volatile("st.global.wb.v2.u32 [%0], {%1, %2};\n"
795
+ :
796
+ : "l"(dst), "r"(data[0].x), "r"(data[0].y));
797
+ #else
798
+ uint2* dst_cast = reinterpret_cast<uint2*>(dst);
799
+ dst_cast[0] = data[0];
800
+ #endif
801
+ }
802
+
803
+ template <>
804
+ __device__ __forceinline__ void store_global<8, StorePolicy::CacheGlobal>(void* dst,
805
+ const void* src)
806
+ {
807
+ const uint2* data = reinterpret_cast<const uint2*>(src);
808
+ #ifdef PTX_AVAILABLE
809
+ asm volatile("st.global.cg.v2.u32 [%0], {%1, %2};\n"
810
+ :
811
+ : "l"(dst), "r"(data[0].x), "r"(data[0].y));
812
+ #else
813
+ uint2* dst_cast = reinterpret_cast<uint2*>(dst);
814
+ dst_cast[0] = data[0];
815
+ #endif
816
+ }
817
+
818
+ template <>
819
+ __device__ __forceinline__ void store_global<8, StorePolicy::CacheStreaming>(void* dst,
820
+ const void* src)
821
+ {
822
+ const uint2* data = reinterpret_cast<const uint2*>(src);
823
+ #ifdef PTX_AVAILABLE
824
+ asm volatile("st.global.cs.v2.u32 [%0], {%1, %2};\n"
825
+ :
826
+ : "l"(dst), "r"(data[0].x), "r"(data[0].y));
827
+ #else
828
+ uint2* dst_cast = reinterpret_cast<uint2*>(dst);
829
+ dst_cast[0] = data[0];
830
+ #endif
831
+ }
832
+
833
+ template <>
834
+ __device__ __forceinline__ void store_global<4>(void* dst, const void* src)
835
+ {
836
+ const int32_t* data = reinterpret_cast<const int32_t*>(src);
837
+ #ifdef PTX_AVAILABLE
838
+ asm volatile("st.global.wb.u32 [%0], %1;\n" : : "l"(dst), "r"(*data));
839
+ #else
840
+ int32_t* dst_cast = reinterpret_cast<int32_t*>(dst);
841
+ dst_cast[0] = data[0];
842
+ #endif
843
+ }
844
+
845
+ template <>
846
+ __device__ __forceinline__ void store_global<4, StorePolicy::CacheGlobal>(void* dst,
847
+ const void* src)
848
+ {
849
+ const int32_t* data = reinterpret_cast<const int32_t*>(src);
850
+ #ifdef PTX_AVAILABLE
851
+ asm volatile("st.global.cg.u32 [%0], %1;\n" : : "l"(dst), "r"(*data));
852
+ #else
853
+ int32_t* dst_cast = reinterpret_cast<int32_t*>(dst);
854
+ dst_cast[0] = data[0];
855
+ #endif
856
+ }
857
+
858
+ template <>
859
+ __device__ __forceinline__ void store_global<4, StorePolicy::CacheStreaming>(void* dst,
860
+ const void* src)
861
+ {
862
+ const int32_t* data = reinterpret_cast<const int32_t*>(src);
863
+ #ifdef PTX_AVAILABLE
864
+ asm volatile("st.global.cs.u32 [%0], %1;\n" : : "l"(dst), "r"(*data));
865
+ #else
866
+ int32_t* dst_cast = reinterpret_cast<int32_t*>(dst);
867
+ dst_cast[0] = data[0];
868
+ #endif
869
+ }
870
+
871
+ /////////// Store Shared ///////////
872
+
873
+ template <>
874
+ __device__ __forceinline__ void store_shared<16>(void* dst, const void* src)
875
+ {
876
+ const uint4* data = reinterpret_cast<const uint4*>(src);
877
+ #ifdef PTX_AVAILABLE
878
+ unsigned dst_int = internal::convert_to_shared(dst);
879
+
880
+ asm volatile("st.shared.v4.u32 [%0], {%1, %2, %3, %4};\n"
881
+ :
882
+ : "r"(dst_int), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w));
883
+ #else
884
+ uint4* dst_cast = reinterpret_cast<uint4*>(dst);
885
+ dst_cast[0] = data[0];
886
+ #endif
887
+ }
888
+
889
+ template <>
890
+ __device__ __forceinline__ void store_shared<8>(void* dst, const void* src)
891
+ {
892
+ const uint2* data = reinterpret_cast<const uint2*>(src);
893
+ #ifdef PTX_AVAILABLE
894
+ unsigned dst_int = internal::convert_to_shared(dst);
895
+
896
+ asm volatile("st.shared.v2.u32 [%0], {%1, %2};\n"
897
+ :
898
+ : "r"(dst_int), "r"(data[0].x), "r"(data[0].y));
899
+ #else
900
+ uint2* dst_cast = reinterpret_cast<uint2*>(dst);
901
+ dst_cast[0] = data[0];
902
+ #endif
903
+ }
904
+
905
+ template <>
906
+ __device__ __forceinline__ void store_shared<4>(void* dst, const void* src)
907
+ {
908
+ const int32_t* data = reinterpret_cast<const int32_t*>(src);
909
+ #ifdef PTX_AVAILABLE
910
+ unsigned dst_int = internal::convert_to_shared(dst);
911
+
912
+ asm volatile("st.shared.u32 [%0], %1;\n" : : "r"(dst_int), "r"(*data));
913
+ #else
914
+ int32_t* dst_cast = reinterpret_cast<int32_t*>(dst);
915
+ dst_cast[0] = data[0];
916
+ #endif
917
+ }
918
+
919
+ /////////// Asynchronous Memory Copy ///////////
920
+
921
+ #ifdef ASYNC_COPY_AVAILABLE
922
+ template <int AccessSize>
923
+ __device__ __forceinline__ void memcpy_async(void* shr, const void* gbl)
924
+ {
925
+ static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16));
926
+ unsigned shr_int = internal::convert_to_shared(shr);
927
+
928
+ asm volatile("cp.async.ca.shared.global [%0], [%1], %2;\n"
929
+ :
930
+ : "r"(shr_int), "l"(gbl), "n"(AccessSize));
931
+ }
932
+
933
+ template <int AccessSize>
934
+ __device__ __forceinline__ void memcpy_async_nop(void* shr, const void* gbl, bool predicate)
935
+ {
936
+ static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16));
937
+ unsigned shr_int = internal::convert_to_shared(shr);
938
+
939
+ asm volatile(
940
+ "{\n"
941
+ " .reg .pred p;\n"
942
+ " setp.ne.b32 p, %0, 0;\n"
943
+ " @p cp.async.ca.shared.global [%1], [%2], %3;\n"
944
+ "}\n"
945
+ :
946
+ : "r"((int)predicate), "r"(shr_int), "l"(gbl), "n"(AccessSize));
947
+ }
948
+
949
+ template <int AccessSize>
950
+ __device__ __forceinline__ void memcpy_async_zero(void* shr, const void* gbl, bool predicate)
951
+ {
952
+ static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16));
953
+ unsigned shr_int = internal::convert_to_shared(shr);
954
+ int bytes_to_copy = (predicate ? AccessSize : 0);
955
+
956
+ asm volatile("cp.async.ca.shared.global [%0], [%1], %2, %3;\n"
957
+ :
958
+ : "r"(shr_int), "l"(gbl), "n"(AccessSize), "r"(bytes_to_copy));
959
+ }
960
+
961
+ template <int AccessSize>
962
+ __device__ __forceinline__ void memcpy_async_zero_nop(void* shr,
963
+ const void* gbl,
964
+ bool zero_predicate,
965
+ bool nop_predicate)
966
+ {
967
+ static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16));
968
+ unsigned shr_int = internal::convert_to_shared(shr);
969
+ int bytes_to_copy = (zero_predicate ? AccessSize : 0);
970
+
971
+ asm volatile(
972
+ "{\n"
973
+ " .reg .pred p;\n"
974
+ " setp.ne.b32 p, %0, 0;\n"
975
+ " @p cp.async.ca.shared.global [%1], [%2], %3, %4;\n"
976
+ "}\n"
977
+ :
978
+ : "r"((int)nop_predicate), "r"(shr_int), "l"(gbl), "n"(AccessSize), "r"(bytes_to_copy));
979
+ }
980
+
981
+ // Cache global variants. Separate interface to require deliberate use of them.
982
+ __device__ __forceinline__ void memcpy_async_cg(void* shr, const void* gbl)
983
+ {
984
+ unsigned shr_int = internal::convert_to_shared(shr);
985
+
986
+ asm volatile("cp.async.cg.shared.global [%0], [%1], 16;\n" : : "r"(shr_int), "l"(gbl));
987
+ }
988
+
989
+ __device__ __forceinline__ void memcpy_async_nop_cg(void* shr, const void* gbl, bool predicate)
990
+ {
991
+ unsigned shr_int = internal::convert_to_shared(shr);
992
+
993
+ asm volatile(
994
+ "{\n"
995
+ " .reg .pred p;\n"
996
+ " setp.ne.b32 p, %0, 0;\n"
997
+ " @p cp.async.cg.shared.global [%1], [%2], 16;\n"
998
+ "}\n"
999
+ :
1000
+ : "r"((int)predicate), "r"(shr_int), "l"(gbl));
1001
+ }
1002
+
1003
+ __device__ __forceinline__ void memcpy_async_zero_cg(void* shr, const void* gbl, bool predicate)
1004
+ {
1005
+ unsigned shr_int = internal::convert_to_shared(shr);
1006
+ int bytes_to_copy = (predicate ? 16 : 0);
1007
+
1008
+ asm volatile("cp.async.cg.shared.global [%0], [%1], 16, %2;\n"
1009
+ :
1010
+ : "r"(shr_int), "l"(gbl), "r"(bytes_to_copy));
1011
+ }
1012
+
1013
+ __device__ __forceinline__ void memcpy_async_zero_nop_cg(void* shr,
1014
+ const void* gbl,
1015
+ bool zero_predicate,
1016
+ bool nop_predicate)
1017
+ {
1018
+ unsigned shr_int = internal::convert_to_shared(shr);
1019
+ int bytes_to_copy = (zero_predicate ? 16 : 0);
1020
+
1021
+ asm volatile(
1022
+ "{\n"
1023
+ " .reg .pred p;\n"
1024
+ " setp.ne.b32 p, %0, 0;\n"
1025
+ " @p cp.async.cg.shared.global [%1], [%2], 16, %3;\n"
1026
+ "}\n"
1027
+ :
1028
+ : "r"((int)nop_predicate), "r"(shr_int), "l"(gbl), "r"(bytes_to_copy));
1029
+ }
1030
+
1031
+ __device__ __forceinline__ void memcpy_async_fence() { asm volatile("cp.async.commit_group;\n"); }
1032
+
1033
+ template <int stages>
1034
+ __device__ __forceinline__ void memcpy_async_wait()
1035
+ {
1036
+ static_assert(stages <= 8);
1037
+
1038
+ asm volatile("cp.async.wait_group %0;\n" : : "n"(stages));
1039
+ }
1040
+
1041
+ // TODO: The tail complete should be a known compile time artifact, should try and induce this
1042
+ // without all of the branches from the call-site. This is a hacky solution.
1043
+ template <>
1044
+ __device__ __forceinline__ void tail_complete_wait<1>(int remaining_stages)
1045
+ {
1046
+ if (remaining_stages == 0) memcpy_async_wait<0>();
1047
+ }
1048
+
1049
+ template <>
1050
+ __device__ __forceinline__ void tail_complete_wait<2>(int remaining_stages)
1051
+ {
1052
+ if (remaining_stages == 1)
1053
+ memcpy_async_wait<1>();
1054
+ else if (remaining_stages == 0)
1055
+ memcpy_async_wait<0>();
1056
+ }
1057
+
1058
+ template <>
1059
+ __device__ __forceinline__ void tail_complete_wait<3>(int remaining_stages)
1060
+ {
1061
+ if (remaining_stages == 2)
1062
+ memcpy_async_wait<2>();
1063
+ else if (remaining_stages == 1)
1064
+ memcpy_async_wait<1>();
1065
+ else if (remaining_stages == 0)
1066
+ memcpy_async_wait<0>();
1067
+ }
1068
+
1069
+ template <>
1070
+ __device__ __forceinline__ void tail_complete_wait<4>(int remaining_stages)
1071
+ {
1072
+ if (remaining_stages == 3)
1073
+ memcpy_async_wait<3>();
1074
+ else if (remaining_stages == 2)
1075
+ memcpy_async_wait<2>();
1076
+ else if (remaining_stages == 1)
1077
+ memcpy_async_wait<1>();
1078
+ else if (remaining_stages == 0)
1079
+ memcpy_async_wait<0>();
1080
+ }
1081
+
1082
+ template <>
1083
+ __device__ __forceinline__ void tail_complete_wait<5>(int remaining_stages)
1084
+ {
1085
+ if (remaining_stages == 4)
1086
+ memcpy_async_wait<4>();
1087
+ else if (remaining_stages == 3)
1088
+ memcpy_async_wait<3>();
1089
+ else if (remaining_stages == 2)
1090
+ memcpy_async_wait<2>();
1091
+ else if (remaining_stages == 1)
1092
+ memcpy_async_wait<1>();
1093
+ else if (remaining_stages == 0)
1094
+ memcpy_async_wait<0>();
1095
+ }
1096
+
1097
+ template <>
1098
+ __device__ __forceinline__ void tail_complete_wait<6>(int remaining_stages)
1099
+ {
1100
+ if (remaining_stages == 5)
1101
+ memcpy_async_wait<5>();
1102
+ else if (remaining_stages == 4)
1103
+ memcpy_async_wait<4>();
1104
+ else if (remaining_stages == 3)
1105
+ memcpy_async_wait<3>();
1106
+ else if (remaining_stages == 2)
1107
+ memcpy_async_wait<2>();
1108
+ else if (remaining_stages == 1)
1109
+ memcpy_async_wait<1>();
1110
+ else if (remaining_stages == 0)
1111
+ memcpy_async_wait<0>();
1112
+ }
1113
+ #endif
1114
+
1115
+ } // namespace mem_access
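These helpers exist so kernels can issue wide, policy-controlled loads and stores without writing inline PTX at every call site. A rough usage sketch (hypothetical kernel and bounds handling): each thread stages 16 bytes through `load_global`/`store_global`, which lower to the `v4.u32` PTX forms above when `PTX_AVAILABLE` is defined and to plain `uint4` copies otherwise.

```cpp
// Sketch only: 16-byte-per-thread copy using the mem_access helpers.
#include "memory_access_utils.h"

__global__ void copy_vec16(float* out, const float* in, int n_elems)
{
    // Each thread handles 4 consecutive floats (16 bytes).
    const int base = (blockIdx.x * blockDim.x + threadIdx.x) * 4;
    if (base + 3 < n_elems) {
        float4 buf;  // 16-byte-aligned staging registers
        mem_access::load_global<16>(&buf, in + base);
        mem_access::store_global<16>(out + base, &buf);
    }
}
```

Swapping the default policies for `LoadPolicy::CacheStreaming` / `StorePolicy::CacheStreaming` marks the lines evict-first, which suits data touched exactly once.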
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/reduction_utils.h ADDED
@@ -0,0 +1,778 @@
1
+ // Copyright (c) Microsoft Corporation.
2
+ // SPDX-License-Identifier: Apache-2.0
3
+
4
+ // DeepSpeed Team
5
+
6
+ #pragma once
7
+
8
+ #include "conversion_utils.h"
9
+ #include "ds_kernel_utils.h"
10
+ #include "memory_access_utils.h"
11
+
12
+ namespace cg = cooperative_groups;
13
+
14
+ namespace reduce {
15
+
16
+ enum class ROpType {
17
+ // Addition
18
+ Add,
19
+
20
+ // Maximum reduction
21
+ Max,
22
+
23
+ // Minimum reduction
24
+ Min,
25
+ };
26
+
27
+ constexpr int max_threads = 1024;
28
+ constexpr int max_warps = max_threads / hw_warp_size;
29
+
30
+ /*
31
+ High level API. The API takes in a set of operations and variables
32
+ and performs each reduction operation on its corresponding variable. The reductions
33
+ of each of the arguments are completely independent of each other (
34
+ i.e., the val1-op1 combination has no impact on val2-op2).
35
+
36
+ Example usage:
37
+ ``` cpp
38
+ float max_val;
39
+ float min_val;
40
+ reduce::block<rop::Max, rop::Min>(tb, warp, max_val, min_val);
41
+ ```
42
+
43
+ TODO(cmikeh2): In theory, we might be able to do this sequentially with
44
+ device functions and rely on the assembler correctly behaving. My initial
45
+ instinct is this won't work, but if it does it would reduce implementation
46
+ cost significantly.
47
+
48
+ TODO(cmikeh2): We need to support sub-block reductions. The warp intrinsic
49
+ currently supports this (more incidentally than anything else). It is not
50
+ uncommon in something like softmax or a fused attention kernel to map multiple
51
+ reductions to a thread block, but each reduction itself is only scoped
52
+ to part of the threads (i.e., block size = 512, 128 threads per reduction).
53
+ */
54
+ template <ROpType Op, int warp_bound = max_warps>
55
+ DS_D_INLINE void block(cg::thread_block& tb, cg::thread_block_tile<hw_warp_size>& warp, float& val);
56
+
57
+ template <ROpType Op1, ROpType Op2, int warp_bound = max_warps>
58
+ DS_D_INLINE void block(cg::thread_block& tb,
59
+ cg::thread_block_tile<hw_warp_size>& warp,
60
+ float& val1,
61
+ float& val2);
62
+
63
+ template <ROpType Op1, ROpType Op2, ROpType Op3, int warp_bound = max_warps>
64
+ DS_D_INLINE void block(cg::thread_block& tb,
65
+ cg::thread_block_tile<hw_warp_size>& warp,
66
+ float& val1,
67
+ float& val2,
68
+ float& val3);
69
+
70
+ template <ROpType Op1, ROpType Op2, ROpType Op3, ROpType Op4, int warp_bound = max_warps>
71
+ DS_D_INLINE void block(cg::thread_block& tb,
72
+ cg::thread_block_tile<hw_warp_size>& warp,
73
+ float& val1,
74
+ float& val2,
75
+ float& val3,
76
+ float& val4);
77
+
78
+ /*
79
+ The partitioned block is a special case of the above wherein the warps of a threadblock are
80
+ partitioned into separate independent reductions. For example, I might have an 8 warp thread block
81
+ in which each pair of warps is processing an independent piece of data. I would then reduce that
82
+ data with something like the following:
83
+ ``` cpp
84
+ float max_val;
85
+ reduce::partitioned_block<rop::Max, 2>(tb, warp, max_val);
86
+ ```
87
+ After which, each pair of warps would have coherent data with each other. Note, this API will not
88
+ provide correct results if the number of warps per partition is not a power of 2.
89
+ */
90
+ template <ROpType Op, int num_threads>
91
+ DS_D_INLINE void partitioned_block(cg::thread_block& tb,
92
+ cg::thread_block_tile<hw_warp_size>& warp,
93
+ float& val);
94
+
95
+ template <ROpType Op1, ROpType Op2, int num_threads>
96
+ DS_D_INLINE void partitioned_block(cg::thread_block& tb,
97
+ cg::thread_block_tile<hw_warp_size>& warp,
98
+ float& val1,
99
+ float& val2);
100
+
101
+ template <ROpType Op1, ROpType Op2, ROpType Op3, int num_threads>
102
+ DS_D_INLINE void partitioned_block(cg::thread_block& tb,
103
+ cg::thread_block_tile<hw_warp_size>& warp,
104
+ float& val1,
105
+ float& val2,
106
+ float& val3);
107
+
108
+ template <ROpType Op1, ROpType Op2, ROpType Op3, ROpType Op4, int num_threads>
109
+ DS_D_INLINE void partitioned_block(cg::thread_block& tb,
110
+ cg::thread_block_tile<hw_warp_size>& warp,
111
+ float& val1,
112
+ float& val2,
113
+ float& val3,
114
+ float& val4);
115
+
116
+ /*
117
+ Single element reduction primitives. Used inside serial collection
118
+ loops.
119
+
120
+ Example usage:
121
+ using rop = reduce::ROpType;
122
+ float min = init<rop::Min>();
123
+ for (int i = 0; i < 4; i++) {
124
+ min = reduce::element<rop::Min>(min, data[i]);
125
+ }
126
+ */
127
+
128
+ template <ROpType Op, typename T>
129
+ DS_D_INLINE T element(const T lhs, const T rhs);
130
+
131
+ template <ROpType OType, typename T = float>
132
+ DS_D_INLINE T init();
133
+
134
+ /********************** Internal reduction APIs **********************/
135
+
136
+ /*
137
+ Single element "reductions". TODO(cmikeh2): this sort of "op" concept
138
+ should be refactored into its own implementation at some point. This interface
139
+ may be easily expanded for new types/operations, but the typical reductions
140
+ we need are covered with min/max/add on float.
141
+
142
+ NOTE: there is no mean reduction because that relies on knowledge of how
143
+ many values were already reduced into each scalar. Implementing this on top
144
+ of reduce should be straightforward (can just wrap the sum reduction) and
145
+ would be a good extension of the header.
146
+ */
147
+
148
+ DS_D_INLINE int _warp_rank()
149
+ {
150
+ const int thread_rank =
151
+ threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.x * blockDim.y;
152
+ return thread_rank / hw_warp_size;
153
+ }
154
+
155
+ /* Float element reduce implementations */
156
+ template <>
157
+ DS_D_INLINE float element<ROpType::Add>(const float lhs, const float rhs)
158
+ {
159
+ return lhs + rhs;
160
+ }
161
+
162
+ template <>
163
+ DS_D_INLINE float element<ROpType::Max>(const float lhs, const float rhs)
164
+ {
165
+ return fmaxf(lhs, rhs);
166
+ }
167
+
168
+ template <>
169
+ DS_D_INLINE float element<ROpType::Min>(const float lhs, const float rhs)
170
+ {
171
+ return fminf(lhs, rhs);
172
+ }
173
+
174
+ /* __half element reduce implementation */
175
+ template <>
176
+ DS_D_INLINE __half element<ROpType::Add>(const __half lhs, const __half rhs)
177
+ {
178
+ return lhs + rhs;
179
+ }
180
+
181
+ template <>
182
+ DS_D_INLINE __half element<ROpType::Max>(const __half lhs, const __half rhs)
183
+ {
184
+ #if __CUDA_ARCH__ >= 800
185
+ // Intrinsic limited to Ampere + newer
186
+ return __hmax(lhs, rhs);
187
+ #else
188
+ return (lhs > rhs) ? lhs : rhs;
189
+ #endif
190
+ }
191
+
192
+ template <>
193
+ DS_D_INLINE __half element<ROpType::Min>(const __half lhs, const __half rhs)
194
+ {
195
+ #if __CUDA_ARCH__ >= 800
196
+ // Intrinsic limited to Ampere + newer
197
+ return __hmin(lhs, rhs);
198
+ #else
199
+ return (lhs < rhs) ? lhs : rhs;
200
+ #endif
201
+ }
202
+
203
+ /* __half2 element reduce implementation */
204
+ template <>
205
+ DS_D_INLINE __half2 element<ROpType::Add>(const __half2 lhs, const __half2 rhs)
206
+ {
207
+ return lhs + rhs;
208
+ }
209
+
210
+ template <>
211
+ DS_D_INLINE __half2 element<ROpType::Max>(const __half2 lhs, const __half2 rhs)
212
+ {
213
+ #if __CUDA_ARCH__ >= 800
214
+ return __hmax2(lhs, rhs);
215
+ #else
216
+ __half2 ret_val;
217
+ ret_val.x = (lhs.x > rhs.x) ? lhs.x : rhs.x;
218
+ ret_val.y = (lhs.y > rhs.y) ? lhs.y : rhs.y;
219
+ return ret_val;
220
+ #endif
221
+ }
222
+
223
+ template <>
224
+ DS_D_INLINE __half2 element<ROpType::Min>(const __half2 lhs, const __half2 rhs)
225
+ {
226
+ #if __CUDA_ARCH__ >= 800
227
+ return __hmin2(lhs, rhs);
228
+ #else
229
+ __half2 ret_val;
230
+ ret_val.x = (lhs.x < rhs.x) ? lhs.x : rhs.x;
231
+ ret_val.y = (lhs.y < rhs.y) ? lhs.y : rhs.y;
232
+ return ret_val;
233
+ #endif
234
+ }
235
+
236
+ template <>
237
+ DS_D_INLINE int32_t element<ROpType::Add>(const int32_t lhs, const int32_t rhs)
238
+ {
239
+ return lhs + rhs;
240
+ }
241
+
242
+ template <>
243
+ DS_D_INLINE int32_t element<ROpType::Max>(const int32_t lhs, const int32_t rhs)
244
+ {
245
+ return (lhs > rhs) ? lhs : rhs;
246
+ }
247
+
248
+ template <>
249
+ DS_D_INLINE int32_t element<ROpType::Min>(const int32_t lhs, const int32_t rhs)
250
+ {
251
+ return (lhs < rhs) ? lhs : rhs;
252
+ }
253
+
254
+ template <>
255
+ DS_D_INLINE uint32_t element<ROpType::Add>(const uint32_t lhs, const uint32_t rhs)
256
+ {
257
+ return lhs + rhs;
258
+ }
259
+
260
+ template <>
261
+ DS_D_INLINE uint32_t element<ROpType::Max>(const uint32_t lhs, const uint32_t rhs)
262
+ {
263
+ return (lhs > rhs) ? lhs : rhs;
264
+ }
265
+
266
+ template <>
267
+ DS_D_INLINE uint32_t element<ROpType::Min>(const uint32_t lhs, const uint32_t rhs)
268
+ {
269
+ return (lhs < rhs) ? lhs : rhs;
270
+ }
271
+
272
+ template <>
273
+ DS_D_INLINE int64_t element<ROpType::Add>(const int64_t lhs, const int64_t rhs)
274
+ {
275
+ return lhs + rhs;
276
+ }
277
+
278
+ template <>
279
+ DS_D_INLINE int64_t element<ROpType::Max>(const int64_t lhs, const int64_t rhs)
280
+ {
281
+ return (lhs > rhs) ? lhs : rhs;
282
+ }
283
+
284
+ template <>
285
+ DS_D_INLINE int64_t element<ROpType::Min>(const int64_t lhs, const int64_t rhs)
286
+ {
287
+ return (lhs < rhs) ? lhs : rhs;
288
+ }
289
+
290
+ /*
291
+ Reduction initialization primitives
292
+ */
293
+ template <>
294
+ DS_D_INLINE float init<ROpType::Add>()
295
+ {
296
+ return 0.0f;
297
+ }
298
+
299
+ template <>
300
+ DS_D_INLINE float init<ROpType::Min>()
301
+ {
302
+ // Positive infinity
303
+ return INFINITY;
304
+ }
305
+
306
+ template <>
307
+ DS_D_INLINE float init<ROpType::Max>()
308
+ {
309
+ // Negative infinity
310
+ return -INFINITY;
311
+ }
312
+
313
+ template <>
314
+ DS_D_INLINE __half init<ROpType::Add>()
315
+ {
316
+ constexpr __half_raw zero = {0x0000};
317
+ return __half(zero);
318
+ }
319
+
320
+ template <>
321
+ DS_D_INLINE __half init<ROpType::Min>()
322
+ {
323
+ constexpr __half_raw inf = {0x7C00};
324
+ return __half(inf);
325
+ }
326
+
327
+ template <>
328
+ DS_D_INLINE __half init<ROpType::Max>()
329
+ {
330
+ constexpr __half_raw neg_inf = {0xFC00};
331
+ return __half(neg_inf);
332
+ }
333
+
334
+ template <>
335
+ DS_D_INLINE __half2 init<ROpType::Add>()
336
+ {
337
+ #ifdef __HIP_PLATFORM_AMD__
338
+ return __half2{_Float16_2{0x0000, 0x0000}};
339
+ #else
340
+ constexpr __half2_raw zero = {0x0000, 0x0000};
341
+ return __half2(zero);
342
+ #endif
343
+ }
344
+
345
+ template <>
346
+ DS_D_INLINE __half2 init<ROpType::Min>()
347
+ {
348
+ #ifdef __HIP_PLATFORM_AMD__
349
+ return __half2{_Float16_2{0x7C00, 0x7C00}};
350
+ #else
351
+ constexpr __half2_raw inf = {0x7C00, 0x7C00};
352
+ return __half2(inf);
353
+ #endif
354
+ }
355
+
356
+ template <>
357
+ DS_D_INLINE __half2 init<ROpType::Max>()
358
+ {
359
+ #ifdef __HIP_PLATFORM_AMD__
360
+ return __half2{_Float16_2{0xFC00, 0xFC00}};
361
+ #else
362
+ constexpr __half2_raw neg_inf = {0xFC00, 0xFC00};
363
+ return __half2(neg_inf);
364
+ #endif
365
+ }
366
+
367
+ template <>
368
+ DS_D_INLINE int32_t init<ROpType::Add>()
369
+ {
370
+ return 0;
371
+ }
372
+
373
+ template <>
374
+ DS_D_INLINE int32_t init<ROpType::Min>()
375
+ {
376
+ return 0x7FFFFFFF;
377
+ }
378
+
379
+ template <>
380
+ DS_D_INLINE int32_t init<ROpType::Max>()
381
+ {
382
+ return 0x80000000;
383
+ }
384
+
385
+ template <>
386
+ DS_D_INLINE uint32_t init<ROpType::Add>()
387
+ {
388
+ return 0;
389
+ }
390
+
391
+ template <>
392
+ DS_D_INLINE uint32_t init<ROpType::Min>()
393
+ {
394
+ return 0xFFFFFFFF;
395
+ }
396
+
397
+ template <>
398
+ DS_D_INLINE uint32_t init<ROpType::Max>()
399
+ {
400
+ return 0;
401
+ }
402
+
403
+ template <>
404
+ DS_D_INLINE int64_t init<ROpType::Add>()
405
+ {
406
+ return 0;
407
+ }
408
+
409
+ template <>
410
+ DS_D_INLINE int64_t init<ROpType::Min>()
411
+ {
412
+ return 0x7FFFFFFFFFFFFFFF;
413
+ }
414
+
415
+ template <>
416
+ DS_D_INLINE int64_t init<ROpType::Max>()
417
+ {
418
+ return 0x8000000000000000;
419
+ }
420
+
421
+ template <>
422
+ DS_D_INLINE uint64_t init<ROpType::Add>()
423
+ {
424
+ return 0;
425
+ }
426
+
427
+ template <>
428
+ DS_D_INLINE uint64_t init<ROpType::Min>()
429
+ {
430
+ return 0xFFFFFFFFFFFFFFFF;
431
+ }
432
+
433
+ template <>
434
+ DS_D_INLINE uint64_t init<ROpType::Max>()
435
+ {
436
+ return 0;
437
+ }
438
+
439
+ template <ROpType Op, typename T>
440
+ DS_D_INLINE void init(T* data)
441
+ {
442
+ data[0] = init<Op, T>();
443
+ }
444
+
445
+ template <ROpType Op1, ROpType Op2, typename T>
446
+ DS_D_INLINE void init(T* data)
447
+ {
448
+ data[0] = init<Op1, T>();
449
+ data[1] = init<Op2, T>();
450
+ }
451
+
452
+ template <ROpType Op1, ROpType Op2, ROpType Op3, typename T>
453
+ DS_D_INLINE void init(T* data)
454
+ {
455
+ data[0] = init<Op1, T>();
456
+ data[1] = init<Op2, T>();
457
+ data[2] = init<Op3, T>();
458
+ }
459
+
460
+ template <ROpType Op1, ROpType Op2, ROpType Op3, ROpType Op4, typename T>
461
+ DS_D_INLINE void init(T* data)
462
+ {
463
+ data[0] = init<Op1, T>();
464
+ data[1] = init<Op2, T>();
465
+ data[2] = init<Op3, T>();
466
+ data[3] = init<Op4, T>();
467
+ }
468
+
469
+ /*
470
+ Warp reduction primitives
471
+
472
+ `reduction_width` is an unsafe template parameter, in the sense that
473
+ when using `reduction_width` < hw_warp_size the warp is partitioned
474
+ into `hw_warp_size` / `reduction_width` groups of partial sums.
475
+
476
+ If someone can figure out how to use variadic templates in a reasonable way
477
+ here (fold is C++17 only and I don't think it helps, and recursion feels like
478
+ huge overkill that harms readability) that would be wonderful.
479
+ */
480
+
481
+ template <typename T, ROpType Op, int reduce_width = hw_warp_size>
482
+ DS_D_INLINE void _warp(cg::thread_block_tile<hw_warp_size>& warp, T* data)
483
+ {
484
+ #pragma unroll
485
+ for (int i = 1; i < reduce_width; i *= 2) {
486
+ data[0] = element<Op>(data[0], warp.shfl_xor(data[0], i));
487
+ }
488
+ }
489
+
490
+ template <typename T, ROpType Op1, ROpType Op2, int reduce_width = hw_warp_size>
491
+ DS_D_INLINE void _warp(cg::thread_block_tile<hw_warp_size>& warp, T* data)
492
+ {
493
+ #pragma unroll
494
+ for (int i = 1; i < reduce_width; i *= 2) {
495
+ data[0] = element<Op1>(data[0], warp.shfl_xor(data[0], i));
496
+         data[1] = element<Op2>(data[1], warp.shfl_xor(data[1], i));
+     }
+ }
+
+ template <typename T, ROpType Op1, ROpType Op2, ROpType Op3, int reduce_width = hw_warp_size>
+ DS_D_INLINE void _warp(cg::thread_block_tile<hw_warp_size>& warp, T* data)
+ {
+ #pragma unroll
+     for (int i = 1; i < reduce_width; i *= 2) {
+         data[0] = element<Op1>(data[0], warp.shfl_xor(data[0], i));
+         data[1] = element<Op2>(data[1], warp.shfl_xor(data[1], i));
+         data[2] = element<Op3>(data[2], warp.shfl_xor(data[2], i));
+     }
+ }
+
+ template <typename T,
+           ROpType Op1,
+           ROpType Op2,
+           ROpType Op3,
+           ROpType Op4,
+           int reduce_width = hw_warp_size>
+ DS_D_INLINE void _warp(cg::thread_block_tile<hw_warp_size>& warp, T* data)
+ {
+ #pragma unroll
+     for (int i = 1; i < reduce_width; i *= 2) {
+         data[0] = element<Op1>(data[0], warp.shfl_xor(data[0], i));
+         data[1] = element<Op2>(data[1], warp.shfl_xor(data[1], i));
+         data[2] = element<Op3>(data[2], warp.shfl_xor(data[2], i));
+         data[3] = element<Op4>(data[3], warp.shfl_xor(data[3], i));
+     }
+ }
+
+ /*
+ Implementation for primary block reduction that serves both `block` and
+ `partitioned_block`.
+
+ `total_warps` refers to the reduction width, not the number of warps
+ in the block (which may exceed that if the block is partitioned or if
+ we take a conservative bound at
+ compile time).
+ */
+ template <typename T, int total_warps, ROpType... Ops>
+ DS_D_INLINE void _block(cg::thread_block& tb,
+                         cg::thread_block_tile<hw_warp_size>& warp_arg,
+                         T* data)
+ {
+     constexpr int elems = sizeof...(Ops);
+     constexpr int bytes = sizeof(T);
+     // Unused when `partition_size == 1` or total_warps == 1
+     __shared__ T reduce_buffer[max_warps * elems];
+
+ #ifdef __HIP_PLATFORM_AMD__
+     const int total_threads = blockDim.x * blockDim.y * blockDim.z;
+     const int running_warps = total_threads / hw_warp_size;
+ #else
+     const int running_warps = warp_arg.meta_group_size();
+ #endif
+
+     // Always perform warp-scope reduction
+     _warp<T, Ops...>(warp_arg, data);
+
+     // If total_warps == 1 we can skip the runtime check
+     if (total_warps != 1) {
+         if (warp_arg.thread_rank() == 0) {
+ #pragma unroll
+             for (int i = 0; i < elems; i++) {
+                 mem_access::store_shared<bytes>(reduce_buffer + elems * _warp_rank() + i, data + i);
+             }
+         }
+
+         // Synchronization inside block-uniform conditional is safe
+         tb.sync();
+
+         if (_warp_rank() == 0) {
+             if (warp_arg.thread_rank() < running_warps) {
+ #pragma unroll
+                 for (int i = 0; i < elems; i++) {
+                     mem_access::load_shared<bytes>(
+                         data + i, reduce_buffer + elems * warp_arg.thread_rank() + i);
+                 }
+             } else {
+                 init<Ops...>(data);
+             }
+
+             _warp<T, Ops..., total_warps>(warp_arg, data);
+
+ #pragma unroll
+             for (int i = 0; i < elems; i++) {
+                 mem_access::store_shared<bytes>(reduce_buffer + elems * warp_arg.thread_rank() + i,
+                                                 data + i);
+             }
+         }
+
+         // Synchronization inside block-uniform conditional is safe
+         tb.sync();
+
+ #pragma unroll
+         for (int i = 0; i < elems; i++) {
+             mem_access::load_shared<bytes>(data + i, reduce_buffer + _warp_rank() * elems + i);
+         }
+     }
+ }
+
+ /*
+ Main API implementations. For the most part, they just convert the individual
+ variables into arrays, which makes working with them easier with a single
+ implementation. In theory, we could use the `_block` implementation as another
+ option, but the nature of using a pointer is a little less safe and this allows
+ us to obfuscate the details of the partitioned implementation.
+ */
+ template <ROpType Op, int warp_bound>
+ DS_D_INLINE void block(cg::thread_block& tb, cg::thread_block_tile<hw_warp_size>& warp, float& val)
+ {
+     _block<float, warp_bound, Op>(tb, warp, &val);
+ }
+
+ template <ROpType Op1, ROpType Op2, int warp_bound>
+ DS_D_INLINE void block(cg::thread_block& tb,
+                        cg::thread_block_tile<hw_warp_size>& warp,
+                        float& val1,
+                        float& val2)
+ {
+     float data[2] = {val1, val2};
+     _block<float, warp_bound, Op1, Op2>(tb, warp, data);
+     val1 = data[0];
+     val2 = data[1];
+ }
+
+ template <ROpType Op1, ROpType Op2, ROpType Op3, int warp_bound>
+ DS_D_INLINE void block(cg::thread_block& tb,
+                        cg::thread_block_tile<hw_warp_size>& warp,
+                        float& val1,
+                        float& val2,
+                        float& val3)
+ {
+     float data[3] = {val1, val2, val3};
+     _block<float, warp_bound, Op1, Op2, Op3>(tb, warp, data);
+     val1 = data[0];
+     val2 = data[1];
+     val3 = data[2];
+ }
+
+ template <ROpType Op1, ROpType Op2, ROpType Op3, ROpType Op4, int warp_bound>
+ DS_D_INLINE void block(cg::thread_block& tb,
+                        cg::thread_block_tile<hw_warp_size>& warp,
+                        float& val1,
+                        float& val2,
+                        float& val3,
+                        float& val4)
+ {
+     float data[4] = {val1, val2, val3, val4};
+     _block<float, warp_bound, Op1, Op2, Op3, Op4>(tb, warp, data);
+     val1 = data[0];
+     val2 = data[1];
+     val3 = data[2];
+     val4 = data[3];
+ }
+
+ /*
+ Note: the partitioned-block implementations do not support non-power-of-2 partition
+ sizes, in order to keep the block-scale reduction length short.
+ */
+ template <ROpType Op, int num_threads>
+ DS_D_INLINE void partitioned_block(cg::thread_block& tb,
+                                    cg::thread_block_tile<hw_warp_size>& warp,
+                                    float& val)
+ {
+     if (num_threads <= hw_warp_size) {
+         _warp<float, Op, num_threads>(warp, &val);
+     } else {
+         constexpr int num_warps = num_threads / hw_warp_size;
+         _block<float, num_warps, Op>(tb, warp, &val);
+     }
+ }
+
+ template <ROpType Op1, ROpType Op2, int num_threads>
+ DS_D_INLINE void partitioned_block(cg::thread_block& tb,
+                                    cg::thread_block_tile<hw_warp_size>& warp,
+                                    float& val1,
+                                    float& val2)
+ {
+     float data[2] = {val1, val2};
+
+     if (num_threads <= hw_warp_size) {
+         _warp<float, Op1, Op2, num_threads>(warp, data);
+     } else {
+         constexpr int num_warps = num_threads / hw_warp_size;
+         _block<float, num_warps, Op1, Op2>(tb, warp, data);
+     }
+
+     val1 = data[0];
+     val2 = data[1];
+ }
+
+ template <ROpType Op1, ROpType Op2, ROpType Op3, int num_threads>
+ DS_D_INLINE void partitioned_block(cg::thread_block& tb,
+                                    cg::thread_block_tile<hw_warp_size>& warp,
+                                    float& val1,
+                                    float& val2,
+                                    float& val3)
+ {
+     float data[3] = {val1, val2, val3};
+
+     if (num_threads <= hw_warp_size) {
+         _warp<float, Op1, Op2, Op3, num_threads>(warp, data);
+     } else {
+         constexpr int num_warps = num_threads / hw_warp_size;
+         _block<float, num_warps, Op1, Op2, Op3>(tb, warp, data);
+     }
+
+     val1 = data[0];
+     val2 = data[1];
+     val3 = data[2];
+ }
+
+ template <ROpType Op1, ROpType Op2, ROpType Op3, ROpType Op4, int num_threads>
+ DS_D_INLINE void partitioned_block(cg::thread_block& tb,
+                                    cg::thread_block_tile<hw_warp_size>& warp,
+                                    float& val1,
+                                    float& val2,
+                                    float& val3,
+                                    float& val4)
+ {
+     float data[4] = {val1, val2, val3, val4};
+
+     if (num_threads <= hw_warp_size) {
+         _warp<float, Op1, Op2, Op3, Op4, num_threads>(warp, data);
+     } else {
+         constexpr int num_warps = num_threads / hw_warp_size;
+         _block<float, num_warps, Op1, Op2, Op3, Op4>(tb, warp, data);
+     }
+
+     val1 = data[0];
+     val2 = data[1];
+     val3 = data[2];
+     val4 = data[3];
+ }
+
+ /*
+ Arg-reduce is a specialization of the above. We only support this with a single
+ reduction parameter, and only for max/min reductions.
+ */
+
+ __align__(8) struct IdxReduceResult {
+     /*
+     NOTE: ORDERING MATTERS HERE! The idx is the least significant set of bits
+     and the val is the most significant. Changing the order of this declaration
+     will break the code.
+     */
+     int idx;
+     float val;
+ };
+
+ template <ROpType Op, int warpBound>
+ DS_D_INLINE IdxReduceResult
+ idx_reduce(cg::thread_block& tb, cg::thread_block_tile<hw_warp_size>& warp, float val, int idx)
+ {
+     IdxReduceResult res = {idx, val};
+
+     // Clear out NaN values. This shouldn't be an issue for our initial applications.
+     if (isnan(val)) res.val = init<Op>();
+
+     // Can do float compares as integers. By packing the index into the lower bits
+     // we can just do a single int64 rather than a branch, compare, and select.
+     // One side benefit of this is that it is by nature a stable algorithm and
+     // will always bias ties to the higher index.
+     int64_t* res_as_int = reinterpret_cast<int64_t*>(&res);
+
+     // A floating point compare normally performs a sign comparison and, if the
+     // signs match, compares the rest of the bits as unsigned integers. Since we
+     // are bundling these, that means for negative values we need to reverse the
+     // sort order, which we can do with an XOR.
+     if (val < 0) { *res_as_int ^= 0x7fffffff00000000; }
+
+     _block<int64_t, warpBound, Op>(tb, warp, res_as_int);
+
+     // The sign bit is preserved, so we can check whether we need to invert the mantissa back.
+     if (res.val < 0) { *res_as_int ^= 0x7fffffff00000000; }
+
+     return res;
+ }
+
+ } // namespace reduce
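
The `idx_reduce` routine above packs a float value and its index into a single signed 64-bit word so that an ordinary max/min block reduction also carries the winning index. The sketch below is a host-side Python emulation of that packing (illustration only, not part of DeepSpeed; `pack_val_idx` and `unpack_val_idx` are hypothetical helper names) showing why a single integer `max` recovers both the value and a tie-stable index:

```python
# Host-side emulation of the idx_reduce packing trick (illustration only).
import struct

def pack_val_idx(val: float, idx: int) -> int:
    """Pack (val, idx) into one signed 64-bit integer ordered like the float."""
    val_bits = struct.unpack("<I", struct.pack("<f", val))[0]
    packed = (val_bits << 32) | (idx & 0xFFFFFFFF)
    if val < 0:
        # Mirror `*res_as_int ^= 0x7fffffff00000000`: flip the non-sign bits so
        # integer ordering matches float ordering for negative values.
        packed ^= 0x7FFFFFFF00000000
    # Reinterpret as signed int64, as the kernel's reinterpret_cast<int64_t*> does.
    return packed - (1 << 64) if packed >= (1 << 63) else packed

def unpack_val_idx(packed: int) -> tuple[float, int]:
    if packed < 0:
        packed += 1 << 64             # recover the raw 64-bit pattern
        packed ^= 0x7FFFFFFF00000000  # undo the negative-value flip
    val = struct.unpack("<f", struct.pack("<I", packed >> 32))[0]
    return val, packed & 0xFFFFFFFF

values = [0.25, -1.5, 3.0, 3.0, -0.1]
best = max(pack_val_idx(v, i) for i, v in enumerate(values))
print(unpack_val_idx(best))  # (3.0, 3) -- ties resolve to the higher index
```

Because the index occupies the low bits, equal values compare by index, which is why the kernel comment can claim the reduction is stable and biases ties toward the higher index.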
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__init__.py ADDED
@@ -0,0 +1,6 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from .blocked_flash import *
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (253 Bytes).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__pycache__/blocked_flash.cpython-310.pyc ADDED
Binary file (3.62 kB).
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/attention_atom.h ADDED
@@ -0,0 +1,39 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ #pragma once
+
+ #include <cstdint>
+ #include "cuda.h"
+
+ struct AttentionAtom {
+     /*
+     The attention atom describes the workload of a particular query. The attention
+     kernel will execute each ``AttentionAtom`` for each head of the model.
+     */
+
+     // Pointer to a list of KV block indices.
+     int32_t* block_idx_list;
+
+     // Index of first token in the ragged batch associated with this atom.
+     int32_t q_start_idx;
+
+     // Number of tokens in the ragged batch associated with this atom.
+     int32_t q_len;
+
+     // Number of key/value blocks associated with this atom. All but the last are
+     // assumed to be fully dense.
+     int32_t kv_blocks;
+
+     // Number of tokens in the last key/value block.
+     int32_t total_extent;
+
+     // Global index of the first token in the atom. For example, in a prompt continuation
+     // in which we have already processed 768 tokens, this would be 768.
+     int32_t global_q_idx;
+
+     // Unused
+     int32_t unused;
+ };
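
As a rough illustration of the bookkeeping these fields encode (not DeepSpeed code; the block size and the `atom_fields` helper below are assumptions), the scalar members of an atom for a prompt continuation can be derived from the number of cached tokens, the number of new query tokens, and the KV block size, following the field comments above:

```python
# Hypothetical sketch of the scalar AttentionAtom fields (illustration only).
# Assumes a KV block size of 64 tokens; block_idx_list itself is a device pointer
# that DeepSpeed's ragged batching utilities fill in.
import math

KV_BLOCK_SIZE = 64

def atom_fields(seen_tokens: int, new_tokens: int, q_start_idx: int) -> dict:
    total = seen_tokens + new_tokens              # tokens visible to attention
    kv_blocks = math.ceil(total / KV_BLOCK_SIZE)  # all but the last block are dense
    return {
        "q_start_idx": q_start_idx,               # first token of this atom in the ragged batch
        "q_len": new_tokens,                      # query tokens handled by this atom
        "kv_blocks": kv_blocks,
        "total_extent": total - (kv_blocks - 1) * KV_BLOCK_SIZE,  # tokens in the last KV block
        "global_q_idx": seen_tokens,              # e.g. 768 after 768 already-processed tokens
    }

print(atom_fields(seen_tokens=48, new_tokens=16, q_start_idx=0))
# {'q_start_idx': 0, 'q_len': 16, 'kv_blocks': 1, 'total_extent': 64, 'global_q_idx': 48}
```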
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.cpp ADDED
@@ -0,0 +1,101 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ /******************************************************************************
+  * Copyright (c) 2023, Tri Dao.
+  ******************************************************************************/
+
+ #include <ATen/cuda/CUDAContext.h>
+ #include <c10/cuda/CUDAGuard.h>
+ #include <torch/extension.h>
+
+ #include "blocked_flash.h"
+ #include "flash.h"
+
+ #define CHECK_SHAPE(x, ...) \
+     TORCH_CHECK(x.sizes() == torch::IntArrayRef({__VA_ARGS__}), \
+                 #x " must have shape (" #__VA_ARGS__ ")")
+
+ void flash_attn_by_atoms(at::Tensor& out,
+                          at::Tensor& q,
+                          at::Tensor& k,
+                          at::Tensor& v,
+                          at::Tensor& attention_atoms,
+                          const float softmax_scale,
+                          const bool is_causal)
+ {
+     auto dprops = at::cuda::getCurrentDeviceProperties();
+
+     bool is_sm8x = dprops->major == 8 && dprops->minor >= 0;
+     bool is_sm90 = dprops->major == 9 && dprops->minor == 0;
+     TORCH_CHECK(is_sm90 || is_sm8x, "FlashAttention only supports Ampere GPUs or newer.");
+
+     auto q_dtype = q.dtype();
+     TORCH_CHECK(q_dtype == torch::kFloat16 || q_dtype == torch::kBFloat16,
+                 "FlashAttention only supports fp16 and bf16 data types");
+     if (q_dtype == torch::kBFloat16) {
+         TORCH_CHECK(is_sm90 || is_sm8x, "bfloat16 is only supported on Ampere GPUs or newer");
+     }
+     TORCH_CHECK(k.dtype() == q_dtype, "query and key must have the same dtype");
+     TORCH_CHECK(v.dtype() == q_dtype, "query and value must have the same dtype");
+
+     TORCH_CHECK(q.is_cuda(), "Input tensor must be on CUDA device");
+     TORCH_CHECK(k.is_cuda(), "Input tensor must be on CUDA device");
+     TORCH_CHECK(v.is_cuda(), "Input tensor must be on CUDA device");
+
+     TORCH_CHECK(q.stride(-1) == 1, "Input tensor must have contiguous last dimension");
+     TORCH_CHECK(k.stride(-1) == 1, "Input tensor must have contiguous last dimension");
+     TORCH_CHECK(v.stride(-1) == 1, "Input tensor must have contiguous last dimension");
+
+     const int total_q = q.size(0);
+     const int head_size = k.size(-1);
+     const int num_heads_kv = k.size(-2);
+     const int num_heads_q = q.size(-1) / head_size;
+
+     TORCH_CHECK(head_size <= 256, "head_size must be <= 256");
+     TORCH_CHECK(head_size % 8 == 0, "head_size must be divisible by 8");
+     TORCH_CHECK(num_heads_q % num_heads_kv == 0, "num_heads_q must be divisible by num_heads_kv");
+
+     Flash_fwd_params params;
+
+     params.is_bf16 = q.dtype() == torch::kBFloat16;
+
+     // Set the pointers and strides.
+     params.q_ptr = q.data_ptr();
+     params.k_ptr = k.data_ptr();
+     params.v_ptr = v.data_ptr();
+     params.o_ptr = out.data_ptr();
+     params.atoms = reinterpret_cast<AttentionAtom*>(attention_atoms.data_ptr());
+
+     // All strides are in elements, not bytes.
+     params.q_row_stride = q.stride(0);
+     params.k_row_stride = k.stride(1);
+     params.v_row_stride = v.stride(1);
+     params.o_row_stride = out.stride(0);
+
+     // Assume heads are contiguous.
+     params.q_head_stride = head_size;
+     params.k_head_stride = head_size;
+     params.v_head_stride = head_size;
+     params.o_head_stride = head_size;
+
+     // Head params
+     params.h = num_heads_q;
+     params.h_k = num_heads_kv;
+     params.h_h_k_ratio = num_heads_q / num_heads_kv;
+     params.d = head_size;
+     auto round_multiple = [](int x, int m) { return (x + m - 1) / m * m; };
+     params.d_rounded = round_multiple(head_size, 32);
+     params.num_atoms = attention_atoms.size(0);
+
+     // Set the different scale values.
+     params.scale_softmax = softmax_scale;
+     params.scale_softmax_log2 = softmax_scale * M_LOG2E;
+
+     params.is_causal = is_causal;
+
+     auto stream = at::cuda::getCurrentCUDAStream().stream();
+     run_mha_fwd(params, stream);
+ }
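
The host-side checks and derived parameters above are easy to mirror in Python. The sketch below is an illustration only (`derive_fwd_params` is a hypothetical helper, not part of the extension) showing how the head counts, the rounded head size, and the log2-scaled softmax factor fall out of the tensor shapes:

```python
# Hypothetical mirror of the parameter derivation in flash_attn_by_atoms (illustration only).
import math

def derive_fwd_params(q_hidden: int, num_heads_kv: int, head_size: int, softmax_scale: float) -> dict:
    assert head_size <= 256 and head_size % 8 == 0, "head_size must be <= 256 and divisible by 8"
    num_heads_q = q_hidden // head_size
    assert num_heads_q % num_heads_kv == 0, "num_heads_q must be divisible by num_heads_kv"
    round_multiple = lambda x, m: (x + m - 1) // m * m
    return {
        "h": num_heads_q,
        "h_k": num_heads_kv,
        "h_h_k_ratio": num_heads_q // num_heads_kv,  # GQA group size
        "d": head_size,
        "d_rounded": round_multiple(head_size, 32),
        "scale_softmax": softmax_scale,
        "scale_softmax_log2": softmax_scale * math.log2(math.e),
    }

# A 4096-wide query projection with 8 KV heads and 128-dim heads (4:1 GQA):
print(derive_fwd_params(4096, 8, 128, 128 ** -0.5))
```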
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.h ADDED
@@ -0,0 +1,16 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ #pragma once
+
+ #include <torch/extension.h>
+
+ void flash_attn_by_atoms(at::Tensor& out,
+                          at::Tensor& q,
+                          at::Tensor& k,
+                          at::Tensor& v,
+                          at::Tensor& attention_atoms,
+                          const float softmax_scale,
+                          const bool is_causal);
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.py ADDED
@@ -0,0 +1,107 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ import torch
+
+ from deepspeed.accelerator import get_accelerator
+ from ....inference_utils import DtypeEnum
+ from deepspeed.ops.op_builder import RaggedOpsBuilder
+
+ from ... import DSKernelBase
+
+
+ def get_q_block_size(head_size: int) -> int:
+     """
+     Returns the query block size required by the kernel given a head size.
+     """
+     cc_major, cc_minor = torch.cuda.get_device_capability(get_accelerator().current_device())  #ignore-cuda
+
+     if cc_major < 8:
+         raise RuntimeError("Blocked attention requires CUDA compute capability >= 8.0")
+
+     if head_size <= 64:
+         return 128
+     elif head_size <= 160:
+         if cc_minor != 0:
+             return 64
+         else:
+             return 128
+     elif head_size == 192:
+         return 128
+     elif head_size == 224:
+         if cc_minor != 0:
+             return 64
+         else:
+             return 128
+     else:
+         if cc_major == 8 and cc_minor == 0:
+             return 128
+         else:
+             return 64
+
+
+ def get_kv_block_size(head_size: int) -> int:
+     """
+     Returns the preferred granularity for the blocked KV-cache implementation.
+     """
+     cc_major, cc_minor = torch.cuda.get_device_capability(get_accelerator().current_device())  #ignore-cuda
+
+     if cc_major < 8:
+         raise RuntimeError("Blocked attention requires CUDA compute capability >= 8.0")
+
+     if head_size <= 64:
+         return 128
+     elif head_size != 160 or cc_minor != 0:
+         return 64
+     else:
+         return 32
+
+
+ class BlockedFlashAttn(DSKernelBase):
+     """
+     Modified implementation of flash-attn-2 tuned for inference on a blocked KV-cache and a
+     wider range of input sequence lengths.
+     """
+
+     supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16]
+
+     def __init__(self, head_size: int, dtype: DtypeEnum) -> None:
+         """
+         Triggers any compilation of the kernels.
+         """
+         if not isinstance(dtype, DtypeEnum):
+             dtype = DtypeEnum(dtype)
+
+         if dtype not in BlockedFlashAttn.supported_dtypes:
+             raise ValueError("Unsupported data type: {}, supported data types are {}".format(
+                 dtype, BlockedFlashAttn.supported_dtypes))
+
+         # For testing, need to revert to 32
+         if head_size % 16 != 0:
+             raise ValueError("Head size must be divisible by 16 (configured with {})".format(head_size))
+
+         inf_module = RaggedOpsBuilder().load()
+         self.kernel = inf_module.flash_attn_by_atoms
+
+     def __call__(self, out: torch.Tensor, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, atoms: torch.Tensor,
+                  softmax_scale: float) -> torch.Tensor:
+         """
+         Flash attention implementation atop a blocked KV-cache. Atoms should be pre-populated.
+         See attention_atom.h for further details on the structure of the information.
+
+         Arguments:
+             out (torch.Tensor): Output tensor of shape [tokens, hidden_size]
+             q (torch.Tensor): Query tensor of shape [tokens, hidden_size]
+             k (torch.Tensor): Key cache tensor of shape [n_blocks, block_size, n_heads_kv, head_size]. This tensor only needs to be contiguous on the final dimension.
+             v (torch.Tensor): Value cache tensor of shape [n_blocks, block_size, n_heads_kv, head_size]. This tensor only needs to be contiguous on the final dimension.
+             atoms (torch.Tensor): Atom information tensor of shape [num_atoms, 8] and type int32.
+                 Not all data is readable in this format. See attention_atom.h for further details.
+             softmax_scale (float): Softmax scale factor.
+
+         Returns:
+             out (torch.Tensor): Output tensor of shape [tokens, hidden_size]
+         """
+         self.kernel(out, q, k, v, atoms, softmax_scale, True)
+         return out
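
A hedged end-to-end usage sketch of the wrapper above (assuming a CUDA device with compute capability >= 8.0 and a working `RaggedOpsBuilder` build; the tensor shapes follow the docstring, and the `atoms` tensor shown here is only a zero placeholder that DeepSpeed's ragged batching utilities would normally populate):

```python
import torch

from deepspeed.inference.v2.inference_utils import DtypeEnum
from deepspeed.inference.v2.kernels.ragged_ops.blocked_flash import (BlockedFlashAttn, get_kv_block_size,
                                                                     get_q_block_size)

head_size, n_heads_q, n_heads_kv, tokens = 128, 32, 8, 16
kv_block = get_kv_block_size(head_size)  # preferred KV-cache block granularity
q_block = get_q_block_size(head_size)    # query tile the kernel will use

attn = BlockedFlashAttn(head_size, DtypeEnum.fp16)

q = torch.randn(tokens, n_heads_q * head_size, dtype=torch.float16, device="cuda")
out = torch.empty_like(q)
k = torch.randn(4, kv_block, n_heads_kv, head_size, dtype=torch.float16, device="cuda")
v = torch.randn_like(k)

# Placeholder atoms tensor; see attention_atom.h for the real layout and population.
atoms = torch.zeros(1, 8, dtype=torch.int32, device="cuda")

out = attn(out, q, k, v, atoms, softmax_scale=head_size**-0.5)
```

Note that device-capability and head-size validation happens in the block-size helpers and the constructor, so a real deployment fails fast before any atoms are built.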
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/flash.h ADDED
@@ -0,0 +1,74 @@
+ // Copyright (c) Microsoft Corporation.
+ // SPDX-License-Identifier: Apache-2.0
+
+ // DeepSpeed Team
+
+ /******************************************************************************
+  Copyright (c) 2023, Tri Dao.
+  ******************************************************************************/
+
+ #pragma once
+
+ #include <cuda.h>
+ #include <vector>
+
+ #include "attention_atom.h"
+
+ constexpr int TOTAL_DIM = 0;
+ constexpr int H_DIM = 1;
+ constexpr int D_DIM = 2;
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ struct Qkv_params {
+     using index_t = uint32_t;
+     // The QKV matrices.
+     void* __restrict__ q_ptr;
+     void* __restrict__ k_ptr;
+     void* __restrict__ v_ptr;
+
+     // The stride between rows of the Q, K and V matrices.
+     index_t q_row_stride;
+     index_t k_row_stride;
+     index_t v_row_stride;
+     index_t q_head_stride;
+     index_t k_head_stride;
+     index_t v_head_stride;
+
+     // The number of heads.
+     int h, h_k;
+     // In the case of multi-query and grouped-query attention (MQA/GQA), nheads_k could be
+     // different from nheads (query).
+     int h_h_k_ratio;  // precomputed h / h_k
+ };
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ struct Flash_fwd_params : public Qkv_params {
+     // The O matrix (output).
+     void* __restrict__ o_ptr;
+
+     // The attention metadata
+     AttentionAtom* __restrict__ atoms;
+
+     // Total attention atoms
+     int num_atoms;
+
+     // The stride between rows of O.
+     index_t o_row_stride;
+     index_t o_head_stride;
+
+     // The dimensions
+     int d, d_rounded;
+
+     // The scaling factors for the kernel.
+     float scale_softmax;
+     float scale_softmax_log2;
+
+     bool is_bf16;
+     bool is_causal;
+ };
+
+ ////////////////////////////////////////////////////////////////////////////////////////////////////
+
+ void run_mha_fwd(Flash_fwd_params& params, cudaStream_t stream);
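
`Flash_fwd_params` carries both `scale_softmax` and `scale_softmax_log2` presumably because flash-attention kernels typically evaluate the softmax with base-2 exponentials (`exp2` is cheap on GPUs), so the scale is pre-multiplied by log2(e) on the host. A quick numerical check of the identity being relied on (illustration only):

```python
# exp(s * x) == 2 ** (s * log2(e) * x), the identity behind scale_softmax_log2.
import math

x, scale = 1.7, 0.125
lhs = math.exp(scale * x)
rhs = 2.0 ** (scale * math.log2(math.e) * x)
assert math.isclose(lhs, rhs)
print(lhs, rhs)
```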
venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/__init__.py ADDED
@@ -0,0 +1,6 @@
+ # Copyright (c) Microsoft Corporation.
+ # SPDX-License-Identifier: Apache-2.0
+
+ # DeepSpeed Team
+
+ from .embed import RaggedEmbeddingKernel