diff --git a/.gitattributes b/.gitattributes index 30a76323b7e7780aaea1da254c7638e41e8349fa..e8ed92b1f60c2bc601898b629a3fdfff84f651c4 100644 --- a/.gitattributes +++ b/.gitattributes @@ -141,3 +141,4 @@ venv/lib/python3.10/site-packages/numpy/core/_multiarray_umath.cpython-310-x86_6 venv/lib/python3.10/site-packages/tokenizers/tokenizers.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/numpy.libs/libopenblas64_p-r0-0cf96a72.3.23.dev.so filter=lfs diff=lfs merge=lfs -text venv/lib/python3.10/site-packages/scipy.libs/libopenblasp-r0-24bff013.3.26.dev.so filter=lfs diff=lfs merge=lfs -text +venv/lib/python3.10/site-packages/scipy/special/cython_special.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..45e523ab62b9b1b1a43c86685c5adfce59a718e6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .base_engine import CheckpointEngineBase +from .in_memory_engine import InMemoryModelEngine +from .huggingface_engine import HuggingFaceCheckpointEngine diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6c1d32a76a741dd9d2233ef862e23880f9bc676 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/huggingface_engine.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/huggingface_engine.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18f316ff9e7cb6056d1a9a685edf382aad1ebff0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/huggingface_engine.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/in_memory_engine.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/in_memory_engine.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9227524cdc4d9d9608de450ef1882cae6540c9a1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/__pycache__/in_memory_engine.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/base_engine.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/base_engine.py new file mode 100644 index 0000000000000000000000000000000000000000..26fc467d4d863aa0b6f6fb28b84d3f6260c702f2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/base_engine.py @@ -0,0 +1,41 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from abc import ABC, abstractmethod +from typing import Iterable, Tuple + +import torch + +#from .huggingface_engine import HuggingFaceCheckpointEngine + +MEGATRON = 'megatron' +HUGGINGFACE = 'huggingface' + + +class CheckpointEngineBase(ABC): + """ + Abstract interface for checkpoint engines to implement. + + There is no ``__init__`` method here by design, since the creation of the checkpoint + engine will happen outside the policy/engine code. The tradeoff being made here is + that we will write different frontends for different checkpoint engines, but these + frontends can be tailored to the specific checkpoint engine/model source needs. + """ + + @abstractmethod + def parameters(self) -> Iterable[Tuple[str, torch.Tensor]]: + """ + This method should create a generator of tuples of the form (name, parameter) for + all parameters in the model. The name should be the fully qualified name of the + parameter, and the parameter should be a torch.Tensor. + + The expected use of a checkpoint engine is the following: + ```python + for name, parameter in checkpoint_engine.parameters(): + container_map.map_param(name, parameter) + ``` + For a concrete use example, see ``InferenceV2Policy``. + """ + ... diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/huggingface_engine.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/huggingface_engine.py new file mode 100644 index 0000000000000000000000000000000000000000..46a84c61f884b9b2aad3a949a23587aac31b0720 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/huggingface_engine.py @@ -0,0 +1,124 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import os +import json +import torch +from .base_engine import CheckpointEngineBase +from typing import Iterable, Tuple +from functools import partial + +from ..logging import inference_logger + + +class HuggingFaceCheckpointEngine(CheckpointEngineBase): + + def __init__(self, model_name_or_path: str, auth_token: str = None) -> None: + super().__init__() + from transformers import AutoConfig, GenerationConfig + + self.model_name_or_path = model_name_or_path + self.auth_token = auth_token + self.model_config = AutoConfig.from_pretrained(self.model_name_or_path) + # Define this property here so we can use it in the model implementation + if not hasattr(self.model_config, "max_seq_length"): + if hasattr(self.model_config, "max_position_embeddings"): + self.model_config.max_seq_length = self.model_config.max_position_embeddings + else: + generation_config = GenerationConfig.from_pretrained(self.model_name_or_path) + self.model_config.max_seq_length = generation_config.max_length + self._local_checkpoint_dir = None + self._all_ckpt_paths = self._fetch_checkpoint_files() + + def _fetch_checkpoint_files(self): + """ + Fetch the checkpoint files from the HuggingFace Hub. + """ + # TODO(jeff): for models like llama-2 the user will have to provide an auth `token`, + # currently coming from the ckpt engine init but maybe a catch all kwargs for other + # snapshot download parameters would be more flexible. 
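+        # Illustrative sketch only (editor's note, not part of the current implementation): the
+        # catch-all kwargs mentioned above could be accepted in __init__ and forwarded verbatim
+        # to snapshot_download, e.g.
+        #
+        #     def __init__(self, model_name_or_path, auth_token=None, **snapshot_kwargs):
+        #         ...
+        #         self._snapshot_kwargs = snapshot_kwargs
+        #
+        #     snapshot_download(self.model_name_or_path, token=self.auth_token,
+        #                       **self._snapshot_kwargs)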
+ + from huggingface_hub import snapshot_download, list_repo_tree + + def model_has_safetensors(model_name_or_path: str) -> bool: + if os.path.isdir(model_name_or_path): + file_list = os.listdir(model_name_or_path) + else: + file_list = [rf.path for rf in list_repo_tree(model_name_or_path)] + for f in file_list: + if f.endswith(".safetensors"): + return True + return False + + if os.path.isdir(self.model_name_or_path): + self._local_checkpoint_dir = self.model_name_or_path + else: + # We need to download the checkpoint files from HF + if model_has_safetensors(self.model_name_or_path): + # Prioritize downloading safetensors if they are available + allow_patterns = ["*.safetensors", "*.json"] + else: + # Fallback to bin files when safetensors are not present + allow_patterns = ["*.bin", "*.json", "*.pt"] + self._local_checkpoint_dir = snapshot_download(self.model_name_or_path, + allow_patterns=allow_patterns, + revision=None, + token=self.auth_token) + + assert os.path.isdir( + self._local_checkpoint_dir + ), f"Checkpoint dir {self._local_checkpoint_dir} is not a directory, cannot load checkpoint." + + # Set the appropriate file names based on whether we have safetensors or not + if model_has_safetensors(self._local_checkpoint_dir): + from safetensors.torch import load_file + model_param_json_fname = "model.safetensors.index.json" + model_file_fname = "model.safetensors" + self._checkpoint_load_fn = load_file + else: + model_param_json_fname = "pytorch_model.bin.index.json" + model_file_fname = "pytorch_model.bin" + self._checkpoint_load_fn = partial(torch.load, map_location="cpu") + + model_param_json = os.path.join(self._local_checkpoint_dir, model_param_json_fname) + + if not os.path.isfile(model_param_json): + # We don't need any json as all such HF models will have pytorch_model.bin + all_checkpoint_files = [os.path.join(self._local_checkpoint_dir, model_file_fname)] + else: + param_map = json.load(open(model_param_json, "r")) + + # weight_map -> { "lm_head.weight": "pytorch_model-00002-of-00002.bin", ... } + weight_map = param_map["weight_map"] + + # unique set of all checkpoint files + all_checkpoint_files = set(weight_map.values()) + + # get absolute path of all unique checkpoint files + all_checkpoint_files = [os.path.join(self._local_checkpoint_dir, f) for f in all_checkpoint_files] + + return all_checkpoint_files + + def parameters(self) -> Iterable[Tuple[str, torch.Tensor]]: + """ + Generator of model parameters (satisfies the CheckpointEngineBase interface). 
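+
+        Note: checkpoint shards are loaded one at a time, and each shard's state dict is
+        dropped (``del checkpoint_sd``) once its tensors have been yielded.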
+ """ + for checkpoint in self._all_ckpt_paths: + inference_logger().info(f"Loading checkpoint: {checkpoint}") + checkpoint_sd = self._checkpoint_load_fn(checkpoint) + param_keys = list(checkpoint_sd.keys()) + for param_name in param_keys: + param = checkpoint_sd[param_name] + yield param_name, param + + del checkpoint_sd + + +if __name__ == "__main__": + # To test, add your auth_token here and run `python huggingface_engine.py` + engine = HuggingFaceCheckpointEngine(model_name_or_path="meta-llama/Llama-2-7b-hf", + auth_token="hf_xxxxxxxxxxxxxxxxx") + for name, param in engine.parameters(): + print(name, param.shape) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/in_memory_engine.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/in_memory_engine.py new file mode 100644 index 0000000000000000000000000000000000000000..13ec7b288f5febb158e167890e1f259d9684ed28 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/checkpoint/in_memory_engine.py @@ -0,0 +1,40 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Iterable, Tuple +import torch + +from .base_engine import CheckpointEngineBase + + +class InMemoryModelEngine(CheckpointEngineBase): + """ + This "checkpoint" engine uses the existing interface to enable loading parameters into an + inference model from a model already instantiated in memory. In general, this is not the + recommended way to use the inference engine, and should only be used when absolutely necessary. + + The primary limitation of this approach is that the model must be fully instantiated in memory. + In a tensor parallel scenario, this means that the model is either replicated many times in host + memory. Currently, it is also recommended to only use this approach for models held in host memory. + + In order to free the memory held by this copy of the model, we delete the model in the first call + to `parameters`, so it is not safe to make this call twice. + """ + + def __init__(self, model: torch.nn.Module) -> None: + """ + Create virtual checkpoint engine for the provided module. + + Args: + model (torch.nn.Module): Model to load parameters from. 
+ """ + super().__init__() + self.model = model + + def parameters(self) -> Iterable[Tuple[str, torch.Tensor]]: + for name, parameter in self.model.named_parameters(): + yield name, parameter + + del self.model diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97c9bd68b2568130b073a47d15c4fb235277508e Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/__pycache__/ds_kernel.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/__pycache__/ds_kernel.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dc929d6ef67f8cc8111fee15979a9f8dbad07488 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/__pycache__/ds_kernel.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/configs.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/configs.h new file mode 100644 index 0000000000000000000000000000000000000000..76e8eda2d35ee1afff9c451bbb0251a21461b615 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/configs.h @@ -0,0 +1,96 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +// This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112 + +#ifndef CONFIGS_H +#define CONFIGS_H + +// #define DEBUG_MODE +#define PIPELINE_LEVEL_GMEM 2 +#define PIPELINE_LEVEL_SMEM 2 // only support 2 + +/************************ Hardware Parameters ************************/ +#define WARP_SIZE 32 +#define REG_BIT_WIDTH 32 +// mma: M=16 K=16 N=8 +#define MMA_8 8 +#define MMA_16 16 +// for memory access +#define THREAD_OPT_ACCESS_BIT_WIDTH_128 128 // LDS.128, cp_async.128, ... 
+#define BIT_WIDTH_PER_HALF 16 // Half precision: FP16 + +/******************** Register Allocation For GEMM ********************/ +#define REG_PER_THREAD_C_TENSOR_16_16 8 // 8 for FP32 Accumulation +/********************** Memory Padding Parameters **********************/ +// Eliminating bank-conflict +#define PADDING_BYTES_16 16 // Padding 16 bytes each column +#define PADDING_SHARED_MEM_FOR_B_8 \ + 8 // Padding 8 half each column, during CopyFromGlobalToShared() for B +#define PADDING_SHARED_MEM_FOR_C_4 \ + 4 // Padding 4 float each column, during StoreToSharedMemoryFromRegister() for C +/************************* WARP Tiling part-1 *************************/ +#define WARP_ROW_MMA_TENSORS 4 +#define WARP_M (WARP_ROW_MMA_TENSORS * MMA_16) // 64 +#define WARP_K_MMA_TENSORS 4 +#define WARP_K (WARP_K_MMA_TENSORS * MMA_16) // 64 +template +struct TilingConfig { + // Depending on "n" dimension of the GEMM + static constexpr int BLOCK_ROW_WARPS = BLOCK_ROW_WARPS_; + static constexpr int BLOCK_COL_WARPS = BLOCK_COL_WARPS_; + static constexpr int WARP_COL_MMA_TENSORS = WARP_COL_MMA_TENSORS_; + /************************* WARP Tiling part-2 *************************/ + static constexpr int WARP_N = WARP_COL_MMA_TENSORS * MMA_8; + /*************************Thread Block Tiling *************************/ + static constexpr int TILE_M = WARP_M * BLOCK_ROW_WARPS; + static constexpr int TILE_N = MMA_8 * WARP_COL_MMA_TENSORS * BLOCK_COL_WARPS; + static constexpr int TILE_K = WARP_K; + /********************** #Thread per Thread Block **********************/ + static constexpr int BLOCK_WARPS = BLOCK_ROW_WARPS * BLOCK_COL_WARPS; + static constexpr int BLOCK_THREADS = BLOCK_WARPS * WARP_SIZE; + /******************************* Others *******************************/ + static constexpr int SMEM_SIZE_B_TILE = TILE_N * (TILE_K + PADDING_BYTES_16) * 2 * + PIPELINE_LEVEL_GMEM; // sizeof(half)=2, doubleBuffer=2 + static constexpr int SMEM_SIZE_C_TILE = + TILE_N * (TILE_M + PADDING_BYTES_16) * 4; // sizeof(float)=4 +}; + +/************************ General Config for Quant-LLM **********************/ +#define WEIGHT_FRAG1_BIT_WIDTH 2 +#define WEIGHT_FRAG2_BIT_WIDTH 4 +#define WEIGHT_BIT_WIDTH (WEIGHT_FRAG1_BIT_WIDTH + WEIGHT_FRAG2_BIT_WIDTH) // 6 +// #define QUANT_GROUP_SIZE_DIVIDED_BY_64 4 // +// QuantGroupSize: 4*64 = 256 +/*************************** 64*64 Weghts of A WARP *************************/ +#define WEIGHT_PER_UNIT (WARP_M * WARP_K) // 64*64 +#define SMEM_SIZE_IN_BYTES_PER_WARP_A1 \ + (WEIGHT_PER_UNIT * WEIGHT_FRAG1_BIT_WIDTH / \ + 8) // 1024 Bytes #doubleBuffer not takedn into consideration +#define SMEM_SIZE_IN_BYTES_PER_WARP_A2 \ + (WEIGHT_PER_UNIT * WEIGHT_FRAG2_BIT_WIDTH / \ + 8) // 2048 Bytes #doubleBuffer not takedn into consideration +#define SMEM_SIZE_A1_TILE \ + (SMEM_SIZE_IN_BYTES_PER_WARP_A1 * 4 * \ + PIPELINE_LEVEL_GMEM) // #WARP=4, #Trible-Buffer for 3-level pipeline for A = 12 KB; double + // buffer for 2-level pipeline A= 8 KB. +#define SMEM_SIZE_A2_TILE \ + (SMEM_SIZE_IN_BYTES_PER_WARP_A2 * 4 * \ + PIPELINE_LEVEL_GMEM) // #WARP=4, #Trible-Buffer for 3-level pipeline for A = 24 KB; double + // buffer for 2-level pipeline A= 16 KB. 
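+// Worked example (editor's note; the numbers follow from the defines above):
+//   WEIGHT_PER_UNIT                = WARP_M * WARP_K = 64 * 64 = 4096 FP6 weights per WARP
+//   SMEM_SIZE_IN_BYTES_PER_WARP_A1 = 4096 * 2 / 8 = 1024 Bytes (2-bit fragments)
+//   SMEM_SIZE_IN_BYTES_PER_WARP_A2 = 4096 * 4 / 8 = 2048 Bytes (4-bit fragments)
+//   With 4 WARPs and PIPELINE_LEVEL_GMEM = 2:
+//   SMEM_SIZE_A1_TILE = 1024 * 4 * 2 = 8 KB,  SMEM_SIZE_A2_TILE = 2048 * 4 * 2 = 16 KB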
+/******************** Global Memory Layout For QUANTIZED DATA ******************/ +#define NUM_INT4_PER_UNIT_2BIT_FRAG (WEIGHT_PER_UNIT * WEIGHT_FRAG1_BIT_WIDTH / 128) // 64 +#define NUM_INT4_PER_UNIT_4BIT_FRAG (WEIGHT_PER_UNIT * WEIGHT_FRAG2_BIT_WIDTH / 128) // 128 +/******************** Register Allocation For QUANTIZED DATA ******************/ +#define WEIGHT_PER_THREAD (WEIGHT_PER_UNIT / WARP_SIZE) // 128 +#define REG_PER_THREAD_2BIT_FRAG (WEIGHT_PER_THREAD / REG_BIT_WIDTH * 2) // 8 +#define REG_PER_THREAD_4BIT_FRAG (WEIGHT_PER_THREAD / REG_BIT_WIDTH * 4) // 16 +/******************** Register Allocation For QUANT Scales ******************/ +#define WARP_REG_QUANT_SCALE 4 // 8 rows per thread -> 8 FP16 scales -> 4 registers +#define WARP_REG_QUANT_SCALE_DISTRIBUTED \ + 1 // T0-T3, T4-T7, ..., T28-T31 share the same scales, using shfl to get all the scales for + // each thread + +#endif // CONFIGS_H diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/kernel_matmul.cuh b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/kernel_matmul.cuh new file mode 100644 index 0000000000000000000000000000000000000000..0262baef4614692903877e043e9b773666a8864c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/kernel_matmul.cuh @@ -0,0 +1,272 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +// This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112 + +#ifndef DEEPSPEED_CUDA_LINEAR_KERNEL_MATMUL_CUH +#define DEEPSPEED_CUDA_LINEAR_KERNEL_MATMUL_CUH + +#include "configs.h" +#include "utils_core.cuh" +#include "utils_gmem.cuh" + +/* + * C = A*B + * A: row major with ahead-of-time layout transformation, FP6 + * B: col major, FP16 + * C: col major, FP16 + */ +template +__global__ void QUANT_GEMM_Kernel(const uint4* Weight1, + const uint4* Weight2, + const half* Scales, + const half* B, + OutputDataType* C, + const size_t M_Global, + const size_t N_Global, + const size_t K_Global, + int Split_K) +{ +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 && __CUDA_ARCH__ < 900 + +#ifdef DEBUG_MODE + assert(K_Global % TilingConfig::TILE_K == 0); + assert(M_Global % TilingConfig::TILE_M == 0); + assert(gridDim.y == Split_K * (M_Global / TilingConfig::TILE_M)); +#endif + extern __shared__ __align__(128) + half smem[]; // Dynamic shared memory for FP16 A tiles, 128 Bytes aligned + half(*smem_array)[WARP_K + PADDING_SHARED_MEM_FOR_B_8] = + reinterpret_cast( + smem + + (SMEM_SIZE_A1_TILE + SMEM_SIZE_A2_TILE) / 2); // Dynamic shared memory for FP16 B tiles + __shared__ half QuantScales[64 * TilingConfig::BLOCK_WARPS]; // static shared memory for + // quantization scales, 64 row per + // warp * 4 warps = 512 Bytes + // Thread Block Mapping, considering SplitK + const size_t BatchID = blockIdx.y / (M_Global / TilingConfig::TILE_M); + const size_t x = blockIdx.x; // Output Block ID: (BlockID_Row = y; BlockID_Col = x ) + const size_t y = + blockIdx.y % + (M_Global / TilingConfig::TILE_M); // Output Block ID: (BlockID_Row = y; BlockID_Col = x ) + const size_t Tile_Start_M = y * TilingConfig::TILE_M; + const size_t Tile_Start_N = x * TilingConfig::TILE_N; + const size_t NumColumnToCopy = (N_Global - Tile_Start_N) < TilingConfig::TILE_N + ? 
(N_Global - Tile_Start_N) + : TilingConfig::TILE_N; + const size_t NumBlock_K = K_Global / TilingConfig::TILE_K; + const size_t AverageNumBlock_K = NumBlock_K / Split_K; + const size_t ExtraNumBlock_K = NumBlock_K - AverageNumBlock_K * Split_K; + size_t NumIter = AverageNumBlock_K; + if (BatchID < ExtraNumBlock_K) NumIter++; + size_t StartBlockID_K = AverageNumBlock_K * BatchID; + if (BatchID < ExtraNumBlock_K) + StartBlockID_K += BatchID; + else + StartBlockID_K += ExtraNumBlock_K; + // Warp ID. + const int warpId = threadIdx.x / WARP_SIZE; + int WARP_i = + warpId / TilingConfig::BLOCK_COL_WARPS; // WARP_i: row number; WARP_j: column number + // int WARP_j = warpId % TilingConfig::BLOCK_COL_WARPS; + // Global Memory Address for Matrix A (Weight) + // ///////////////////////////////////////////////////////////////////////// StartPTR for each + // ThreadBlock(TB) + const uint4* TB_StartGPTR_A1 = + Weight1 + (y * TilingConfig::BLOCK_ROW_WARPS) * NumBlock_K * NUM_INT4_PER_UNIT_2BIT_FRAG; + const uint4* TB_StartGPTR_A2 = + Weight2 + (y * TilingConfig::BLOCK_ROW_WARPS) * NumBlock_K * NUM_INT4_PER_UNIT_4BIT_FRAG; + // StartPTR for each WARP. + const uint4* WARP_StartGPTR_A1 = + TB_StartGPTR_A1 + WARP_i * NumBlock_K * NUM_INT4_PER_UNIT_2BIT_FRAG; + const uint4* WARP_StartGPTR_A2 = + TB_StartGPTR_A2 + WARP_i * NumBlock_K * NUM_INT4_PER_UNIT_4BIT_FRAG; + // StartPTR for each WARP, considering SplitK + const size_t WARP_Start_UnitID_K = StartBlockID_K; + WARP_StartGPTR_A1 += WARP_Start_UnitID_K * NUM_INT4_PER_UNIT_2BIT_FRAG; + WARP_StartGPTR_A2 += WARP_Start_UnitID_K * NUM_INT4_PER_UNIT_4BIT_FRAG; + // Copying A tile from Global to Shared, using double-buffer + // ////////////////////////////////////////////////////////// StartSPTR for each ThreadBlock + uint32_t* AFrag_2BIT_SPTR = reinterpret_cast(smem); + uint32_t* AFrag_4BIT_SPTR = + AFrag_2BIT_SPTR + + SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 4 * TilingConfig::BLOCK_WARPS * + PIPELINE_LEVEL_GMEM; // 8 buffers including double buffers, 12 for trible buffers + // StartSPTR for each WARP + AFrag_2BIT_SPTR += warpId * SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 4; + AFrag_4BIT_SPTR += warpId * SMEM_SIZE_IN_BYTES_PER_WARP_A2 / 4; + // Pre-fetch of A tile + for (int i = 0; i < PIPELINE_LEVEL_GMEM - 1; i++) { + CopyFromGlobalToShared_A( + AFrag_2BIT_SPTR + i * SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 4 * 4, WARP_StartGPTR_A1); + CopyFromGlobalToShared_A( + AFrag_4BIT_SPTR + i * SMEM_SIZE_IN_BYTES_PER_WARP_A2 / 4 * 4, WARP_StartGPTR_A2); + WARP_StartGPTR_A1 += SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 16; + WARP_StartGPTR_A2 += SMEM_SIZE_IN_BYTES_PER_WARP_A2 / 16; + } + // Global Memory Address for Matrix A (QuantScale) + // ///////////////////////////////////////////////////////////////////// + const half* TB_StartGPTR_A_Scale = Scales + (y * TilingConfig::BLOCK_ROW_WARPS) * 64; + const half* WARP_StartGPTR_A_Scales = TB_StartGPTR_A_Scale + WARP_i * 64; + CopyFromGlobalToShared_Scales(QuantScales + WARP_i * 64, WARP_StartGPTR_A_Scales); + // Copying B tile from Global to Shared, considering SplitK + // ///////////////////////////////////////////////////////////// + const half* BTile_GPTR = B + Tile_Start_N * K_Global + StartBlockID_K * TilingConfig::TILE_K; + for (int i = 0; i < PIPELINE_LEVEL_GMEM - 1; i++) { + CopyFromGlobalToShared( + smem_array + i * TilingConfig::TILE_N, BTile_GPTR, K_Global, NumColumnToCopy); + BTile_GPTR += TilingConfig::TILE_K; + } + // Register Allocation for A,B, and C, Initilazed to Zeros + // 
///////////////////////////////////////////////////////////////////// + constexpr int NumRegSets_a = + WARP_ROW_MMA_TENSORS; // 1 set = 4 registers, containing a 16*16 MMA block + constexpr int NumRegSets_b = (TilingConfig::WARP_COL_MMA_TENSORS == 1) + ? 1 + : TilingConfig::WARP_COL_MMA_TENSORS / + 2; // 1 set = 4 registers, containing a 16*16 MMA block +#ifdef PIPELINE_LEVEL_SMEM + uint32_t a[NumRegSets_a * PIPELINE_LEVEL_SMEM] + [4]; // double/Trible buffer is used // Registers to store decompressed FP6 + uint32_t b[NumRegSets_b * PIPELINE_LEVEL_SMEM] + [4]; // double/Triple buffer is used // Register to store FP16 B matrix (a slice) +#endif + float c[NumRegSets_a * NumRegSets_b][REG_PER_THREAD_C_TENSOR_16_16]; + for (int i = 0; i < NumRegSets_a * NumRegSets_b; i++) + for (int j = 0; j < REG_PER_THREAD_C_TENSOR_16_16; j++) c[i][j] = 0.0f; + // + cp_async_wait_all(); + __syncthreads(); + + ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + uint32_t Scales_RPTR[4]; // 4 Registers per thread for Quantization Scales + ExtractFromSharedToReg_Scales(Scales_RPTR, QuantScales + WARP_i * 64); +#ifdef PIPELINE_LEVEL_SMEM + // Initializing the Software Pipeline: writing registers. + // //////////////////////////////////////////////////////////////////////////////////////////////// + initialize_mma_slice( + a, b, AFrag_2BIT_SPTR, AFrag_4BIT_SPTR, smem_array, Scales_RPTR); +#endif +// The outer loop. +// ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +#pragma unroll(1) + for (size_t tile_id_k = 0; tile_id_k < NumIter; tile_id_k++) { + // Trible-Buffer for A Tile + uint32_t* __restrict__ read_SPTR_Frag1 = + AFrag_2BIT_SPTR + ((tile_id_k + 0) % PIPELINE_LEVEL_GMEM) * + SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 4 * + 4; // 1024 (1)*4: 4 WARPs; (2)/4: int*+1 = char*+16 + uint32_t* __restrict__ read_SPTR_Frag2 = + AFrag_4BIT_SPTR + ((tile_id_k + 0) % PIPELINE_LEVEL_GMEM) * + SMEM_SIZE_IN_BYTES_PER_WARP_A2 / 4 * + 4; // 2048 (1)*4: 4 WARPs; (2)/4: int*+1 = char*+16 +#ifdef PIPELINE_LEVEL_SMEM + uint32_t* __restrict__ read2_SPTR_Frag1 = + AFrag_2BIT_SPTR + + ((tile_id_k + 1) % PIPELINE_LEVEL_GMEM) * SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 4 * 4; + uint32_t* __restrict__ read2_SPTR_Frag2 = + AFrag_4BIT_SPTR + + ((tile_id_k + 1) % PIPELINE_LEVEL_GMEM) * SMEM_SIZE_IN_BYTES_PER_WARP_A2 / 4 * 4; +#endif + uint32_t* __restrict__ write_SPTR_Frag1 = + AFrag_2BIT_SPTR + ((tile_id_k + (PIPELINE_LEVEL_GMEM - 1)) % PIPELINE_LEVEL_GMEM) * + SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 4 * + 4; // 1024 (1)*4: 4 WARPs; (2)/4: int*+1 = char*+16 + uint32_t* __restrict__ write_SPTR_Frag2 = + AFrag_4BIT_SPTR + ((tile_id_k + (PIPELINE_LEVEL_GMEM - 1)) % PIPELINE_LEVEL_GMEM) * + SMEM_SIZE_IN_BYTES_PER_WARP_A2 / 4 * + 4; // 2048 (1)*4: 4 WARPs; (2)/4: int*+1 = char*+16 + // Trible-Buffer for B Tile + half __restrict__(*read_SPTR)[WARP_K + PADDING_SHARED_MEM_FOR_B_8] = + smem_array + ((tile_id_k + 0) % PIPELINE_LEVEL_GMEM) * TilingConfig::TILE_N; +#ifdef PIPELINE_LEVEL_SMEM + half __restrict__(*read2_SPTR)[WARP_K + PADDING_SHARED_MEM_FOR_B_8] = + smem_array + ((tile_id_k + 1) % PIPELINE_LEVEL_GMEM) * TilingConfig::TILE_N; +#endif + half __restrict__(*write_SPTR)[WARP_K + PADDING_SHARED_MEM_FOR_B_8] = + smem_array + + ((tile_id_k + (PIPELINE_LEVEL_GMEM - 1)) % PIPELINE_LEVEL_GMEM) * TilingConfig::TILE_N; + // + bool GlobalCopy = (tile_id_k + PIPELINE_LEVEL_GMEM - 1) < NumIter; + // 
Copying A tile from Global to Register, Bypassing L1, using double-buffer + CopyFromGlobalToShared_A( + write_SPTR_Frag1, WARP_StartGPTR_A1, GlobalCopy); + CopyFromGlobalToShared_A( + write_SPTR_Frag2, WARP_StartGPTR_A2, GlobalCopy); + // copying B tile from GlobalMemory to SharedMemory + CopyFromGlobalToShared( + write_SPTR, BTile_GPTR, K_Global, NumColumnToCopy, GlobalCopy); + cp_async_group_commit(); +#ifdef PIPELINE_LEVEL_SMEM + core_mma_slice(c, + a, + b, + read_SPTR_Frag1, + read_SPTR_Frag2, + read_SPTR, + Scales_RPTR, + 1); // read_SPTR_Frag1, read_SPTR_Frag2 are different for each + // WARP; read_SPTR is shared among WARPs + core_mma_slice( + c, a, b, read_SPTR_Frag1, read_SPTR_Frag2, read_SPTR, Scales_RPTR, 2); + core_mma_slice( + c, a, b, read_SPTR_Frag1, read_SPTR_Frag2, read_SPTR, Scales_RPTR, 3); + // Barriers and Synchronizations + cp_async_wait_group(); + __syncthreads(); + core_mma_slice( + c, a, b, read2_SPTR_Frag1, read2_SPTR_Frag2, read2_SPTR, Scales_RPTR, 0); + // Updating global PTRs + WARP_StartGPTR_A1 += + SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 16; // 4KB/16=256 (1)/16: int4*+1 = char*+16 + WARP_StartGPTR_A2 += + SMEM_SIZE_IN_BYTES_PER_WARP_A2 / 16; // 8KB/16=512 (1)/16: int4*+1 = char*+16 + BTile_GPTR += TilingConfig::TILE_K; +#else + PipelinedCoreLoop( + c, + read_SPTR, + read_SPTR_Frag1, + read_SPTR_Frag2, + Scales_RPTR); // read_SPTR_Frag1, read_SPTR_Frag2 are different for each WARP; + // read_SPTR is shared among WARPs + // Updating global PTRs + WARP_StartGPTR_A1 += + SMEM_SIZE_IN_BYTES_PER_WARP_A1 / 16; // 4KB/16=256 (1)/16: int4*+1 = char*+16 + WARP_StartGPTR_A2 += + SMEM_SIZE_IN_BYTES_PER_WARP_A2 / 16; // 8KB/16=512 (1)/16: int4*+1 = char*+16 + BTile_GPTR += TilingConfig::TILE_K; + // Barriers and Synchronizations + cp_async_wait_group(); + __syncthreads(); +#endif + } + ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + // Store the C fragments to shared memory. + float(*smem_CFrag)[TilingConfig::TILE_M + PADDING_SHARED_MEM_FOR_C_4] = + reinterpret_cast(smem); + StoreToSharedMemoryFromRegister(smem_CFrag, c); + __syncthreads(); + // Now that shared memory contains all the D tiles, stream them to global memory. + OutputDataType* BlockGlobalPTR = + C + BatchID * (M_Global * N_Global) + Tile_Start_M + Tile_Start_N * M_Global; + for (size_t i = warpId; i < NumColumnToCopy; i += TilingConfig::BLOCK_WARPS) // i-th column +#pragma unroll + for (size_t j = threadIdx.x % WARP_SIZE; j < TilingConfig::TILE_M; + j += WARP_SIZE) // j-th row + { + if constexpr (std::is_same::value) + BlockGlobalPTR[j + i * M_Global] = __float2half_rn(smem_CFrag[i][j]); + else + BlockGlobalPTR[j + i * M_Global] = smem_CFrag[i][j]; + } + +#else +#warning "The FP6 functions are only available on Ampere GPUs." +#endif +} + +#endif diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/ptx_mma.cuh b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/ptx_mma.cuh new file mode 100644 index 0000000000000000000000000000000000000000..8023629caac919c98869a6664878769956324997 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/ptx_mma.cuh @@ -0,0 +1,137 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +// This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112 + +#ifndef DEEPSPEED_CUDA_LINEAR_PTX_MMA_CUH +#define DEEPSPEED_CUDA_LINEAR_PTX_MMA_CUH + +#include +#include +#include + +#include +#include "configs.h" + +#ifdef PIPELINE_LEVEL_SMEM +template +__device__ __forceinline__ void B_FromSharedToReg( + uint32_t __restrict__ Reg[][4], + half __restrict__ (*read_SPTR)[WARP_K + PADDING_SHARED_MEM_FOR_B_8], + int slice_id) +{ +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 +#ifdef DEBUG_MODE + static_assert((TilingConfig::WARP_COL_MMA_TENSORS == 1) || + (TilingConfig::WARP_COL_MMA_TENSORS % 2 == 0)); +#endif + + const int warpId = threadIdx.x / WARP_SIZE; + int lane_id = threadIdx.x % WARP_SIZE; + int WARP_j = warpId % TilingConfig::BLOCK_COL_WARPS; + int warp_start_col = TilingConfig::WARP_COL_MMA_TENSORS * MMA_8 * + WARP_j; // each warp may start from reading warp_start_col'th column of + // the B tile in shared memory +#ifdef DEBUG_MODE + assert(warp_start_col == 0); +#endif + + int col = (lane_id % 8) + (lane_id / 16) * 8; + int row = (lane_id % 16) / 8 * 8; + uint32_t smem_local_ptr = static_cast( + __cvta_generic_to_shared(&read_SPTR[warp_start_col + col][slice_id * MMA_16 + row])); + if (TilingConfig::WARP_COL_MMA_TENSORS == 1) { + asm volatile("ldmatrix.sync.aligned.x2.m8n8.shared.b16 {%0, %1}, [%2];\n" + : "=r"(Reg[0][0]), "=r"(Reg[0][1]) + : "r"(smem_local_ptr)); + } else { +#pragma unroll + for (int i = 0; i < TilingConfig::WARP_COL_MMA_TENSORS / 2; i++) { + asm volatile("ldmatrix.sync.aligned.x4.m8n8.shared.b16 {%0, %1, %2, %3}, [%4];\n" + : "=r"(Reg[i][0]), "=r"(Reg[i][1]), "=r"(Reg[i][2]), "=r"(Reg[i][3]) + : "r"(smem_local_ptr)); + smem_local_ptr += 16 * (WARP_K + PADDING_SHARED_MEM_FOR_B_8) * sizeof(half); + } + } +#else +#warning "The matrix load functions are only supported on Ampere and newer architectures" +#endif +} +#else +// Debug: Whether ldmatrix.trans is required??? 
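+// Editor's note on the lane-to-address mapping used by both variants of B_FromSharedToReg:
+//   col = (lane_id % 8) + (lane_id / 16) * 8,   row = ((lane_id % 16) / 8) * 8
+//   lanes  0-7  -> columns 0-7,  row offset 0      lanes  8-15 -> columns 0-7,  row offset 8
+//   lanes 16-23 -> columns 8-15, row offset 0      lanes 24-31 -> columns 8-15, row offset 8
+// i.e. the 32 lanes supply the four 8x8 sub-tiles of a 16x16 half block consumed by ldmatrix.x4.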
+// B is in column-major +template +__device__ __forceinline__ void B_FromSharedToReg( + uint32_t __restrict__ Reg[][4], + half __restrict__ (*read_SPTR)[WARP_K + PADDING_SHARED_MEM_FOR_B_8], + int k_offset) +{ +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 +#ifdef DEBUG_MODE + static_assert((TilingConfig::WARP_COL_MMA_TENSORS == 1) || + (TilingConfig::WARP_COL_MMA_TENSORS % 2 == 0)); +#endif + + const int warpId = threadIdx.x / WARP_SIZE; + int lane_id = threadIdx.x % WARP_SIZE; + int WARP_j = warpId % TilingConfig::BLOCK_COL_WARPS; + int warp_start_col = TilingConfig::WARP_COL_MMA_TENSORS * MMA_8 * + WARP_j; // each warp may start from reading warp_start_col'th column of + // the B tile in shared memory +#ifdef DEBUG_MODE + assert(warp_start_col == 0); +#endif + + int col = (lane_id % 8) + (lane_id / 16) * 8; + int row = (lane_id % 16) / 8 * 8; + uint32_t smem_local_ptr = static_cast( + __cvta_generic_to_shared(&read_SPTR[warp_start_col + col][k_offset + row])); + if (TilingConfig::WARP_COL_MMA_TENSORS == 1) { + asm volatile("ldmatrix.sync.aligned.x2.m8n8.shared.b16 {%0, %1}, [%2];\n" + : "=r"(Reg[0][0]), "=r"(Reg[0][1]) + : "r"(smem_local_ptr)); + } else { +#pragma unroll + for (int i = 0; i < TilingConfig::WARP_COL_MMA_TENSORS / 2; i++) { + asm volatile("ldmatrix.sync.aligned.x4.m8n8.shared.b16 {%0, %1, %2, %3}, [%4];\n" + : "=r"(Reg[i][0]), "=r"(Reg[i][1]), "=r"(Reg[i][2]), "=r"(Reg[i][3]) + : "r"(smem_local_ptr)); + smem_local_ptr += 16 * (WARP_K + PADDING_SHARED_MEM_FOR_B_8) * sizeof(half); + } + } +#else +#warning "The matrix load functions are only supported on Ampere and newer architectures" +#endif +} +#endif + +__device__ __forceinline__ void MMA_FP16_M16N8K16(uint32_t __restrict__ c[], + uint32_t __restrict__* a, + uint32_t __restrict__* b) +{ +#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 + asm volatile( + "mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32" + "{ %0, %1, %2, %3}," + "{ %4, %5, %6, %7 }," + "{ %8, %9 }," + "{ %10, %11, %12, %13 };" + : "=r"(c[0]), "=r"(c[1]), "=r"(c[2]), "=r"(c[3]) + : "r"(a[0]), + "r"(a[1]), + "r"(a[2]), + "r"(a[3]), + "r"(b[0]), + "r"(b[1]), + "r"(c[0]), + "r"(c[1]), + "r"(c[2]), + "r"(c[3])); +#else +#warning "The mma functions are only implemented for Ampere and newer architectures" +#endif +} + +#endif diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/utils_core.cuh b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/utils_core.cuh new file mode 100644 index 0000000000000000000000000000000000000000..a65575a1ba5a68ca655e24c1091b9015631ffbff --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/utils_core.cuh @@ -0,0 +1,246 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +// This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112 + +#ifndef DEEPSPEED_CUDA_LINEAR_UTILS_CORE_CUH +#define DEEPSPEED_CUDA_LINEAR_UTILS_CORE_CUH + +#include + +#include "configs.h" +#include "ptx_mma.cuh" +#include "utils_paralleldequant.cuh" + +#ifdef PIPELINE_LEVEL_SMEM +template +__device__ __forceinline__ void CopyFromSharedToRegister_AFrag(uint32_t Reg[], + uint32_t* SPTR, + int slice_id) +{ + SPTR += slice_id * (NUM_INT_PER_THREAD * WARP_SIZE); + int lane_id = threadIdx.x % WARP_SIZE; +#pragma unroll + for (int i = 0; i < NUM_INT_PER_THREAD; i++) { Reg[i] = SPTR[lane_id + i * WARP_SIZE]; } +} + +template +__device__ __forceinline__ void initialize_mma_slice( + uint32_t (*a)[4], + uint32_t (*b)[4], + uint32_t* __restrict__ A1_SPTR_read, + uint32_t* __restrict__ A2_SPTR_read, + half __restrict__ (*B_SPTR_read)[WARP_K + PADDING_SHARED_MEM_FOR_B_8], + uint32_t* RPTR_Scales) +{ + // Writing registers + // Registers to store FP6 fragments for a slice (64*16) of A matrix => 32 FP6 per thread => 6 + // register per thread; + uint32_t a_1[2]; // NO double buffer + uint32_t a_2[4]; // NO double buffer + CopyFromSharedToRegister_AFrag<2>(a_1, A1_SPTR_read, 0); + CopyFromSharedToRegister_AFrag<4>(a_2, A2_SPTR_read, 0); + Dequant_32FP6_4Way(a, a_1, a_2, RPTR_Scales); // SIMT Dequant: dequantizing FP6 to FP16 at + // register level, dequantizing a slice each time + B_FromSharedToReg(b, B_SPTR_read, 0); // Loading B from shared to registers +} + +template +__device__ __forceinline__ void core_mma_slice( + float c[][REG_PER_THREAD_C_TENSOR_16_16], + uint32_t (*a)[4], + uint32_t (*b)[4], + uint32_t* __restrict__ A1_SPTR_read, + uint32_t* __restrict__ A2_SPTR_read, + half __restrict__ (*B_SPTR_read)[WARP_K + PADDING_SHARED_MEM_FOR_B_8], + uint32_t* RPTR_Scales, + int slice_id) // writing slice[slice_id] to registers, k=0 -> slice_id=1 for prefetching +{ +#ifdef DEBUG_MODE + assert( + (TilingConfig::WARP_COL_MMA_TENSORS == 1) || + (TilingConfig::WARP_COL_MMA_TENSORS % 2 == + 0)); // if WARP_COL_MMA_TENSORS == 1, B tile in registers is padded to a 16*16 MMA block +#endif + const int NumRegSets_a = + WARP_ROW_MMA_TENSORS; // 1 set = 4 registers, containing a 16*16 MMA block + const int NumRegSets_b = (TilingConfig::WARP_COL_MMA_TENSORS == 1) + ? 
1 + : TilingConfig::WARP_COL_MMA_TENSORS / + 2; // 1 set = 4 registers, containing a 16*16 MMA block + uint32_t(*c_uint_ptr)[REG_PER_THREAD_C_TENSOR_16_16] = + reinterpret_cast( + c); // Registers for accumulated FP32 results + + // Setting RPTRs for double buffers + uint32_t(*a_read)[4] = a; + uint32_t(*a_write)[4] = a; + uint32_t(*b_read)[4] = b; + uint32_t(*b_write)[4] = b; + if (slice_id % 2 == 1) { + b_write += NumRegSets_b; + a_write += NumRegSets_a; + } else { + b_read += NumRegSets_b; + a_read += NumRegSets_a; + } + +// Reading registers and issuing core tensor core computations (a slice of A and B tile in shared +// memory) +#pragma unroll + for (int i = 0; i < WARP_ROW_MMA_TENSORS; i++) { + if (TilingConfig::WARP_COL_MMA_TENSORS == 1) { + MMA_FP16_M16N8K16(c_uint_ptr[i], a_read[i], b_read[0]); + } else { +#pragma unroll + for (int j = 0; j < TilingConfig::WARP_COL_MMA_TENSORS / 2; j++) { + MMA_FP16_M16N8K16(c_uint_ptr[i + j * WARP_ROW_MMA_TENSORS], a_read[i], b_read[j]); + MMA_FP16_M16N8K16(c_uint_ptr[i + j * WARP_ROW_MMA_TENSORS] + 4, + a_read[i], + b_read[j] + 2); // c+4; b+2 + } + } + } + + // Writing registers + // Registers to store FP6 fragments for a slice (64*16) of A matrix => 32 FP6 per thread => 6 + // register per thread; + uint32_t a_1[2]; // NO double buffer + uint32_t a_2[4]; // NO double buffer + CopyFromSharedToRegister_AFrag<2>(a_1, A1_SPTR_read, slice_id); + CopyFromSharedToRegister_AFrag<4>(a_2, A2_SPTR_read, slice_id); + Dequant_32FP6_4Way( + a_write, a_1, a_2, RPTR_Scales); // SIMT Dequant: dequantizing FP6 to FP16 at register + // level, dequantizing a slice each time + B_FromSharedToReg( + b_write, B_SPTR_read, slice_id); // Loading B from shared to registers +} + +#else +// Old version with naive pipeline design +template +__device__ __forceinline__ void CopyFromSharedToRegister_AFrag(uint32_t Reg[], uint32_t* SPTR) +{ + int lane_id = threadIdx.x % WARP_SIZE; +#pragma unroll + for (int i = 0; i < NUM_INT_PER_THREAD; i++) { Reg[i] = SPTR[lane_id + i * WARP_SIZE]; } +} +template +__device__ __forceinline__ void PipelinedCoreLoop( + float c[][REG_PER_THREAD_C_TENSOR_16_16], + half __restrict__ (*read_SPTR)[WARP_K + PADDING_SHARED_MEM_FOR_B_8], + uint32_t* __restrict__ read_SPTR_Frag1, + uint32_t* __restrict__ read_SPTR_Frag2, + uint32_t* RPTR_Scales) +{ +#ifdef DEBUG_MODE + assert( + (TilingConfig::WARP_COL_MMA_TENSORS == 1) || + (TilingConfig::WARP_COL_MMA_TENSORS % 2 == + 0)); // if WARP_COL_MMA_TENSORS == 1, B tile in registers is padded to a 16*16 MMA block +#endif + const int NumRegSets_a = + WARP_ROW_MMA_TENSORS; // 1 set = 4 registers, containing a 16*16 MMA block + const int NumRegSets_b = (TilingConfig::WARP_COL_MMA_TENSORS == 1) + ? 
1 + : TilingConfig::WARP_COL_MMA_TENSORS / + 2; // 1 set = 4 registers, containing a 16*16 MMA block + + // Registers to store FP32 results + uint32_t(*c_uint_ptr)[REG_PER_THREAD_C_TENSOR_16_16] = + reinterpret_cast(c); + // Registers to store FP6 fragments for a slice (64*16) of A matrix => 32 FP6 per thread => 6 + // register per thread; + uint32_t a_1[2 * 2]; // double buffer is used + uint32_t a_2[4 * 2]; // double buffer is used + // Registers to store decompressed FP6 + uint32_t a[NumRegSets_a * 1][4]; // No double buffer + // Register to store FP16 B matrix (a slice) + uint32_t b[NumRegSets_b * 2][4]; // double buffer is used + + // Overlapped Smem and TC pipeline: pre-loading from shared to registers + CopyFromSharedToRegister_AFrag<2>(a_1, read_SPTR_Frag1); + CopyFromSharedToRegister_AFrag<4>(a_2, read_SPTR_Frag2); + B_FromSharedToReg(b, read_SPTR, 0); + +#pragma unroll + for (int k = 0; k < WARP_K_MMA_TENSORS; k++) { + uint32_t(*b_read)[4] = b; + uint32_t(*b_write)[4] = b; + uint32_t* a_1_read = a_1; + uint32_t* a_1_write = a_1; + uint32_t* a_2_read = a_2; + uint32_t* a_2_write = a_2; + if (k % 2 == 0) { + b_write += NumRegSets_b; + a_1_write += 2; + a_2_write += 4; + } else { + b_read += NumRegSets_b; + a_1_read += 2; + a_2_read += 4; + } + // data loading + if (k + 1 < WARP_K_MMA_TENSORS) { + // updating SPTR for fragment1 and fragment2 + read_SPTR_Frag1 += 2 * WARP_SIZE; + read_SPTR_Frag2 += 4 * WARP_SIZE; + CopyFromSharedToRegister_AFrag<2>(a_1_write, read_SPTR_Frag1); + CopyFromSharedToRegister_AFrag<4>(a_2_write, read_SPTR_Frag2); + B_FromSharedToReg(b_write, read_SPTR, (k + 1) * MMA_16); + } + // SIMT Dequant + Tensor Core computations + Dequant_32FP6_4Way( + a, a_1_read, a_2_read, RPTR_Scales); // Dequantizing FP6 to FP16 at register level, + // dequantizing a slice each time +#pragma unroll + for (int i = 0; i < WARP_ROW_MMA_TENSORS; i++) { + if (TilingConfig::WARP_COL_MMA_TENSORS == 1) + MMA_FP16_M16N8K16(c_uint_ptr[i], a[i], b_read[0]); + else { +#pragma unroll + for (int j = 0; j < TilingConfig::WARP_COL_MMA_TENSORS / 2; j++) { + MMA_FP16_M16N8K16(c_uint_ptr[i + j * WARP_ROW_MMA_TENSORS], a[i], b_read[j]); + MMA_FP16_M16N8K16(c_uint_ptr[i + j * WARP_ROW_MMA_TENSORS] + 4, + a[i], + b_read[j] + 2); // c+4; b+2 + } + } + } + } +} +#endif // #ifdef PIPELINE_LEVEL_SMEM + +template +__device__ __forceinline__ void StoreToSharedMemoryFromRegister( + float (*smem_CFrag)[TilingConfig::TILE_M + PADDING_SHARED_MEM_FOR_C_4], + float c[][REG_PER_THREAD_C_TENSOR_16_16]) +{ + const int lane_id = threadIdx.x % WARP_SIZE; + const int warpId = threadIdx.x / WARP_SIZE; + int warp_row_offset = warpId * (MMA_16 * WARP_ROW_MMA_TENSORS); +#pragma unroll + for (int i = 0; i < WARP_ROW_MMA_TENSORS; i++) { +#pragma unroll + for (int j = 0; j < TilingConfig::WARP_COL_MMA_TENSORS; + j++) { // Dealing with one 16*8 Tensor + int RegSetID = i + (j / 2) * WARP_ROW_MMA_TENSORS; + int RegOffset = (j % 2) * (REG_PER_THREAD_C_TENSOR_16_16 / 2); + int Tensor_row_offset = warp_row_offset + i * MMA_16; + int Tensor_col_offset = j * MMA_8; +#pragma unroll + for (int r = 0; r < REG_PER_THREAD_C_TENSOR_16_16 / 2; r++) { + int row_offset = lane_id / 4; + if (r >= 2) row_offset += 8; + int col_offset = (lane_id % 4) * 2; + if (r % 2 == 1) col_offset += 1; + smem_CFrag[Tensor_col_offset + col_offset][Tensor_row_offset + row_offset] = + c[RegSetID][r + RegOffset]; + } + } + } +} + +#endif diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/utils_gmem.cuh 
b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/utils_gmem.cuh new file mode 100644 index 0000000000000000000000000000000000000000..d0c58352cd56502de5daf5b742782a68cf4657c6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/utils_gmem.cuh @@ -0,0 +1,86 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +// This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112 + +#ifndef DEEPSPEED_CUDA_LINEAR_UTILS_GMEM_CUH +#define DEEPSPEED_CUDA_LINEAR_UTILS_GMEM_CUH + +#include +#include "configs.h" +#include "ptx_cp.async.cuh" + +/* + * Copying A1/A2 from global memory to shared memory. + * Usually 1024 or 2048 Bytes + */ +template +__device__ __forceinline__ void CopyFromGlobalToShared_A(uint32_t* SPTR, + const uint4* GPTR, + bool pred_guard = true) +{ +#ifdef DEBUG_MODE + static_assert(SMEM_SIZE_IN_BYTES_PER_WARP / WARP_SIZE % 16 == 0); +#endif + int lane_id = threadIdx.x % WARP_SIZE; + half* SPTR_HALF = reinterpret_cast(SPTR); + const half* GPTR_HALF = reinterpret_cast(GPTR); + SPTR_HALF += lane_id * 8; + GPTR_HALF += lane_id * 8; +#pragma unroll + for (int i = 0; i < SMEM_SIZE_IN_BYTES_PER_WARP / WARP_SIZE / 16; i++) { + cp_async<16>(SPTR_HALF, GPTR_HALF, pred_guard); + SPTR_HALF += 256; // Forward 512 Bytes + GPTR_HALF += 256; // Forward 512 Bytes + } +} + +/* + * Copying 64 Quant Scales (FP16) from global memory to shared memory. + */ +__device__ __forceinline__ void CopyFromGlobalToShared_Scales(half* SPTR_QuantScales, + const half* GPTR_A_Scales) +{ + int lane_id = threadIdx.x % WARP_SIZE; + int Offset_Shared = lane_id * 2; + int Offset_Global = lane_id / 4 + (lane_id % 4) * 16; + for (int i = 0; i < 2; i++) + SPTR_QuantScales[Offset_Shared + i] = GPTR_A_Scales[Offset_Global + i * 8]; +} + +/* + * (1) Copying X rows * 64 columns of FP16 values, originally in row major + * (2) Copying 64 rows * X columns of FP16 values, originally in column major + * 16 Bytes per thread -> 512 Bytes per WARP = 4 line per WARP = 1 line per 8 Threads + */ +template +__device__ __forceinline__ void CopyFromGlobalToShared( + half __restrict__ (*SharedPTR)[WARP_K + PADDING_SHARED_MEM_FOR_B_8], + const half* GlobalPTR, + const int GlobalStride, + const int NumOfLinesLeft, // To support arbitrary N dimensions. 
+ bool Pred = true) +{ + // static parameters: 1 Group (8 Threads) can copy 1 line (64 FP16) each time + const int NumOfThreads = BLOCK_WARPS * WARP_SIZE; + const int NumOfGroups = NumOfThreads / 8; + const int MaxIteration = (MaxNumOfLinesToCopy - 1) / NumOfGroups + 1; + // runtime variables + const int line_id = threadIdx.x / 8; + const int line_offset = (threadIdx.x % 8) * 8; + // PTR for source global memory and target shared memory + GlobalPTR += line_id * GlobalStride + line_offset; + SharedPTR += line_id; +#pragma unroll + for (int i = 0; i < MaxIteration; i++) { + bool AsyncCopyPred = (line_id + i * NumOfGroups) < NumOfLinesLeft && Pred; + cp_async<16>(&(*SharedPTR)[line_offset], GlobalPTR, AsyncCopyPred); + // + GlobalPTR += NumOfGroups * GlobalStride; + SharedPTR += NumOfGroups; + } +} + +#endif diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/utils_paralleldequant.cuh b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/utils_paralleldequant.cuh new file mode 100644 index 0000000000000000000000000000000000000000..9f035f12cfcd699984278f8cd4ab8caba2bfa2b5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/utils_paralleldequant.cuh @@ -0,0 +1,127 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +// This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112 + +#ifndef DEEPSPEED_CUDA_LINEAR_UTILS_PARALLELDEQUANT_CUH +#define DEEPSPEED_CUDA_LINEAR_UTILS_PARALLELDEQUANT_CUH + +#include +#include +#include + +/* + * Input: R1 + * Outputs: R1, R2 + * Note: Simplified Exponent calculation is applied. + */ +__device__ __forceinline__ void FP6_FP16_Cast_4Way(u_int32_t* R1, u_int32_t* R2) +{ + *R2 = *R1 & 0x80808080; + *R1 = *R1 >> 2; + *R1 = *R1 & 0x1f1f1f1f; + *R2 = *R2 | *R1; + *R1 = *R2 & 0x9f009f00; + *R2 = *R2 & 0x009f009f; + *R2 = *R2 << 8; +} + +/* + * Input: R1 + * Outputs: R1, R2 + * Note: Simplified Exponent calculation is NOT applied. 
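+ * Editor's note (assuming the e3m2 FP6 layout used by FP6-LLM, i.e. exponent bias 3): the
+ * "simplified" cast above leaves the exponent un-rebased for FP16 (bias 15); the missing factor
+ * of 2^(15-3) = 4096 is applied later in MultScale(), together with the quantization scale.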
+ */ +__device__ __forceinline__ void FP6_FP16_Cast_4Way_Naive(u_int32_t* R1, u_int32_t* R2) +{ + //*R2 = *R1 & 0x80808080; + *R2 = *R1 & 0xc0c0c0c0; + *R1 = *R1 >> 2; + //*R1 = *R1 & 0x1f1f1f1f; + *R1 = *R1 & 0x0f0f0f0f; + *R2 = *R2 | *R1; + // + //*R1 = *R2 & 0x9f009f00; + //*R2 = *R2 & 0x009f009f; + *R1 = *R2 & 0xcf00cf00; + if (!(*R1 & 0x40000000) && (*R1 & 0x0c000000)) *R1 = *R1 | 0x30000000; + if (!(*R1 & 0x00004000) && (*R1 & 0x00000c00)) *R1 = *R1 | 0x00003000; + *R2 = *R2 & 0x00cf00cf; + if (!(*R2 & 0x00400000) && (*R2 & 0x000c0000)) *R2 = *R2 | 0x00300000; + if (!(*R2 & 0x00000040) && (*R2 & 0x0000000c)) *R2 = *R2 | 0x00000030; + // + *R2 = *R2 << 8; + //*R1 = 0x3c003c00; + //*R2 = 0x3c003c00; +} + +__device__ __forceinline__ u_int32_t MultScale(u_int32_t PackedFP16Pair, half Scale) +{ + half* FP16_1 = reinterpret_cast(&PackedFP16Pair); + half* FP16_2 = FP16_1 + 1; + uint32_t output; + half* output_half_ptr = reinterpret_cast(&output); + output_half_ptr[0] = __hmul(__hmul(*FP16_1, __float2half(4096.0f)), Scale); + output_half_ptr[1] = __hmul(__hmul(*FP16_2, __float2half(4096.0f)), Scale); + return output; +} + +__device__ __forceinline__ void Dequant_32FP6_4Way(u_int32_t __restrict__ Reg[][4], + u_int32_t __restrict__* read_RPTR_Frag1, + u_int32_t __restrict__* read_RPTR_Frag2, + u_int32_t* Scales) +{ + u_int32_t* OutputRegs = reinterpret_cast(Reg); + u_int32_t* Frag1_PTR = read_RPTR_Frag1; + u_int32_t* Frag2_PTR = read_RPTR_Frag2; + half* Scale_RPTR = reinterpret_cast(Scales); + u_int32_t Packed_FP6 = 0; + u_int32_t tmp = 0; +// Dequantizing 32 FP6, each Loop dequantizing 4 FP6 +#pragma unroll(8) + for (int i = 0; i < 8; i++) { + // Frag1 + Packed_FP6 = (*Frag1_PTR) & 0xc0c0c0c0; + if (i % 4 == 3) + Frag1_PTR++; + else + (*Frag1_PTR) = (*Frag1_PTR) << 2; + // Frag2 + tmp = (*Frag2_PTR) & 0xf0f0f0f0; + tmp = tmp >> 2; + if (i % 2 == 1) + Frag2_PTR++; + else + (*Frag2_PTR) = (*Frag2_PTR) << 4; + // Packed_FP6 + Packed_FP6 = Packed_FP6 | tmp; + // + FP6_FP16_Cast_4Way(&Packed_FP6, &tmp); + // + *OutputRegs = MultScale(Packed_FP6, Scale_RPTR[0]); // Muliply FP16 scales + OutputRegs += 1; + *OutputRegs = MultScale(tmp, Scale_RPTR[1]); // Muliply FP16 scales + OutputRegs += 1; + // Updating offset for FP16 scales for every two iterations + if (i % 2 == 1) Scale_RPTR += 2; + } +} + +/* + * + */ +__device__ __forceinline__ void ExtractFromSharedToReg_Scales(uint32_t* Scales, + half* WARP_SPTR_Scales) +{ + int lane_id = threadIdx.x % WARP_SIZE; + uint32_t* SPTR_uint = reinterpret_cast(WARP_SPTR_Scales); + uint32_t tmpReg = SPTR_uint[lane_id]; +#pragma unroll + for (int i = 0; i < 4; i++) { + // T __shfl_sync(unsigned mask, T var, int srcLane, int width=warpSize); + Scales[i] = __shfl_sync(0xffffffff, tmpReg, i, 4); + } +} + +#endif diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/weight_prepacking.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/weight_prepacking.h new file mode 100644 index 0000000000000000000000000000000000000000..98805e6c111c620688c6da7166898a333ecbc5d2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/core_ops/cuda_linear/include/weight_prepacking.h @@ -0,0 +1,209 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +// This is a copy of FP6-LLM kernel code: https://arxiv.org/abs/2401.14112 + +#ifndef DEEPSPEED_CUDA_LINEAR_WEIGHT_PREPACKING_H +#define DEEPSPEED_CUDA_LINEAR_WEIGHT_PREPACKING_H + +#include +#include +#include + +using namespace std; + +void Padding_8_FP6_To_8_Bytes(unsigned char Padded_FP6[], + unsigned char* FP6_Array) // padding 0 to the lowerest bit location +{ + Padded_FP6[0] = FP6_Array[0] & 0xfc; + Padded_FP6[1] = (FP6_Array[0] << 6) | ((FP6_Array[1] >> 2) & 0xfc); + Padded_FP6[2] = (FP6_Array[1] << 4) | ((FP6_Array[2] >> 4) & 0xfc); + Padded_FP6[3] = FP6_Array[2] << 2; + Padded_FP6[4] = FP6_Array[3] & 0xfc; + Padded_FP6[5] = (FP6_Array[3] << 6) | ((FP6_Array[4] >> 2) & 0xfc); + Padded_FP6[6] = (FP6_Array[4] << 4) | ((FP6_Array[5] >> 4) & 0xfc); + Padded_FP6[7] = FP6_Array[5] << 2; +} + +unsigned char Extract_2_Bits_From_4_PaddedFP6(unsigned char B1, + unsigned char B2, + unsigned char B3, + unsigned char B4) +{ + unsigned char out; + out = (B1 & 0xc0) | ((B2 & 0xc0) >> 2) | ((B3 & 0xc0) >> 4) | ((B4 & 0xc0) >> 6); + return out; +} + +unsigned char Extract_4_Bits_From_2_PaddedFP6( + unsigned char B1, + unsigned char + B2) // The highest two bits are already extracted by Extract_2_Bits_From_4_PaddedFP6(); +{ + unsigned char out; + out = ((B1 << 2) & 0xf0) | ((B2 >> 2) & 0x0f); + return out; +} + +// dealing with 4 1*8 blocks of FP6 +void Assign_32_FP6_To_4_Thread(vector Seg_2bit[], + vector Seg_4bit[], + unsigned char* PTR_1, + unsigned char* PTR_2, + unsigned char* PTR_3, + unsigned char* PTR_4) +{ + unsigned char Padded_8_FP8[4][8]; + Padding_8_FP6_To_8_Bytes(Padded_8_FP8[0], PTR_1); + Padding_8_FP6_To_8_Bytes(Padded_8_FP8[1], PTR_2); + Padding_8_FP6_To_8_Bytes(Padded_8_FP8[2], PTR_3); + Padding_8_FP6_To_8_Bytes(Padded_8_FP8[3], PTR_4); + // + unsigned char Seg1_Byte1_T[4]; + unsigned char Seg1_Byte2_T[4]; + unsigned char Seg2_Byte1_T[4]; + unsigned char Seg2_Byte2_T[4]; + unsigned char Seg2_Byte3_T[4]; + unsigned char Seg2_Byte4_T[4]; + for (int t = 0; t < 4; t++) { + Seg1_Byte1_T[t] = Extract_2_Bits_From_4_PaddedFP6(Padded_8_FP8[0][0 + t * 2], + Padded_8_FP8[0][1 + t * 2], + Padded_8_FP8[1][0 + t * 2], + Padded_8_FP8[1][1 + t * 2]); + Seg1_Byte2_T[t] = Extract_2_Bits_From_4_PaddedFP6(Padded_8_FP8[2][0 + t * 2], + Padded_8_FP8[2][1 + t * 2], + Padded_8_FP8[3][0 + t * 2], + Padded_8_FP8[3][1 + t * 2]); + Seg2_Byte1_T[t] = + Extract_4_Bits_From_2_PaddedFP6(Padded_8_FP8[0][0 + t * 2], Padded_8_FP8[0][1 + t * 2]); + Seg2_Byte2_T[t] = + Extract_4_Bits_From_2_PaddedFP6(Padded_8_FP8[1][0 + t * 2], Padded_8_FP8[1][1 + t * 2]); + Seg2_Byte3_T[t] = + Extract_4_Bits_From_2_PaddedFP6(Padded_8_FP8[2][0 + t * 2], Padded_8_FP8[2][1 + t * 2]); + Seg2_Byte4_T[t] = + Extract_4_Bits_From_2_PaddedFP6(Padded_8_FP8[3][0 + t * 2], Padded_8_FP8[3][1 + t * 2]); + } + // + for (int t = 0; t < 4; t++) { + Seg_2bit[t].push_back(Seg1_Byte1_T[t]); + Seg_2bit[t].push_back(Seg1_Byte2_T[t]); + Seg_4bit[t].push_back(Seg2_Byte1_T[t]); + Seg_4bit[t].push_back(Seg2_Byte2_T[t]); + Seg_4bit[t].push_back(Seg2_Byte3_T[t]); + Seg_4bit[t].push_back(Seg2_Byte4_T[t]); + } + return; +} + +void BitInterleaving_2bit(unsigned char* PTR_4Bytes) +{ + unsigned int* PTR_UINT = reinterpret_cast(PTR_4Bytes); + unsigned int input = *PTR_UINT; + // + // int order_2bit[16] = {1,5,9,13,3,7,11,15,2,6,10,14,4,8,12,16}; // pre-defined order for + // bit-interleaving in QuantLLM + int order_2bit[16] = { + 2, 6, 10, 14, 4, 8, 12, 16, 1, 5, 9, 13, 3, 7, 11, 15}; // pre-defined 
order for + // bit-interleaving in QuantLLM + unsigned int Frags_2bit[16]; // The highest 2 bits are used to store the extracted fragments. + for (int i = 0; i < 16; i++) Frags_2bit[i] = (input << 2 * (order_2bit[i] - 1)) & 0xc0000000; + // + unsigned int output = 0x00000000; + for (int i = 0; i < 16; i++) output |= (Frags_2bit[i] >> (i * 2)); + // + *PTR_UINT = output; +} + +void BitInterleaving_4bit(unsigned char* PTR_4Bytes) +{ + unsigned int* PTR_UINT = reinterpret_cast(PTR_4Bytes); + unsigned int input = *PTR_UINT; + // + // int order_4bit[8] = {1,5,3,7,2,6,4,8}; // pre-defined order for bit-interleaving in QuantLLM + int order_4bit[8] = { + 2, 6, 4, 8, 1, 5, 3, 7}; // pre-defined order for bit-interleaving in QuantLLM + unsigned int Frags_4bit[8]; // The highest4 bits are used to store the extracted fragments. + for (int i = 0; i < 8; i++) Frags_4bit[i] = (input << 4 * (order_4bit[i] - 1)) & 0xf0000000; + // + unsigned int output = 0x00000000; + for (int i = 0; i < 8; i++) output |= (Frags_4bit[i] >> (i * 4)); + // + *PTR_UINT = output; +} + +/* + * Inputs: + * (1) unsigned char Weight_6bit [M*K*6/8] + * Outputs: + * (1) unsigned char Weight_2bit [M*K*2/8] + * (2) unsigned char Weight_4bit [M*K*4/8] + * + * Assumption: Weight_6bit, Weight_2bit, Weight_4bit all stored continuously in row-major. + * 8 FP6 = 6 Bytes + * 8 FP4 = 4 Bytes + * 8 FP2 = 2 Bytes + */ +void weight_matrix_prepacking(int* FP6Weights, size_t M, size_t K) +{ + assert(M % 64 == 0); + assert(K % 64 == 0); + // + unsigned char* Weight_6bit = reinterpret_cast(FP6Weights); + unsigned char* Weight_2bit = Weight_6bit; + unsigned char* Weight_4bit = Weight_6bit + M * K * 2 / 8; + // + vector A_Segment_2bit[32]; + vector A_Segment_4bit[32]; + // + size_t BytesPerRow = K * 6 / 8; + // Pass-1: (1) 2+4 split; (2) assign weights to 32 threads. + for (size_t i = 0; i < M / 64; i++) // + { + for (size_t j = 0; j < K / 16; j++) { + for (size_t k = 0; k < 64 / 16; k++) { + size_t row = i * 64 + k * 16; + size_t col = j * 16; + unsigned char* StartPTR_1 = Weight_6bit + row * BytesPerRow + col * 6 / 8; + unsigned char* StartPTR_2 = StartPTR_1 + 8 * BytesPerRow; + unsigned char* StartPTR_3 = StartPTR_1 + 8 * 6 / 8; + unsigned char* StartPTR_4 = StartPTR_2 + 8 * 6 / 8; + // Dealing with each 16*16 blocks then... 
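+                // Editor's note: the 8 calls below cover one 16x16 FP6 block (8 x four 1x8 runs);
+                // each FP6 value is split so its top 2 bits land in Weight_2bit (M*K*2/8 Bytes in
+                // total) and its low 4 bits land in Weight_4bit (M*K*4/8 Bytes in total).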
+ for (int l = 0; l < 8; l++) + Assign_32_FP6_To_4_Thread(&A_Segment_2bit[l * 4], + &A_Segment_4bit[l * 4], + StartPTR_1 + l * BytesPerRow, + StartPTR_2 + l * BytesPerRow, + StartPTR_3 + l * BytesPerRow, + StartPTR_4 + l * BytesPerRow); + } + } + } + // Verifying the length of 2_bit segments and 4_bit segments + size_t BytesPerThread_2bit = M * K * 2 / 8 / 32; + size_t BytesPerThread_4bit = M * K * 4 / 8 / 32; + for (int i = 0; i < 32; i++) { + assert(A_Segment_2bit[i].size() == BytesPerThread_2bit); + assert(A_Segment_4bit[i].size() == BytesPerThread_4bit); + } + // Pass-2: Optimizing coleasced global memory access + for (size_t i = 0; i < BytesPerThread_2bit / 4; i++) + for (int t = 0; t < 32; t++) + for (int b = 0; b < 4; b++) + Weight_2bit[i * 128 + t * 4 + (3 - b)] = + A_Segment_2bit[t] + [i * 4 + b]; // why (3-b): special byte order within a register + for (size_t i = 0; i < BytesPerThread_4bit / 4; i++) + for (int t = 0; t < 32; t++) + for (int b = 0; b < 4; b++) + Weight_4bit[i * 128 + t * 4 + (3 - b)] = + A_Segment_4bit[t][i * 4 + b]; // why (3-b):special byte order within a register + // Pass-3: Bit-level interleaving + for (size_t i = 0; i < BytesPerThread_2bit * 32 / 4; i++) + BitInterleaving_2bit(Weight_2bit + 4 * i); + for (size_t i = 0; i < BytesPerThread_4bit * 32 / 4; i++) + BitInterleaving_4bit(Weight_4bit + 4 * i); +} + +#endif diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..44b9adbae794ea8776e9c5972a5445906de84ae7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .mixed_gemm import * +from .moe_gemm import * diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7095059a63c2e5b6a3834c6fce4406705b07c86f Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/cutlass_ops.cpp b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/cutlass_ops.cpp new file mode 100644 index 0000000000000000000000000000000000000000..18e834f3e60a7e63cb6ded145361d10a4d472282 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/cutlass_ops.cpp @@ -0,0 +1,19 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include + +#include "mixed_gemm.h" +#include "moe_gemm.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + // mixed_gemm.h + m.def("mixed_gemm", &mixed_gemm, "Mixed-precision GEMM"); + + // moe_gemm.h + m.def("moe_gemm", &moe_gemm, "MultiGEMM for MoE (16-bit weights)"); + m.def("mixed_moe_gemm", &mixed_moe_gemm, "MultiGEMM for MoE (4-bit/8-bit weights)"); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..14ccf2ce53548240a255170c460893f3f7df7d18 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .mixed_gemm import * diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0dc3a72f1ecd6563792182c5a9a8f1c363de87f5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/__pycache__/mixed_gemm.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/__pycache__/mixed_gemm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8ec0fa1ac41d0c3f1895f3573cc7dcfbbc2b6b02 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/__pycache__/mixed_gemm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/mixed_gemm.cu b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/mixed_gemm.cu new file mode 100644 index 0000000000000000000000000000000000000000..7c522203bb48b41cdf377753a864af3ef521fba4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/mixed_gemm.cu @@ -0,0 +1,93 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include "mixed_gemm.h" +#include "mixed_gemm_api.h" +#include "weight_variant.h" + +// Switch helpers inspired by +// https://github.com/NVIDIA/DALI/blob/main/include/dali/core/static_switch.h +// and https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Dispatch.h + +#define ACT_DTYPE_SWITCH(COND, ...) \ + [&] { \ + if (COND) { \ + using ActivationDtype = __half; \ + return __VA_ARGS__(); \ + } else { \ + using ActivationDtype = __nv_bfloat16; \ + return __VA_ARGS__(); \ + } \ + }() + +#define WEIGHT_VARIANT_SWITCH(COND, ...) 
\ + [&] { \ + if (COND) { \ + constexpr WeightVariant WVariant = WeightVariant::kFP8; \ + return __VA_ARGS__(); \ + } else { \ + constexpr WeightVariant WVariant = WeightVariant::kFP4; \ + return __VA_ARGS__(); \ + } \ + }() + +void mixed_gemm(at::Tensor& output, + at::Tensor& hidden_states, + at::Tensor& weight, + at::Tensor& scales, + c10::optional& bias, + int num_bits, + int activation_raw) +{ + TORCH_CHECK(output.dtype() == hidden_states.dtype(), + "Output and hidden states must have the same dtype"); + TORCH_CHECK(num_bits == 4 || num_bits == 8, "Data width must be 4 or 8"); + TORCH_CHECK(output.size(0) == hidden_states.size(0), "Token dimension mismatch"); + + int32_t m = output.size(0); + int32_t k = hidden_states.size(1); + int32_t n = weight.size(1); + + TORCH_CHECK(weight.size(0) == k, "Weight dimension mismatch"); + + ACT_DTYPE_SWITCH(hidden_states.dtype() == torch::kFloat16, [&] { + WEIGHT_VARIANT_SWITCH(num_bits == 8, [&] { + fastertransformer::CutlassFpAIntBGemmRunner runner = + *MixedGemmContext::Instance().GeMM_Runner(); + + ActivationType activation_type = (ActivationType)activation_raw; + if (!bias.has_value() && activation_type == ActivationType::IDENTITY) { + runner.gemm((ActivationDtype*)hidden_states.data_ptr(), + (const char*)weight.data_ptr(), + (ActivationDtype*)scales.data_ptr(), + (ActivationDtype*)output.data_ptr(), + m, + n, + k, + nullptr, + 0, + at::cuda::getCurrentCUDAStream()); + return; + } else { + ActivationDtype* bias_ptr = nullptr; + if (bias.has_value()) { bias_ptr = (ActivationDtype*)bias.value().data_ptr(); } + runner.gemm_bias_act((ActivationDtype*)hidden_states.data_ptr(), + (char*)weight.data_ptr(), + (ActivationDtype*)scales.data_ptr(), + bias_ptr, + (ActivationDtype*)output.data_ptr(), + m, + n, + k, + activation_type, + nullptr, + 0, + at::cuda::getCurrentCUDAStream()); + return; + } + }); + }); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/mixed_gemm.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/mixed_gemm.py new file mode 100644 index 0000000000000000000000000000000000000000..dddb555e267a08e3430c82f0607db1f90a7a8842 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/mixed_gemm.py @@ -0,0 +1,64 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from ... import DSKernelBase +from ....inference_utils import ActivationType, DtypeEnum +from deepspeed.ops.op_builder import InferenceCutlassBuilder + +from typing import Optional + + +class MixedGEMM(DSKernelBase): + """ + CUTLASS implementation of MoE GEMM. 
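+
+    Annotation (added for clarity, not part of the original docstring): this
+    wrapper targets the weight-quantized GEMM path, where activations are
+    fp16/bf16 and the weights are 4-bit or 8-bit with per-output-channel
+    scales; see ``__call__`` for the expected tensor shapes.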
+ """ + + supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16] + supported_act_fns = [ActivationType.GELU, ActivationType.SILU, ActivationType.RELU, ActivationType.IDENTITY] + + def __init__(self, fp_dtype: DtypeEnum, act_fn: ActivationType, num_bits: int) -> None: + + if not isinstance(fp_dtype, DtypeEnum): + fp_dtype = DtypeEnum(fp_dtype) + + if fp_dtype not in MixedGEMM.supported_dtypes: + raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format( + fp_dtype, MixedGEMM.supported_dtypes)) + + if act_fn not in MixedGEMM.supported_act_fns: + raise ValueError("Unsupported activation function: {}, supported_act_fns are {}".format( + act_fn, MixedGEMM.supported_act_fns)) + + if num_bits != 4 and num_bits != 8: + raise ValueError("Unsupported num_bits: {}, supported num_bits are 4 and 8".format(num_bits)) + + inf_module = InferenceCutlassBuilder().load() + self.num_bits = num_bits + self.kernel = inf_module.moe_gemm + self.act_fn = act_fn + + def __call__(self, + output: torch.Tensor, + hidden_states: torch.Tensor, + weights: torch.Tensor, + scales: torch.Tensor, + biases: Optional[torch.Tensor] = None) -> None: + """ + Performs a MoE GEMM. Note that the stride between token inputs must be even (the distance between byte 1 of token 0 and token 1 must be the same as the distance between byte 1 of token 1 and token 2). + + Arguments: + output (torch.Tensor): The output of the MoE GEMM of shape [n_tokens, out_neurons]. + hidden_states (torch.Tensor): The direct input for the MoE GEMM of shape [n_tokens, in_neurons]. + weights (torch.Tensor): The weights of shape [in_neurons, out_neurons]. These weights must be contiguous. + scales (torch.Tensor): The scales of shape [out_neurons]. These scales must be contiguous. + biases (torch.Tensor): The biases of shape [out_neurons]. These biases must be contiguous. + + Returns: + output + """ + self.kernel(output, hidden_states, weights, biases, self.num_bits, self.act_fn) + return output diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/mixed_gemm_api.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/mixed_gemm_api.h new file mode 100644 index 0000000000000000000000000000000000000000..74fc07ffc4a2130c09c82872552ff5ccedcb2b8b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/mixed_gemm/mixed_gemm_api.h @@ -0,0 +1,57 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "activation_type.h" +#include "weight_variant.h" + +namespace fastertransformer { + +template +class CutlassFpAIntBGemmRunner { +public: + void gemm(const T* A, + const char* B, + const T* weight_scales, + T* C, + int m, + int n, + int k, + char* workspace_ptr, + const size_t workspace_bytes, + cudaStream_t stream); + + void gemm_bias_act(const T* A, + const char* B, + const T* weight_scales, + const T* biases, + T* C, + int m, + int n, + int k, + ActivationType activation_type, + char* workspace_ptr, + const size_t workspace_bytes, + cudaStream_t stream); +}; + +} // namespace fastertransformer + +template +class MixedGemmContext { +public: + MixedGemmContext() { _runner = new fastertransformer::CutlassFpAIntBGemmRunner(); } + + virtual ~MixedGemmContext() { delete _runner; } + + static MixedGemmContext& Instance() + { + static MixedGemmContext _ctx; + return _ctx; + } + + fastertransformer::CutlassFpAIntBGemmRunner* GeMM_Runner() const { return _runner; } + + fastertransformer::CutlassFpAIntBGemmRunner* _runner; +}; diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..aff4e77bba98cbb3b6c67bcf87b831a110eb9414 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/__init__.py @@ -0,0 +1,7 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .mixed_moe_gemm import * +from .moe_gemm import * diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5356be1bf242a5cf27e61052875658f138a565d8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/__pycache__/mixed_moe_gemm.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/__pycache__/mixed_moe_gemm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1936d20a316d13b447abbb440f665583d768dbad Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/__pycache__/mixed_moe_gemm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/__pycache__/moe_gemm.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/__pycache__/moe_gemm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f6634296303bc5976281e231c565c1108f13d1ba Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/__pycache__/moe_gemm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/mixed_moe_gemm.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/mixed_moe_gemm.py new 
file mode 100644 index 0000000000000000000000000000000000000000..9c55ce34153288e56046139dfce57a91778d7486 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/mixed_moe_gemm.py @@ -0,0 +1,67 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from ... import DSKernelBase +from ....inference_utils import ActivationType, DtypeEnum +from deepspeed.ops.op_builder import InferenceCutlassBuilder + +from typing import Optional + + +class MixedMoEGEMM(DSKernelBase): + """ + CUTLASS implementation of MoE GEMM. + """ + + supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16] + supported_act_fns = [ActivationType.GELU, ActivationType.SILU, ActivationType.RELU, ActivationType.IDENTITY] + + def __init__(self, fp_dtype: DtypeEnum, act_fn: ActivationType, num_bits: int) -> None: + + if not isinstance(fp_dtype, DtypeEnum): + fp_dtype = DtypeEnum(fp_dtype) + + if fp_dtype not in MixedMoEGEMM.supported_dtypes: + raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format( + fp_dtype, MixedMoEGEMM.supported_dtypes)) + + if act_fn not in MixedMoEGEMM.supported_act_fns: + raise ValueError("Unsupported activation function: {}, supported_act_fns are {}".format( + act_fn, MixedMoEGEMM.supported_act_fns)) + + if num_bits != 4 and num_bits != 8: + raise ValueError("Unsupported num_bits: {}, supported num_bits are 4 and 8".format(num_bits)) + + inf_module = InferenceCutlassBuilder().load() + self.num_bits = num_bits + self.kernel = inf_module.moe_gemm + self.act_fn = act_fn + + def __call__(self, + ordered_output: torch.Tensor, + ordered_input: torch.Tensor, + weights: torch.Tensor, + scales: torch.Tensor, + total_rows_before_expert: torch.Tensor, + biases: Optional[torch.Tensor] = None) -> None: + """ + Performs a MoE GEMM. Note that the stride between token inputs must be even (the distance between byte 1 of token 0 and token 1 must be the same as the distance between byte 1 of token 1 and token 2). + + Arguments: + ordered_output (torch.Tensor): The output of the MoE GEMM of shape [n_tokens, out_neurons]. + ordered_input (torch.Tensor): The direct input for the MoE GEMM of shape [n_tokens, in_neurons]. + weights (torch.Tensor): The weights of shape [n_experts, in_neurons, out_neurons]. These weights must be contiguous. + scales (torch.Tensor): The scales of shape [n_experts, out_neurons]. These scales must be contiguous. + total_rows_before_expert (torch.Tensor): The total number of rows before each expert of shape [n_experts]. + biases (torch.Tensor): The biases of shape [n_experts, out_neurons]. These biases must be contiguous. + + Returns: + ordered_output + """ + self.kernel(ordered_output, ordered_input, weights, scales, biases, total_rows_before_expert, self.num_bits, + self.act_fn) + return ordered_output diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/moe_gemm.cu b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/moe_gemm.cu new file mode 100644 index 0000000000000000000000000000000000000000..d1cafc9fff4c54bc53fa059ec42ec4a2dfecb0eb --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/moe_gemm.cu @@ -0,0 +1,175 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include "moe_gemm.h" +#include "moe_gemm_api.h" +#include "weight_variant.h" + +// Switch helpers inspired by +// https://github.com/NVIDIA/DALI/blob/main/include/dali/core/static_switch.h +// and https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/Dispatch.h + +#define HIDDEN_DTYPE_SWITCH(COND, ...) \ + [&] { \ + if (COND) { \ + using ActivationDtype = __half; \ + constexpr WeightVariant WVariant = WeightVariant::kFP16; \ + return __VA_ARGS__(); \ + } else { \ + using ActivationDtype = __nv_bfloat16; \ + constexpr WeightVariant WVariant = WeightVariant::kBF16; \ + return __VA_ARGS__(); \ + } \ + }() + +void moe_gemm(at::Tensor& output, + at::Tensor& hidden_states, + at::Tensor& weight, + c10::optional& bias, + at::Tensor& total_rows_before_expert, + int activation_raw) +{ + TORCH_CHECK(output.dtype() == hidden_states.dtype(), + "Output and hidden states must have the same dtype"); + TORCH_CHECK(output.dtype() == weight.dtype(), "Output and weight must have the same dtype"); + + int64_t total_rows = hidden_states.size(0); + int64_t gemm_k = hidden_states.size(1); + int64_t gemm_n = weight.size(2); + int num_experts = weight.size(0); + + TORCH_CHECK(total_rows == output.size(0), "Total rows dimension mismatch"); + TORCH_CHECK(gemm_k == weight.size(1), "GEMM K dimension mismatch"); + TORCH_CHECK(gemm_n == output.size(1), "GEMM N dimension mismatch"); + TORCH_CHECK(num_experts == total_rows_before_expert.size(0), "Number of experts mismatch"); + + HIDDEN_DTYPE_SWITCH(hidden_states.dtype() == torch::kFloat16, [&] { + fastertransformer::MoeGemmRunner runner = + *MoeGemmContext::Instance().GeMM_Runner(); + + ActivationType activation_type = (ActivationType)activation_raw; + if (!bias.has_value() && activation_type == ActivationType::IDENTITY) { + runner.moe_gemm((ActivationDtype*)hidden_states.data_ptr(), + (char*)weight.data_ptr(), + nullptr, + (ActivationDtype*)output.data_ptr(), + (int64_t*)total_rows_before_expert.data_ptr(), + total_rows, + gemm_n, + gemm_k, + num_experts, + at::cuda::getCurrentCUDAStream()); + return; + } else { + ActivationDtype* bias_ptr = nullptr; + if (bias.has_value()) { + bias_ptr = (ActivationDtype*)bias.value().data_ptr(); + TORCH_CHECK(num_experts == bias.value().size(0), "Number of experts mismatch"); + TORCH_CHECK(gemm_n == bias.value().size(1), "GEMM N dimension mismatch"); + } + runner.moe_gemm_bias_act((ActivationDtype*)hidden_states.data_ptr(), + (char*)weight.data_ptr(), + nullptr, + bias_ptr, + (ActivationDtype*)output.data_ptr(), + (int64_t*)total_rows_before_expert.data_ptr(), + total_rows, + gemm_n, + gemm_k, + num_experts, + activation_type, + at::cuda::getCurrentCUDAStream()); + return; + } + }); +} + +#define ACT_DTYPE_SWITCH(COND, ...) \ + [&] { \ + if (COND) { \ + using ActivationDtype = __half; \ + return __VA_ARGS__(); \ + } else { \ + using ActivationDtype = __nv_bfloat16; \ + return __VA_ARGS__(); \ + } \ + }() + +#define WEIGHT_VARIANT_SWITCH(COND, ...) 
\ + [&] { \ + if (COND) { \ + constexpr WeightVariant WVariant = WeightVariant::kFP8; \ + return __VA_ARGS__(); \ + } else { \ + constexpr WeightVariant WVariant = WeightVariant::kFP4; \ + return __VA_ARGS__(); \ + } \ + }() + +void mixed_moe_gemm(at::Tensor& output, + at::Tensor& hidden_states, + at::Tensor& weight, + at::Tensor& scales, + c10::optional& bias, + at::Tensor& total_rows_before_expert, + int num_bits, + int activation_raw) +{ + TORCH_CHECK(output.dtype() == hidden_states.dtype(), + "Output and hidden states must have the same dtype"); + + int64_t total_rows = hidden_states.size(0); + int64_t gemm_k = hidden_states.size(1); + int64_t gemm_n = weight.size(2); + int num_experts = weight.size(0); + + TORCH_CHECK(total_rows == output.size(0), "Total rows dimension mismatch"); + TORCH_CHECK(gemm_k == weight.size(1), "GEMM K dimension mismatch"); + TORCH_CHECK(gemm_n == output.size(1), "GEMM N dimension mismatch"); + TORCH_CHECK(num_experts == total_rows_before_expert.size(0), "Number of experts mismatch"); + + ACT_DTYPE_SWITCH(hidden_states.dtype() == torch::kFloat16, [&] { + WEIGHT_VARIANT_SWITCH(num_bits == 8, [&] { + fastertransformer::MoeGemmRunner runner = + *MoeGemmContext::Instance().GeMM_Runner(); + + ActivationType activation_type = (ActivationType)activation_raw; + if (!bias.has_value() && activation_type == ActivationType::IDENTITY) { + runner.moe_gemm((ActivationDtype*)hidden_states.data_ptr(), + (char*)weight.data_ptr(), + (ActivationDtype*)scales.data_ptr(), + (ActivationDtype*)output.data_ptr(), + (int64_t*)total_rows_before_expert.data_ptr(), + total_rows, + gemm_n, + gemm_k, + num_experts, + at::cuda::getCurrentCUDAStream()); + return; + } else { + ActivationDtype* bias_ptr = nullptr; + if (bias.has_value()) { + bias_ptr = (ActivationDtype*)bias.value().data_ptr(); + TORCH_CHECK(num_experts == bias.value().size(0), "Number of experts mismatch"); + TORCH_CHECK(gemm_n == bias.value().size(1), "GEMM N dimension mismatch"); + } + runner.moe_gemm_bias_act((ActivationDtype*)hidden_states.data_ptr(), + (char*)weight.data_ptr(), + (ActivationDtype*)scales.data_ptr(), + bias_ptr, + (ActivationDtype*)output.data_ptr(), + (int64_t*)total_rows_before_expert.data_ptr(), + total_rows, + gemm_n, + gemm_k, + num_experts, + activation_type, + at::cuda::getCurrentCUDAStream()); + return; + } + }); + }); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/moe_gemm.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/moe_gemm.h new file mode 100644 index 0000000000000000000000000000000000000000..dfd3d4561567bf203736a6eb9cc1a267a68ce003 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/moe_gemm.h @@ -0,0 +1,24 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include + +void moe_gemm(at::Tensor& output, + at::Tensor& hidden_states, + at::Tensor& weight, + c10::optional& bias, + at::Tensor& total_rows_before_expert, + int activation_raw); + +void mixed_moe_gemm(at::Tensor& output, + at::Tensor& hidden_states, + at::Tensor& weight, + at::Tensor& scales, + c10::optional& bias, + at::Tensor& total_rows_before_expert, + int num_bits, + int activation_raw); diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/moe_gemm.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/moe_gemm.py new file mode 100644 index 0000000000000000000000000000000000000000..0cc233e8d87a44a5d8298f116a56136cb35fe625 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/moe_gemm.py @@ -0,0 +1,60 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from ... import DSKernelBase +from ....inference_utils import ActivationType, DtypeEnum +from deepspeed.ops.op_builder import InferenceCutlassBuilder + +from typing import Optional + + +class MoEGEMM(DSKernelBase): + """ + CUTLASS implementation of MoE GEMM. + """ + + supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16] + supported_act_fns = [ActivationType.GELU, ActivationType.SILU, ActivationType.RELU, ActivationType.IDENTITY] + + def __init__(self, fp_dtype: DtypeEnum, act_fn: ActivationType) -> None: + + if not isinstance(fp_dtype, DtypeEnum): + fp_dtype = DtypeEnum(fp_dtype) + + if fp_dtype not in MoEGEMM.supported_dtypes: + raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format( + fp_dtype, MoEGEMM.supported_dtypes)) + + if act_fn not in MoEGEMM.supported_act_fns: + raise ValueError("Unsupported activation function: {}, supported_act_fns are {}".format( + act_fn, MoEGEMM.supported_act_fns)) + + inf_module = InferenceCutlassBuilder().load() + self.kernel = inf_module.moe_gemm + self.act_fn = act_fn + + def __call__(self, + ordered_output: torch.Tensor, + ordered_input: torch.Tensor, + weights: torch.Tensor, + total_rows_before_expert: torch.Tensor, + biases: Optional[torch.Tensor] = None) -> None: + """ + Performs a MoE GEMM. Note that the stride between token inputs must be even (the distance between byte 1 of token 0 and token 1 must be the same as the distance between byte 1 of token 1 and token 2). + + Arguments: + ordered_output (torch.Tensor): The output of the MoE GEMM of shape [n_tokens, out_neurons]. + ordered_input (torch.Tensor): The direct input for the MoE GEMM of shape [n_tokens, in_neurons]. + weights (torch.Tensor): The weights of shape [n_experts, in_neurons, out_neurons]. These weights must be contiguous. + total_rows_before_expert (torch.Tensor): The total number of rows before each expert of shape [n_experts]. + biases (torch.Tensor): The biases of shape [n_experts, out_neurons]. These biases must be contiguous. 
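+
+        Example (illustrative sketch only; tensor names are hypothetical and a
+        CUDA build of the CUTLASS inference ops is assumed):
+
+            moe_gemm = MoEGEMM(DtypeEnum.fp16, ActivationType.IDENTITY)
+            moe_gemm(ordered_output, ordered_input, weights,
+                     total_rows_before_expert, biases=None)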
+ + Returns: + ordered_output + """ + self.kernel(ordered_output, ordered_input, weights, biases, total_rows_before_expert, self.act_fn) + return ordered_output diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/moe_gemm_api.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/moe_gemm_api.h new file mode 100644 index 0000000000000000000000000000000000000000..7ad92070b35f06c3a2807c0c8ab544f213be9c6e --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/moe_gemm/moe_gemm_api.h @@ -0,0 +1,64 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "activation_type.h" +#include "weight_variant.h" + +namespace fastertransformer { + +template +class MoeGemmRunner { +public: + MoeGemmRunner(); + + void moe_gemm_bias_act(const T* A, + const char* B, + const T* weight_scales, + const T* biases, + T* C, + int64_t* total_rows_before_expert, + int64_t total_rows, + int64_t gemm_n, + int64_t gemm_k, + int num_experts, + ActivationType activation_type, + cudaStream_t stream); + + void moe_gemm(const T* A, + const char* B, + const T* weight_scales, + T* C, + int64_t* total_rows_before_expert, + int64_t total_rows, + int64_t gemm_n, + int64_t gemm_k, + int num_experts, + cudaStream_t stream); + +private: + int sm_; + int multi_processor_count_; +}; + +} // namespace fastertransformer + +template +class MoeGemmContext { +public: + MoeGemmContext() { _runner = new fastertransformer::MoeGemmRunner(); } + + virtual ~MoeGemmContext() { delete _runner; } + + static MoeGemmContext& Instance() + { + static MoeGemmContext _ctx; + return _ctx; + } + + fastertransformer::MoeGemmRunner* GeMM_Runner() const { return _runner; } + + fastertransformer::MoeGemmRunner* _runner; +}; diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/shared_resources/weight_variant.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/shared_resources/weight_variant.h new file mode 100644 index 0000000000000000000000000000000000000000..4d17c799f7265939ceea4e973d6c5b1285ecfcf1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/cutlass_ops/shared_resources/weight_variant.h @@ -0,0 +1,11 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +// Data structure that allows us to abstract internal CUTLASS datatypes/mappings +// to the DeepSpeed-Kernels repo. + +#pragma once + +enum WeightVariant { kFP16, kBF16, kFP8, kFP4 }; diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/activation_type.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/activation_type.h new file mode 100644 index 0000000000000000000000000000000000000000..a44921d5d650d3fb4d68522e95b82cda08a1ad60 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/activation_type.h @@ -0,0 +1,17 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +enum ActivationType { + GELU = 0, + RELU = 1, + SILU = 2, + GEGLU = 3, + ReGLU = 4, + SiGLU = 5, + IDENTITY = 6, + InvalidType = -1 +}; diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/conversion_utils.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/conversion_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..3a90a3e91ddf7d38d943e2b95a6f731796369e97 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/conversion_utils.h @@ -0,0 +1,640 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include "ds_kernel_utils.h" + +#include + +#ifdef BF16_AVAILABLE +#include +#endif + +namespace conversion { + +// Basic primitive for constructing conversions +template +DS_D_INLINE TO to(FROM val) +{ + return to(val); +} + +// Specializations + +/********************* Identity Conversions *********************/ +/* +Identity conversions are useful in templated functions where we might have +a fixed destination type. For example, I might have a kernel that accepts +__half, __nv_bfloat16, and float but always want to do the core computation +at floating point: + +T mem_value = input[idx]; +float compute_value = conversion::to(mem_value); + +In practice, we should be able to elide the second template parameter: +float compute_val = conversion::to(mem_value); + +In this case, we need an implementation to handle the T = float case + +NOTE: The type inferencing system appears to be unable to handle inferring the first +template parameter, even in the trivial case. +*/ + +// Floating point types +template <> +DS_D_INLINE double to(double val) +{ + return val; +} +template <> +DS_D_INLINE float to(float val) +{ + return val; +} +template <> +DS_D_INLINE __half to(__half val) +{ + return val; +} +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE __nv_bfloat16 to(__nv_bfloat16 val) +{ + return val; +} +#endif + +// Integer types +template <> +DS_D_INLINE int8_t to(int8_t val) +{ + return val; +} +template <> +DS_D_INLINE uint8_t to(uint8_t val) +{ + return val; +} +template <> +DS_D_INLINE int16_t to(int16_t val) +{ + return val; +} +template <> +DS_D_INLINE uint16_t to(uint16_t val) +{ + return val; +} +template <> +DS_D_INLINE int32_t to(int32_t val) +{ + return val; +} +template <> +DS_D_INLINE uint32_t to(uint32_t val) +{ + return val; +} +template <> +DS_D_INLINE int64_t to(int64_t val) +{ + return val; +} +template <> +DS_D_INLINE uint64_t to(uint64_t val) +{ + return val; +} + +// TODO: evaluate if we want bools + +/********************* To Double Conversions *********************/ + +// * to double variants + +// Would normally like to not use C cast, but this is an important enough conversion +// to keep +template <> +DS_D_INLINE double to(float val) +{ +#ifdef PTX_AVAILABLE + double ret_val; + asm("ctv.rn.f64.f32 %0, %1;\n" : "=d"(ret_val) : "f"(val)); + return ret_val; +#else + return double(val); +#endif +} +// Note: there is a CVT instruction for __half -> double, but there's no inline interface +// for passing a single half value +template <> +DS_D_INLINE double to(__half val) +{ + return to(__half2float(val)); +} +template <> +DS_D_INLINE double to(int64_t val) +{ + return __ll2double_rn(val); +} +template <> +DS_D_INLINE double to(int32_t val) +{ + return __int2double_rn(val); +} +template <> +DS_D_INLINE double 
to(int16_t val) +{ + return __int2double_rn(val); +} +template <> +DS_D_INLINE double to(int8_t val) +{ + return __int2double_rn(val); +} +template <> +DS_D_INLINE double to(uint64_t val) +{ + return __ull2double_rn(val); +} +template <> +DS_D_INLINE double to(uint32_t val) +{ + return __uint2double_rn(val); +} +template <> +DS_D_INLINE double to(uint16_t val) +{ + return __uint2double_rn(val); +} +template <> +DS_D_INLINE double to(uint8_t val) +{ + return __uint2double_rn(val); +} + +// Same applies here +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE double to(__nv_bfloat16 val) +{ + return to(__bfloat162float(val)); +} +#endif + +/********************* To Float Conversions *********************/ + +template <> +DS_D_INLINE float to(double val) +{ + return __double2float_rn(val); +} +template <> +DS_D_INLINE float to(__half val) +{ + return __half2float(val); +} +template <> +DS_D_INLINE float to(int64_t val) +{ + return __ll2float_rn(val); +} +template <> +DS_D_INLINE float to(int32_t val) +{ + return __int2float_rn(val); +} +template <> +DS_D_INLINE float to(int16_t val) +{ + return __int2float_rn(val); +} +template <> +DS_D_INLINE float to(int8_t val) +{ + return __int2float_rn(val); +} +template <> +DS_D_INLINE float to(uint64_t val) +{ + return __ull2float_rn(val); +} +template <> +DS_D_INLINE float to(uint32_t val) +{ + return __uint2float_rn(val); +} +template <> +DS_D_INLINE float to(uint16_t val) +{ + return __uint2float_rn(val); +} +template <> +DS_D_INLINE float to(uint8_t val) +{ + return __uint2float_rn(val); +} + +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE float to(__nv_bfloat16 val) +{ + return __bfloat162float(val); +} +#endif + +/********************* To Float2 Conversions *********************/ +template <> +DS_D_INLINE float2 to(__half2 val) +{ + return __half22float2(val); +} + +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE float2 to(__nv_bfloat162 val) +{ + return __bfloat1622float2(val); +} +#endif + +/********************* To Half Conversions *********************/ +template <> +DS_D_INLINE __half to(double val) +{ +#ifdef __HIP_PLATFORM_AMD__ + float val_f = __double2float_rn(val); + return __float2half(val_f); +#else + return __double2half(val); +#endif +} +template <> +DS_D_INLINE __half to(float val) +{ + return __float2half(val); +} +template <> +DS_D_INLINE __half to(int64_t val) +{ + return __ll2half_rn(val); +} +template <> +DS_D_INLINE __half to(int32_t val) +{ + return __int2half_rn(val); +} +template <> +DS_D_INLINE __half to(int16_t val) +{ + return __short2half_rn(val); +} +template <> +DS_D_INLINE __half to(int8_t val) +{ + return __int2half_rn(val); +} +template <> +DS_D_INLINE __half to(uint64_t val) +{ + return __ull2half_rn(val); +} +template <> +DS_D_INLINE __half to(uint32_t val) +{ + return __uint2half_rn(val); +} +template <> +DS_D_INLINE __half to(uint16_t val) +{ + return __ushort2half_rn(val); +} +template <> +DS_D_INLINE __half to(uint8_t val) +{ + return __uint2half_rn(val); +} + +#ifdef BF16_AVAILABLE +// No direct conversion +template <> +DS_D_INLINE __half to(__nv_bfloat16 val) +{ + return to<__half>(to(val)); +} +#endif + +/********************* To Half2 Conversions *********************/ +template <> +DS_D_INLINE __half2 to(float2 val) +{ + return __float22half2_rn(val); +} +template <> +DS_D_INLINE __half2 to(float val) +{ + return __float2half2_rn(val); +} + +#ifdef BF16_AVAILABLE +// No direct conversion +template <> +DS_D_INLINE __half2 to(__nv_bfloat162 val) +{ + return to<__half2>(to(val)); +} +#endif + 
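+/*
+Illustrative usage sketch (annotation, not part of the original header): kernels
+typically load values in their storage type and promote to float for arithmetic
+via conversion::to<TargetType>(value), for example:
+
+    template <typename T>
+    DS_D_INLINE float promote_and_scale(T val, float alpha)
+    {
+        return conversion::to<float>(val) * alpha;
+    }
+*/
+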
+/********************* To BF16 Conversions *********************/ +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE __nv_bfloat16 to(double val) +{ + return __double2bfloat16(val); +} +template <> +DS_D_INLINE __nv_bfloat16 to(float val) +{ + return __float2bfloat16(val); +} +template <> +DS_D_INLINE __nv_bfloat16 to(int64_t val) +{ + return __ll2bfloat16_rn(val); +} +template <> +DS_D_INLINE __nv_bfloat16 to(int32_t val) +{ + return __int2bfloat16_rn(val); +} +template <> +DS_D_INLINE __nv_bfloat16 to(int16_t val) +{ + return __short2bfloat16_rn(val); +} +template <> +DS_D_INLINE __nv_bfloat16 to(int8_t val) +{ + return __int2bfloat16_rn(val); +} +template <> +DS_D_INLINE __nv_bfloat16 to(uint64_t val) +{ + return __ull2bfloat16_rn(val); +} +template <> +DS_D_INLINE __nv_bfloat16 to(uint32_t val) +{ + return __uint2bfloat16_rn(val); +} +template <> +DS_D_INLINE __nv_bfloat16 to(uint16_t val) +{ + return __ushort2bfloat16_rn(val); +} +template <> +DS_D_INLINE __nv_bfloat16 to(uint8_t val) +{ + return __uint2bfloat16_rn(val); +} +#endif + +/********************* To BF162 Conversions *********************/ +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE __nv_bfloat162 to(float2 val) +{ + return __float22bfloat162_rn(val); +} +template <> +DS_D_INLINE __nv_bfloat162 to(float val) +{ + return __float2bfloat162_rn(val); +} +template <> +DS_D_INLINE __nv_bfloat162 to(__half2 val) +{ + return to<__nv_bfloat162>(to(val)); +} +#endif + +/********************* To INT64_T Conversions *********************/ +template <> +DS_D_INLINE int64_t to(double val) +{ + return __double2ll_rn(val); +} +template <> +DS_D_INLINE int64_t to(float val) +{ + return __float2ll_rn(val); +} +template <> +DS_D_INLINE int64_t to(__half val) +{ + return __half2ll_rn(val); +} +// No direct support for integer casts at the C++ level and I don't feel they're so important +// to demand an PTX at this time + +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE int64_t to(__nv_bfloat16 val) +{ + return __bfloat162ll_rn(val); +} +#endif + +/********************* To INT32_T Conversions *********************/ +template <> +DS_D_INLINE int32_t to(double val) +{ + return __double2int_rn(val); +} +template <> +DS_D_INLINE int32_t to(float val) +{ + return __float2int_rn(val); +} +template <> +DS_D_INLINE int32_t to(__half val) +{ + return __half2int_rn(val); +} +// No direct support for integer casts at the C++ level and I don't feel they're so important +// to demand an PTX at this time + +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE int32_t to(__nv_bfloat16 val) +{ + return __bfloat162int_rn(val); +} +#endif + +/********************* To INT16_T Conversions *********************/ +template <> +DS_D_INLINE int16_t to(double val) +{ + return __double2int_rn(val); +} +template <> +DS_D_INLINE int16_t to(float val) +{ + return __float2int_rn(val); +} +template <> +DS_D_INLINE int16_t to(__half val) +{ + return __half2int_rn(val); +} +// No direct support for integer casts at the C++ level and I don't feel they're so important +// to demand an PTX at this time + +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE int16_t to(__nv_bfloat16 val) +{ + return __bfloat162int_rn(val); +} +#endif + +/********************* To INT8_T Conversions *********************/ +template <> +DS_D_INLINE int8_t to(double val) +{ + return __double2int_rn(val); +} +template <> +DS_D_INLINE int8_t to(float val) +{ + return __float2int_rn(val); +} +template <> +DS_D_INLINE int8_t to(__half val) +{ + return __half2int_rn(val); +} +// No direct support for integer 
casts at the C++ level and I don't feel they're so important +// to demand an PTX at this time + +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE int8_t to(__nv_bfloat16 val) +{ + return __bfloat162int_rn(val); +} +#endif + +/********************* To UINT64_T Conversions *********************/ +template <> +DS_D_INLINE uint64_t to(double val) +{ + return __double2ull_rn(val); +} +template <> +DS_D_INLINE uint64_t to(float val) +{ + return __float2ull_rn(val); +} +template <> +DS_D_INLINE uint64_t to(__half val) +{ + return __half2ull_rn(val); +} +// No direct support for integer casts at the C++ level and I don't feel they're so important +// to demand an PTX at this time + +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE uint64_t to(__nv_bfloat16 val) +{ + return __bfloat162ull_rn(val); +} +#endif + +/********************* To UINT32_T Conversions *********************/ +template <> +DS_D_INLINE uint32_t to(double val) +{ + return __double2uint_rn(val); +} +template <> +DS_D_INLINE uint32_t to(float val) +{ + return __float2uint_rn(val); +} +template <> +DS_D_INLINE uint32_t to(__half val) +{ + return __half2uint_rn(val); +} +// No direct support for integer casts at the C++ level and I don't feel they're so important +// to demand an PTX at this time + +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE uint32_t to(__nv_bfloat16 val) +{ + return __bfloat162uint_rn(val); +} +#endif + +/********************* To UINT16_T Conversions *********************/ +template <> +DS_D_INLINE uint16_t to(double val) +{ + return __double2uint_rn(val); +} +template <> +DS_D_INLINE uint16_t to(float val) +{ + return __float2uint_rn(val); +} +template <> +DS_D_INLINE uint16_t to(__half val) +{ + return __half2uint_rn(val); +} +// No direct support for integer casts at the C++ level and I don't feel they're so important +// to demand an PTX at this time + +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE uint16_t to(__nv_bfloat16 val) +{ + return __bfloat162uint_rn(val); +} +#endif + +/********************* To UINT8_T Conversions *********************/ +template <> +DS_D_INLINE uint8_t to(double val) +{ + return __double2uint_rn(val); +} +template <> +DS_D_INLINE uint8_t to(float val) +{ + return __float2uint_rn(val); +} +template <> +DS_D_INLINE uint8_t to(__half val) +{ + return __half2uint_rn(val); +} +// No direct support for integer casts at the C++ level and I don't feel they're so important +// to demand an PTX at this time + +#ifdef BF16_AVAILABLE +template <> +DS_D_INLINE uint8_t to(__nv_bfloat16 val) +{ + return __bfloat162uint_rn(val); +} +#endif + +} // namespace conversion diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/ds_kernel_utils.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/ds_kernel_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..8e4888109fcd8db1b8ce0f1419c816255f7b56a2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/ds_kernel_utils.h @@ -0,0 +1,58 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/* +Centralized header file for preprocessor macros and constants +used throughout the codebase. 
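+
+(Summary added for clarity; not part of the original comment.) This header
+provides the DS_HD_INLINE / DS_D_INLINE function qualifiers, a constexpr
+hw_warp_size (64 on ROCm, 32 on CUDA), the PTX_AVAILABLE and
+ASYNC_COPY_AVAILABLE architecture guards, and the next_pow2() helper.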
+*/ + +#pragma once + +#include +#include + +#ifdef BF16_AVAILABLE +#include +#endif + +#define DS_HD_INLINE __host__ __device__ __forceinline__ +#define DS_D_INLINE __device__ __forceinline__ + +#ifdef __HIP_PLATFORM_AMD__ + +// constexpr variant of warpSize for templating +constexpr int hw_warp_size = 64; +#define HALF_PRECISION_AVAILABLE = 1 +#include +#include + +#else // !__HIP_PLATFORM_AMD__ + +// constexpr variant of warpSize for templating +constexpr int hw_warp_size = 32; + +#if __CUDA_ARCH__ >= 530 +#define HALF_PRECISION_AVAILABLE = 1 +#define PTX_AVAILABLE +#endif // __CUDA_ARCH__ >= 530 + +#if __CUDA_ARCH__ >= 800 +#define ASYNC_COPY_AVAILABLE +#endif // __CUDA_ARCH__ >= 800 + +#include +#include + +#endif //__HIP_PLATFORM_AMD__ + +inline int next_pow2(const int val) +{ + int rounded_val = val - 1; + rounded_val |= rounded_val >> 1; + rounded_val |= rounded_val >> 2; + rounded_val |= rounded_val >> 4; + rounded_val |= rounded_val >> 8; + return rounded_val + 1; +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/memory_access_utils.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/memory_access_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..6789714d27c7ecb952255486fccbcc7a1686a676 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/memory_access_utils.h @@ -0,0 +1,1115 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include +#include "ds_kernel_utils.h" + +/////////////////////////////// Memory Access Utils /////////////////////////////// +namespace mem_access { + +enum class LoadPolicy { + CacheAll, // Cache at all levels + CacheGlobal, // Cache at L2 only + CacheStreaming // Cache with evict first policy +}; + +enum class StorePolicy { + Writeback, // Cache in L1, write-back on eviction + CacheGlobal, // Bypass L1, write-back on eviction + CacheStreaming // Allocate cache line with evict first policy +}; + +template +__device__ __forceinline__ void load_global(void* dst, const void* src); + +template +__device__ __forceinline__ void load_global(void* dst, const void* src, bool do_access); + +// Shared accesses have no cache policy +template +__device__ __forceinline__ void load_shared(void* dst, const void* src); + +template +__device__ __forceinline__ void load_shared(void* dst, const void* src, bool do_access); + +template +__device__ __forceinline__ void store_global(void* dst, const void* src); + +// Shared accesses have no cache policy +template +__device__ __forceinline__ void store_shared(void* dst, const void* src); + +#ifdef ASYNC_COPY_AVAILABLE +template +__device__ __forceinline__ void memcpy_async(void* shr, const void* gbl); + +template +__device__ __forceinline__ void memcpy_async_nop(void* shr, const void* gbl, bool predicate); + +template +__device__ __forceinline__ void memcpy_async_zero(void* shr, const void* gbl, bool predicate); + +__device__ __forceinline__ void memcpy_async_fence(); + +template +__device__ __forceinline__ void memcpy_async_wait(); + +template +__device__ __forceinline__ void tail_complete_wait(int remaining_stages); +#endif + +// Util for tracking pipeline buffers +// TODO: Evaluate whether this should also be guarded by ASYNC_COPY_AVAILABLE +template +class BufferTracker { +public: + int current_state; + + __device__ __forceinline__ BufferTracker() : current_state(0) {} + + __device__ __forceinline__ int get() + { + int 
return_val = current_state++; + current_state = (current_state == max ? 0 : current_state); + return return_val; + } +}; + +__device__ __forceinline__ uint32_t lane_id() +{ +#ifdef PTX_AVAILABLE + unsigned int lane_id; + asm volatile("mov.u32 %0, %%laneid;" : "=r"(lane_id)); + return lane_id; +#else + return threadIdx.x & (warpSize - 1); // Portable +#endif +} + +/////////// Load Global /////////// +template <> +__device__ __forceinline__ void load_global<16>(void* dst, const void* src) +{ + uint4* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile("ld.global.ca.v4.u32 {%0, %1, %2, %3}, [%4];\n" + : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w) + : "l"(src)); +#else + const uint4* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_global<16>(void* dst, const void* src, bool do_access) +{ + uint4* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %5, 0;\n" + "\tmov.b32 %0, 0;\n" + "\tmov.b32 %1, 0;\n" + "\tmov.b32 %2, 0;\n" + "\tmov.b32 %3, 0;\n" + "\t@p ld.global.v4.u32 {%0, %1, %2, %3}, [%4];\n" + "}\n" + : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w) + : "l"(src), "r"((int)do_access)); +#else + const uint4* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0].x = 0; + data[0].y = 0; + data[0].z = 0; + data[0].w = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_global<16, LoadPolicy::CacheGlobal>(void* dst, const void* src) +{ + uint4* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile("ld.global.cg.v4.u32 {%0, %1, %2, %3}, [%4];\n" + : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w) + : "l"(src)); +#else + const uint4* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_global<16, LoadPolicy::CacheGlobal>(void* dst, + const void* src, + bool do_access) +{ + uint4* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %5, 0;\n" + "\tmov.b32 %0, 0;\n" + "\tmov.b32 %1, 0;\n" + "\tmov.b32 %2, 0;\n" + "\tmov.b32 %3, 0;\n" + "\t@p ld.global.cg.v4.u32 {%0, %1, %2, %3}, [%4];\n" + "}\n" + : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w) + : "l"(src), "r"((int)do_access)); +#else + const uint4* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0].x = 0; + data[0].y = 0; + data[0].z = 0; + data[0].w = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_global<16, LoadPolicy::CacheStreaming>(void* dst, + const void* src) +{ + uint4* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile("ld.global.cs.v4.u32 {%0, %1, %2, %3}, [%4];\n" + : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w) + : "l"(src)); +#else + const uint4* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_global<16, LoadPolicy::CacheStreaming>(void* dst, + const void* src, + bool do_access) +{ + uint4* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %5, 0;\n" + "\tmov.b32 %0, 0;\n" + "\tmov.b32 %1, 0;\n" + "\tmov.b32 %2, 0;\n" + "\tmov.b32 %3, 0;\n" + "\t@p ld.global.cg.v4.u32 {%0, %1, %2, %3}, [%4];\n" + "}\n" + : "=r"(data[0].x), "=r"(data[0].y), 
"=r"(data[0].z), "=r"(data[0].w) + : "l"(src), "r"((int)do_access)); +#else + const uint4* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0].x = 0; + data[0].y = 0; + data[0].z = 0; + data[0].w = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_global<8>(void* dst, const void* src) +{ + uint2* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile("ld.global.ca.v2.u32 {%0, %1}, [%2];\n" + : "=r"(data[0].x), "=r"(data[0].y) + : "l"(src)); +#else + const uint2* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_global<8>(void* dst, const void* src, bool do_access) +{ + uint2* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %3, 0;\n" + "\tmov.b32 %0, 0;\n" + "\tmov.b32 %1, 0;\n" + "\t@p ld.global.v2.u32 {%0, %1}, [%2];\n" + "}\n" + : "=r"(data[0].x), "=r"(data[0].y) + : "l"(src), "r"((int)do_access)); +#else + const uint2* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0].x = 0; + data[0].y = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_global<8, LoadPolicy::CacheGlobal>(void* dst, const void* src) +{ + uint2* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile("ld.global.cg.v2.u32 {%0, %1}, [%2];\n" + : "=r"(data[0].x), "=r"(data[0].y) + : "l"(src)); +#else + const uint2* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_global<8, LoadPolicy::CacheGlobal>(void* dst, + const void* src, + bool do_access) +{ + uint2* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %3, 0;\n" + "\tmov.b32 %0, 0;\n" + "\tmov.b32 %1, 0;\n" + "\t@p ld.global.cg.v2.u32 {%0, %1}, [%2];\n" + "}\n" + : "=r"(data[0].x), "=r"(data[0].y) + : "l"(src), "r"((int)do_access)); +#else + const uint2* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0].x = 0; + data[0].y = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_global<8, LoadPolicy::CacheStreaming>(void* dst, + const void* src) +{ + uint2* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile("ld.global.cs.v2.u32 {%0, %1}, [%2];\n" + : "=r"(data[0].x), "=r"(data[0].y) + : "l"(src)); +#else + const uint2* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_global<8, LoadPolicy::CacheStreaming>(void* dst, + const void* src, + bool do_access) +{ + uint2* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %3, 0;\n" + "\tmov.b32 %0, 0;\n" + "\tmov.b32 %1, 0;\n" + "\t@p ld.global.cs.v2.u32 {%0, %1}, [%2];\n" + "}\n" + : "=r"(data[0].x), "=r"(data[0].y) + : "l"(src), "r"((int)do_access)); +#else + const uint2* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0].x = 0; + data[0].y = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_global<4>(void* dst, const void* src) +{ + int32_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile("ld.global.ca.u32 {%0}, [%1];\n" : "=r"(*data) : "l"(src)); +#else + const int32_t* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void 
load_global<4>(void* dst, const void* src, bool do_access) +{ + int32_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %2, 0;\n" + "\tmov.b32 %0, 0;\n" + "\t@p ld.global.u32 {%0}, [%1];\n" + "}\n" + : "=r"(data[0]) + : "l"(src), "r"((int)do_access)); +#else + const int32_t* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0] = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_global<4, LoadPolicy::CacheGlobal>(void* dst, const void* src) +{ + int32_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile("ld.global.cg.u32 {%0}, [%1];\n" : "=r"(*data) : "l"(src)); +#else + const int32_t* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_global<4, LoadPolicy::CacheGlobal>(void* dst, + const void* src, + bool do_access) +{ + int32_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %2, 0;\n" + "\tmov.b32 %0, 0;\n" + "\t@p ld.global.cg.u32 {%0}, [%1];\n" + "}\n" + : "=r"(data[0]) + : "l"(src), "r"((int)do_access)); +#else + const int32_t* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0] = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_global<4, LoadPolicy::CacheStreaming>(void* dst, + const void* src) +{ + int32_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile("ld.global.cs.u32 {%0}, [%1];\n" : "=r"(*data) : "l"(src)); +#else + const int32_t* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_global<4, LoadPolicy::CacheStreaming>(void* dst, + const void* src, + bool do_access) +{ + int32_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %2, 0;\n" + "\tmov.b32 %0, 0;\n" + "\t@p ld.global.cs.u32 {%0}, [%1];\n" + "}\n" + : "=r"(data[0]) + : "l"(src), "r"((int)do_access)); +#else + const int32_t* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0] = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_global<2>(void* dst, const void* src) +{ + int16_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile("ld.global.ca.u16 {%0}, [%1];\n" : "=h"(*data) : "l"(src)); +#else + const int16_t* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_global<2>(void* dst, const void* src, bool do_access) +{ + int16_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %2, 0;\n" + "\tmov.u16 %0, 0;\n" + "\t@p ld.global.u16 {%0}, [%1];\n" + "}\n" + : "=h"(*data) + : "l"(src), "r"((int)do_access)); +#else + const int16_t* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0] = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_global<2, LoadPolicy::CacheGlobal>(void* dst, const void* src) +{ + int16_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile("ld.global.cg.u16 {%0}, [%1];\n" : "=h"(*data) : "l"(src)); +#else + const int16_t* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_global<2, LoadPolicy::CacheGlobal>(void* 
dst, + const void* src, + bool do_access) +{ + int16_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %2, 0;\n" + "\tmov.u16 %0, 0;\n" + "\t@p ld.global.cg.u16 {%0}, [%1];\n" + "}\n" + : "=h"(*data) + : "l"(src), "r"((int)do_access)); +#else + const int16_t* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0] = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_global<2, LoadPolicy::CacheStreaming>(void* dst, + const void* src) +{ + int16_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile("ld.global.cs.u16 {%0}, [%1];\n" : "=h"(*data) : "l"(src)); +#else + const int16_t* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_global<2, LoadPolicy::CacheStreaming>(void* dst, + const void* src, + bool do_access) +{ + int16_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %2, 0;\n" + "\tmov.u16 %0, 0;\n" + "\t@p ld.global.cs.u16 {%0}, [%1];\n" + "}\n" + : "=h"(*data) + : "l"(src), "r"((int)do_access)); +#else + const int16_t* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0] = 0; + } +#endif +} + +/////////// Load Shared /////////// +namespace internal { + +#ifdef PTX_AVAILABLE +__device__ __forceinline__ unsigned convert_to_shared(const void* ptr) +{ +#if __CUDACC_VER_MAJOR__ >= 11 + // In CUDA 11 we have a builtin intrinsic + return __cvta_generic_to_shared(ptr); +#else + unsigned ret_val; + asm volatile( + "{\n" + "\t.reg .u64 p1;\n" + "\tcvta.to.shared.u64 p1, %1\n" + "\tcvt.u32.u64 %0, p1;\n" + "}\n" + : "=r"(ret_val) + : "l"(ptr)); + return ret_val; +#endif +} +#endif + +} // namespace internal + +template <> +__device__ __forceinline__ void load_shared<16>(void* dst, const void* src) +{ + uint4* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + unsigned src_shr = internal::convert_to_shared(src); + + asm volatile("ld.shared.v4.u32 {%0, %1, %2, %3}, [%4];\n" + : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w) + : "r"(src_shr)); +#else + const uint4* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_shared<16>(void* dst, const void* src, bool do_access) +{ + uint4* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + unsigned src_shr = internal::convert_to_shared(src); + + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %5, 0;\n" + "\tmov.b32 %0, 0;\n" + "\tmov.b32 %1, 0;\n" + "\tmov.b32 %2, 0;\n" + "\tmov.b32 %3, 0;\n" + "\t@p ld.shared.v4.u32 {%0, %1, %2, %3}, [%4];\n" + "}\n" + : "=r"(data[0].x), "=r"(data[0].y), "=r"(data[0].z), "=r"(data[0].w) + : "r"(src_shr), "r"((int)do_access)); +#else + const uint4* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0].x = 0; + data[0].y = 0; + data[0].z = 0; + data[0].w = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_shared<8>(void* dst, const void* src) +{ + uint2* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + unsigned src_shr = internal::convert_to_shared(src); + + asm volatile("ld.shared.v2.u32 {%0, %1}, [%2];\n" + : "=r"(data[0].x), "=r"(data[0].y) + : "r"(src_shr)); +#else + const uint2* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void 
load_shared<8>(void* dst, const void* src, bool do_access) +{ + uint2* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + unsigned src_shr = internal::convert_to_shared(src); + + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %3, 0;\n" + "\tmov.b32 %0, 0;\n" + "\tmov.b32 %1, 0;\n" + "\t@p ld.shared.v2.u32 {%0, %1}, [%2];\n" + "}\n" + : "=r"(data[0].x), "=r"(data[0].y) + : "r"(src_shr), "r"((int)do_access)); +#else + const uint2* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0].x = 0; + data[0].y = 0; + } +#endif +} + +template <> +__device__ __forceinline__ void load_shared<4>(void* dst, const void* src) +{ + int32_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + unsigned src_shr = internal::convert_to_shared(src); + + asm volatile("ld.shared.u32 {%0}, [%1];\n" : "=r"(*data) : "r"(src_shr)); +#else + const int32_t* src_cast = reinterpret_cast(src); + data[0] = src_cast[0]; +#endif +} + +template <> +__device__ __forceinline__ void load_shared<4>(void* dst, const void* src, bool do_access) +{ + int32_t* data = reinterpret_cast(dst); +#ifdef PTX_AVAILABLE + unsigned src_shr = internal::convert_to_shared(src); + + asm volatile( + "{\n" + "\t.reg .pred p;\n" + "\tsetp.ne.b32 p, %2, 0;\n" + "\tmov.b32 %0, 0;\n" + "\t@p ld.shared.u32 %0, [%1];\n" + "}\n" + : "=r"(data[0]) + : "r"(src_shr), "r"((int)do_access)); +#else + const int32_t* src_cast = reinterpret_cast(src); + if (do_access) { + data[0] = src_cast[0]; + } else { + data[0] = 0; + } +#endif +} + +/////////// Store Global /////////// + +template <> +__device__ __forceinline__ void store_global<16>(void* dst, const void* src) +{ + const uint4* data = reinterpret_cast(src); +#ifdef PTX_AVAILABLE + asm volatile("st.global.wb.v4.u32 [%0], {%1, %2, %3, %4};\n" + : + : "l"(dst), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w) + : "memory"); +#else + uint4* dst_cast = reinterpret_cast(dst); + dst_cast[0] = data[0]; +#endif +} + +template <> +__device__ __forceinline__ void store_global<16, StorePolicy::CacheGlobal>(void* dst, + const void* src) +{ + const uint4* data = reinterpret_cast(src); +#ifdef PTX_AVAILABLE + asm volatile("st.global.cg.v4.u32 [%0], {%1, %2, %3, %4};\n" + : + : "l"(dst), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w) + : "memory"); +#else + uint4* dst_cast = reinterpret_cast(dst); + dst_cast[0] = data[0]; +#endif +} + +template <> +__device__ __forceinline__ void store_global<16, StorePolicy::CacheStreaming>(void* dst, + const void* src) +{ + const uint4* data = reinterpret_cast(src); +#ifdef PTX_AVAILABLE + asm volatile("st.global.cs.v4.u32 [%0], {%1, %2, %3, %4};\n" + : + : "l"(dst), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w) + : "memory"); +#else + uint4* dst_cast = reinterpret_cast(dst); + dst_cast[0] = data[0]; +#endif +} + +template <> +__device__ __forceinline__ void store_global<8>(void* dst, const void* src) +{ + const uint2* data = reinterpret_cast(src); +#ifdef PTX_AVAILABLE + asm volatile("st.global.wb.v2.u32 [%0], {%1, %2};\n" + : + : "l"(dst), "r"(data[0].x), "r"(data[0].y)); +#else + uint2* dst_cast = reinterpret_cast(dst); + dst_cast[0] = data[0]; +#endif +} + +template <> +__device__ __forceinline__ void store_global<8, StorePolicy::CacheGlobal>(void* dst, + const void* src) +{ + const uint2* data = reinterpret_cast(src); +#ifdef PTX_AVAILABLE + asm volatile("st.global.cg.v2.u32 [%0], {%1, %2};\n" + : + : "l"(dst), "r"(data[0].x), "r"(data[0].y)); +#else + uint2* dst_cast = 
reinterpret_cast(dst); + dst_cast[0] = data[0]; +#endif +} + +template <> +__device__ __forceinline__ void store_global<8, StorePolicy::CacheStreaming>(void* dst, + const void* src) +{ + const uint2* data = reinterpret_cast(src); +#ifdef PTX_AVAILABLE + asm volatile("st.global.cs.v2.u32 [%0], {%1, %2};\n" + : + : "l"(dst), "r"(data[0].x), "r"(data[0].y)); +#else + uint2* dst_cast = reinterpret_cast(dst); + dst_cast[0] = data[0]; +#endif +} + +template <> +__device__ __forceinline__ void store_global<4>(void* dst, const void* src) +{ + const int32_t* data = reinterpret_cast(src); +#ifdef PTX_AVAILABLE + asm volatile("st.global.wb.u32 [%0], %1;\n" : : "l"(dst), "r"(*data)); +#else + int32_t* dst_cast = reinterpret_cast(dst); + dst_cast[0] = data[0]; +#endif +} + +template <> +__device__ __forceinline__ void store_global<4, StorePolicy::CacheGlobal>(void* dst, + const void* src) +{ + const int32_t* data = reinterpret_cast(src); +#ifdef PTX_AVAILABLE + asm volatile("st.global.cg.u32 [%0], %1;\n" : : "l"(dst), "r"(*data)); +#else + int32_t* dst_cast = reinterpret_cast(dst); + dst_cast[0] = data[0]; +#endif +} + +template <> +__device__ __forceinline__ void store_global<4, StorePolicy::CacheStreaming>(void* dst, + const void* src) +{ + const int32_t* data = reinterpret_cast(src); +#ifdef PTX_AVAILABLE + asm volatile("st.global.cs.u32 [%0], %1;\n" : : "l"(dst), "r"(*data)); +#else + int32_t* dst_cast = reinterpret_cast(dst); + dst_cast[0] = data[0]; +#endif +} + +/////////// Store Shared /////////// + +template <> +__device__ __forceinline__ void store_shared<16>(void* dst, const void* src) +{ + const uint4* data = reinterpret_cast(src); +#ifdef PTX_AVAILABLE + unsigned dst_int = internal::convert_to_shared(dst); + + asm volatile("st.shared.v4.u32 [%0], {%1, %2, %3, %4};\n" + : + : "r"(dst_int), "r"(data[0].x), "r"(data[0].y), "r"(data[0].z), "r"(data[0].w)); +#else + uint4* dst_cast = reinterpret_cast(dst); + dst_cast[0] = data[0]; +#endif +} + +template <> +__device__ __forceinline__ void store_shared<8>(void* dst, const void* src) +{ + const uint2* data = reinterpret_cast(src); +#ifdef PTX_AVAILABLE + unsigned dst_int = internal::convert_to_shared(dst); + + asm volatile("st.shared.v2.u32 [%0], {%1, %2};\n" + : + : "r"(dst_int), "r"(data[0].x), "r"(data[0].y)); +#else + uint2* dst_cast = reinterpret_cast(dst); + dst_cast[0] = data[0]; +#endif +} + +template <> +__device__ __forceinline__ void store_shared<4>(void* dst, const void* src) +{ + const int32_t* data = reinterpret_cast(src); +#ifdef PTX_AVAILABLE + unsigned dst_int = internal::convert_to_shared(dst); + + asm volatile("st.shared.u32 [%0], %1;\n" : : "r"(dst_int), "r"(*data)); +#else + int32_t* dst_cast = reinterpret_cast(dst); + dst_cast[0] = data[0]; +#endif +} + +/////////// Asynchronous Memory Copy /////////// + +#ifdef ASYNC_COPY_AVAILABLE +template +__device__ __forceinline__ void memcpy_async(void* shr, const void* gbl) +{ + static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16)); + unsigned shr_int = internal::convert_to_shared(shr); + + asm volatile("cp.async.ca.shared.global [%0], [%1], %2;\n" + : + : "r"(shr_int), "l"(gbl), "n"(AccessSize)); +} + +template +__device__ __forceinline__ void memcpy_async_nop(void* shr, const void* gbl, bool predicate) +{ + static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16)); + unsigned shr_int = internal::convert_to_shared(shr); + + asm volatile( + "{\n" + " .reg .pred p;\n" + " setp.ne.b32 p, %0, 0;\n" + " @p cp.async.ca.shared.global [%1], [%2], %3;\n" + 
"}\n" + : + : "r"((int)predicate), "r"(shr_int), "l"(gbl), "n"(AccessSize)); +} + +template +__device__ __forceinline__ void memcpy_async_zero(void* shr, const void* gbl, bool predicate) +{ + static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16)); + unsigned shr_int = internal::convert_to_shared(shr); + int bytes_to_copy = (predicate ? AccessSize : 0); + + asm volatile("cp.async.ca.shared.global [%0], [%1], %2, %3;\n" + : + : "r"(shr_int), "l"(gbl), "n"(AccessSize), "r"(bytes_to_copy)); +} + +template +__device__ __forceinline__ void memcpy_async_zero_nop(void* shr, + const void* gbl, + bool zero_predicate, + bool nop_predicate) +{ + static_assert((AccessSize == 4 || AccessSize == 8 || AccessSize == 16)); + unsigned shr_int = internal::convert_to_shared(shr); + int bytes_to_copy = (zero_predicate ? AccessSize : 0); + + asm volatile( + "{\n" + " .reg .pred p;\n" + " setp.ne.b32 p, %0, 0;\n" + " @p cp.async.ca.shared.global [%1], [%2], %3, %4;\n" + "}\n" + : + : "r"((int)nop_predicate), "r"(shr_int), "l"(gbl), "n"(AccessSize), "r"(bytes_to_copy)); +} + +// Cache global variants. Separate interface to require deliberate use of them. +__device__ __forceinline__ void memcpy_async_cg(void* shr, const void* gbl) +{ + unsigned shr_int = internal::convert_to_shared(shr); + + asm volatile("cp.async.cg.shared.global [%0], [%1], 16;\n" : : "r"(shr_int), "l"(gbl)); +} + +__device__ __forceinline__ void memcpy_async_nop_cg(void* shr, const void* gbl, bool predicate) +{ + unsigned shr_int = internal::convert_to_shared(shr); + + asm volatile( + "{\n" + " .reg .pred p;\n" + " setp.ne.b32 p, %0, 0;\n" + " @p cp.async.cg.shared.global [%1], [%2], 16;\n" + "}\n" + : + : "r"((int)predicate), "r"(shr_int), "l"(gbl)); +} + +__device__ __forceinline__ void memcpy_async_zero_cg(void* shr, const void* gbl, bool predicate) +{ + unsigned shr_int = internal::convert_to_shared(shr); + int bytes_to_copy = (predicate ? 16 : 0); + + asm volatile("cp.async.cg.shared.global [%0], [%1], 16, %2;\n" + : + : "r"(shr_int), "l"(gbl), "r"(bytes_to_copy)); +} + +__device__ __forceinline__ void memcpy_async_zero_nop_cg(void* shr, + const void* gbl, + bool zero_predicate, + bool nop_predicate) +{ + unsigned shr_int = internal::convert_to_shared(shr); + int bytes_to_copy = (zero_predicate ? 16 : 0); + + asm volatile( + "{\n" + " .reg .pred p;\n" + " setp.ne.b32 p, %0, 0;\n" + " @p cp.async.cg.shared.global [%1], [%2], 16, %3;\n" + "}\n" + : + : "r"((int)nop_predicate), "r"(shr_int), "l"(gbl), "r"(bytes_to_copy)); +} + +__device__ __forceinline__ void memcpy_async_fence() { asm volatile("cp.async.commit_group;\n"); } + +template +__device__ __forceinline__ void memcpy_async_wait() +{ + static_assert(stages <= 8); + + asm volatile("cp.async.wait_group %0;\n" : : "n"(stages)); +} + +// TODO: The tail complete should be a known compile time artifact, should try and induce this +// without all of the branches from the call-site. This is a hacky solution. 
+template <> +__device__ __forceinline__ void tail_complete_wait<1>(int remaining_stages) +{ + if (remaining_stages == 0) memcpy_async_wait<0>(); +} + +template <> +__device__ __forceinline__ void tail_complete_wait<2>(int remaining_stages) +{ + if (remaining_stages == 1) + memcpy_async_wait<1>(); + else if (remaining_stages == 0) + memcpy_async_wait<0>(); +} + +template <> +__device__ __forceinline__ void tail_complete_wait<3>(int remaining_stages) +{ + if (remaining_stages == 2) + memcpy_async_wait<2>(); + else if (remaining_stages == 1) + memcpy_async_wait<1>(); + else if (remaining_stages == 0) + memcpy_async_wait<0>(); +} + +template <> +__device__ __forceinline__ void tail_complete_wait<4>(int remaining_stages) +{ + if (remaining_stages == 3) + memcpy_async_wait<3>(); + else if (remaining_stages == 2) + memcpy_async_wait<2>(); + else if (remaining_stages == 1) + memcpy_async_wait<1>(); + else if (remaining_stages == 0) + memcpy_async_wait<0>(); +} + +template <> +__device__ __forceinline__ void tail_complete_wait<5>(int remaining_stages) +{ + if (remaining_stages == 4) + memcpy_async_wait<4>(); + else if (remaining_stages == 3) + memcpy_async_wait<3>(); + else if (remaining_stages == 2) + memcpy_async_wait<2>(); + else if (remaining_stages == 1) + memcpy_async_wait<1>(); + else if (remaining_stages == 0) + memcpy_async_wait<0>(); +} + +template <> +__device__ __forceinline__ void tail_complete_wait<6>(int remaining_stages) +{ + if (remaining_stages == 5) + memcpy_async_wait<5>(); + else if (remaining_stages == 4) + memcpy_async_wait<4>(); + else if (remaining_stages == 3) + memcpy_async_wait<3>(); + else if (remaining_stages == 2) + memcpy_async_wait<2>(); + else if (remaining_stages == 1) + memcpy_async_wait<1>(); + else if (remaining_stages == 0) + memcpy_async_wait<0>(); +} +#endif + +} // namespace mem_access diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/reduction_utils.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/reduction_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..eb8efab77ac1e900357f7fc29327b248c7e7fc31 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/includes/reduction_utils.h @@ -0,0 +1,778 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include "conversion_utils.h" +#include "ds_kernel_utils.h" +#include "memory_access_utils.h" + +namespace cg = cooperative_groups; + +namespace reduce { + +enum class ROpType { + // Addition + Add, + + // Maximum reduction + Max, + + // Minimum reduction + Min, +}; + +constexpr int max_threads = 1024; +constexpr int max_warps = max_threads / hw_warp_size; + +/* +High level API. The API takes in a set of operations and variables +and performs that reduction operation on that variable. The reductions +of each of the arguments are completely independent of each other ( +i.e., the val1-op1 combination has no impact on val2-op2). + +Example usage: +``` cpp +float max_val; +float min_val; +reduce::block(tb, warp, max_val, min_val); +``` + +TODO(cmikeh2): In theory, we might be able to do this sequentially with +device functions and rely on the assembler correctly behaving. My initial +instinct is this won't work, but if it does it would reduce implementation +cost significantly. + +TODO(cmikeh2): We need to support sub-block reductions. The warp intrinsic +currently supports this (more incidentally than anything else). 
It is not +uncommon in something like softmax or a fused attention kernel to map multiple +reductions to a thread block, but each reduction itself is only scoped +to part of the threads (i.e., block size = 512, 128 threads per reduction). +*/ +template +DS_D_INLINE void block(cg::thread_block& tb, cg::thread_block_tile& warp, float& val); + +template +DS_D_INLINE void block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val1, + float& val2); + +template +DS_D_INLINE void block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val1, + float& val2, + float& val3); + +template +DS_D_INLINE void block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val1, + float& val2, + float& val3, + float& val4); + +/* +The partitioned block is a special case of the above wherein the warps of a threadblock are +partitioned into separate independent reductions. For example, I might have an 8 warp thread block +in which each pair of warps is processing an independent piece of data. I would then reduce that +data with something like the following: +``` cpp +float max_val; +reduce::partitioned_block<rop::Max, 64>(tb, warp, max_val); +``` +After which, each pair of warps would have coherent data with each other. Note, this API will not +provide correct results if the number of warps per partition is not a power of 2. +*/ +template +DS_D_INLINE void partitioned_block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val); + +template +DS_D_INLINE void partitioned_block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val1, + float& val2); + +template +DS_D_INLINE void partitioned_block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val1, + float& val2, + float& val3); + +template +DS_D_INLINE void partitioned_block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val1, + float& val2, + float& val3, + float& val4); + +/* +Single element reduction primitives. Used inside serial collection +loops. + +Example usage: +using rop = reduce::ROpType; +float min = init<rop::Min>(); +for (int i = 0; i < 4; i++) { + min = reduce::element<rop::Min>(min, data[i]); +} +*/ + +template +DS_D_INLINE T element(const T lhs, const T rhs); + +template +DS_D_INLINE T init(); + +/********************** Internal reduction APIs **********************/ + +/* +Single element "reductions". TODO(cmikeh2): this sort of "op" concept +should be refactored into its own implementation at some point. This interface +may be easily expanded for new types/operations, but the typical reductions +we need are covered with min/max/add on float. + +NOTE: there is no mean reduction because that relies on knowledge of how +many values were already reduced into each scalar. Implementing this on top +of reduce should be straightforward (can just wrap the sum reduction) and +would be a good extension of the header. 
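As a rough sketch of that extension (the variable names here are placeholders, not
part of this header), a mean could simply wrap the Add reduction and divide by the
count tracked by the caller:

    float sum = thread_partial;                 // per-thread partial sum
    reduce::block<ROpType::Add>(tb, warp, sum); // sum across the thread block
    const float mean = sum / element_count;     // element_count known to the caller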
+*/ + +DS_D_INLINE int _warp_rank() +{ + const int thread_rank = + threadIdx.x + threadIdx.y * blockDim.x + threadIdx.z * blockDim.x * blockDim.y; + return thread_rank / hw_warp_size; +} + +/* Float element reduce implementations */ +template <> +DS_D_INLINE float element(const float lhs, const float rhs) +{ + return lhs + rhs; +} + +template <> +DS_D_INLINE float element(const float lhs, const float rhs) +{ + return fmaxf(lhs, rhs); +} + +template <> +DS_D_INLINE float element(const float lhs, const float rhs) +{ + return fminf(lhs, rhs); +} + +/* __half element reduce implementation */ +template <> +DS_D_INLINE __half element(const __half lhs, const __half rhs) +{ + return lhs + rhs; +} + +template <> +DS_D_INLINE __half element(const __half lhs, const __half rhs) +{ +#if __CUDA_ARCH__ >= 800 + // Intrinsic limited to Ampere + newer + return __hmax(lhs, rhs); +#else + return (lhs > rhs) ? lhs : rhs; +#endif +} + +template <> +DS_D_INLINE __half element(const __half lhs, const __half rhs) +{ +#if __CUDA_ARCH__ >= 800 + // Intrinsic limited to Ampere + newer + return __hmin(lhs, rhs); +#else + return (lhs < rhs) ? lhs : rhs; +#endif +} + +/* __half2 element reduce implementation */ +template <> +DS_D_INLINE __half2 element(const __half2 lhs, const __half2 rhs) +{ + return lhs + rhs; +} + +template <> +DS_D_INLINE __half2 element(const __half2 lhs, const __half2 rhs) +{ +#if __CUDA_ARCH__ >= 800 + return __hmax2(lhs, rhs); +#else + __half2 ret_val; + ret_val.x = (lhs.x > rhs.x) ? lhs.x : rhs.x; + ret_val.y = (lhs.y > rhs.y) ? lhs.y : rhs.y; + return ret_val; +#endif +} + +template <> +DS_D_INLINE __half2 element(const __half2 lhs, const __half2 rhs) +{ +#if __CUDA_ARCH__ >= 800 + return __hmin2(lhs, rhs); +#else + __half2 ret_val; + ret_val.x = (lhs.x < rhs.x) ? lhs.x : rhs.x; + ret_val.y = (lhs.y < rhs.y) ? lhs.y : rhs.y; + return ret_val; +#endif +} + +template <> +DS_D_INLINE int32_t element(const int32_t lhs, const int32_t rhs) +{ + return lhs + rhs; +} + +template <> +DS_D_INLINE int32_t element(const int32_t lhs, const int32_t rhs) +{ + return (lhs > rhs) ? lhs : rhs; +} + +template <> +DS_D_INLINE int32_t element(const int32_t lhs, const int32_t rhs) +{ + return (lhs < rhs) ? lhs : rhs; +} + +template <> +DS_D_INLINE uint32_t element(const uint32_t lhs, const uint32_t rhs) +{ + return lhs + rhs; +} + +template <> +DS_D_INLINE uint32_t element(const uint32_t lhs, const uint32_t rhs) +{ + return (lhs > rhs) ? lhs : rhs; +} + +template <> +DS_D_INLINE uint32_t element(const uint32_t lhs, const uint32_t rhs) +{ + return (lhs < rhs) ? lhs : rhs; +} + +template <> +DS_D_INLINE int64_t element(const int64_t lhs, const int64_t rhs) +{ + return lhs + rhs; +} + +template <> +DS_D_INLINE int64_t element(const int64_t lhs, const int64_t rhs) +{ + return (lhs > rhs) ? lhs : rhs; +} + +template <> +DS_D_INLINE int64_t element(const int64_t lhs, const int64_t rhs) +{ + return (lhs < rhs) ? 
lhs : rhs; +} + +/* +Reduction initialization primitives +*/ +template <> +DS_D_INLINE float init() +{ + return 0.0f; +} + +template <> +DS_D_INLINE float init() +{ + // Positive infinity + return INFINITY; +} + +template <> +DS_D_INLINE float init() +{ + // Negative infinity + return -INFINITY; +} + +template <> +DS_D_INLINE __half init() +{ + constexpr __half_raw zero = {0x0000}; + return __half(zero); +} + +template <> +DS_D_INLINE __half init() +{ + constexpr __half_raw inf = {0x7C00}; + return __half(inf); +} + +template <> +DS_D_INLINE __half init() +{ + constexpr __half_raw neg_inf = {0xFC00}; + return __half(neg_inf); +} + +template <> +DS_D_INLINE __half2 init() +{ +#ifdef __HIP_PLATFORM_AMD__ + return __half2{_Float16_2{0x0000, 0x0000}}; +#else + constexpr __half2_raw zero = {0x0000, 0x0000}; + return __half2(zero); +#endif +} + +template <> +DS_D_INLINE __half2 init() +{ +#ifdef __HIP_PLATFORM_AMD__ + return __half2{_Float16_2{0x7C00, 0x7C00}}; +#else + constexpr __half2_raw inf = {0x7C00, 0x7C00}; + return __half2(inf); +#endif +} + +template <> +DS_D_INLINE __half2 init() +{ +#ifdef __HIP_PLATFORM_AMD__ + return __half2{_Float16_2{0xFC00, 0xFC00}}; +#else + constexpr __half2_raw neg_inf = {0xFC00, 0xFC00}; + return __half2(neg_inf); +#endif +} + +template <> +DS_D_INLINE int32_t init() +{ + return 0; +} + +template <> +DS_D_INLINE int32_t init() +{ + return 0x7FFFFFFF; +} + +template <> +DS_D_INLINE int32_t init() +{ + return 0x80000000; +} + +template <> +DS_D_INLINE uint32_t init() +{ + return 0; +} + +template <> +DS_D_INLINE uint32_t init() +{ + return 0xFFFFFFFF; +} + +template <> +DS_D_INLINE uint32_t init() +{ + return 0; +} + +template <> +DS_D_INLINE int64_t init() +{ + return 0; +} + +template <> +DS_D_INLINE int64_t init() +{ + return 0x7FFFFFFFFFFFFFFF; +} + +template <> +DS_D_INLINE int64_t init() +{ + return 0x8000000000000000; +} + +template <> +DS_D_INLINE uint64_t init() +{ + return 0; +} + +template <> +DS_D_INLINE uint64_t init() +{ + return 0xFFFFFFFFFFFFFFFF; +} + +template <> +DS_D_INLINE uint64_t init() +{ + return 0; +} + +template +DS_D_INLINE void init(T* data) +{ + data[0] = init(); +} + +template +DS_D_INLINE void init(T* data) +{ + data[0] = init(); + data[1] = init(); +} + +template +DS_D_INLINE void init(T* data) +{ + data[0] = init(); + data[1] = init(); + data[2] = init(); +} + +template +DS_D_INLINE void init(T* data) +{ + data[0] = init(); + data[1] = init(); + data[2] = init(); + data[3] = init(); +} + +/* +Warp reduction primitives + +`reduction_width` is an unsafe template parameter, that is that +when using `reduction_width` < hw_warp_size the warp is partitioned +into `hw_warp_size` / `reduction_width` groups of partial sums. + +If someone can figure out how to use variadic templates in a reasonable way +here (fold is C++17 only and I don't think helps and recursion feels like +huge overkill that harms readability) that would be wonderful. 
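For illustration: with hw_warp_size = 32 and reduce_width = 8, the shuffle loop below
runs over XOR strides 1, 2, and 4, so lanes 0-7, 8-15, 16-23, and 24-31 each finish
holding an independent partial result for their own group of eight lanes.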
+*/ + +template +DS_D_INLINE void _warp(cg::thread_block_tile& warp, T* data) +{ +#pragma unroll + for (int i = 1; i < reduce_width; i *= 2) { + data[0] = element(data[0], warp.shfl_xor(data[0], i)); + } +} + +template +DS_D_INLINE void _warp(cg::thread_block_tile& warp, T* data) +{ +#pragma unroll + for (int i = 1; i < reduce_width; i *= 2) { + data[0] = element(data[0], warp.shfl_xor(data[0], i)); + data[1] = element(data[1], warp.shfl_xor(data[1], i)); + } +} + +template +DS_D_INLINE void _warp(cg::thread_block_tile& warp, T* data) +{ +#pragma unroll + for (int i = 1; i < reduce_width; i *= 2) { + data[0] = element(data[0], warp.shfl_xor(data[0], i)); + data[1] = element(data[1], warp.shfl_xor(data[1], i)); + data[2] = element(data[2], warp.shfl_xor(data[2], i)); + } +} + +template +DS_D_INLINE void _warp(cg::thread_block_tile& warp, T* data) +{ +#pragma unroll + for (int i = 1; i < reduce_width; i *= 2) { + data[0] = element(data[0], warp.shfl_xor(data[0], i)); + data[1] = element(data[1], warp.shfl_xor(data[1], i)); + data[2] = element(data[2], warp.shfl_xor(data[2], i)); + data[3] = element(data[3], warp.shfl_xor(data[3], i)); + } +} + +/* +Implementation for primary block reduction that serves both `block` and +`partitioned_block`. + +Total warps refers to the reduction width of the reduction, not +the number of warps in the block (which may exceed that +if the block is partitioned or if we do a conservative bound at +compile time). +*/ +template +DS_D_INLINE void _block(cg::thread_block& tb, + cg::thread_block_tile& warp_arg, + T* data) +{ + constexpr int elems = sizeof...(Ops); + constexpr int bytes = sizeof(T); + // Unused when `partition_size == 1` or total_warps == 1 + __shared__ T reduce_buffer[max_warps * elems]; + +#ifdef __HIP_PLATFORM_AMD__ + const int total_threads = blockDim.x * blockDim.y * blockDim.z; + const int running_warps = total_threads / hw_warp_size; +#else + const int running_warps = warp_arg.meta_group_size(); +#endif + + // Always perform warp-scope reduction + _warp(warp_arg, data); + + // If max_warps == 1 let's skip the runtime check + if (total_warps != 1) { + if (warp_arg.thread_rank() == 0) { +#pragma unroll + for (int i = 0; i < elems; i++) { + mem_access::store_shared(reduce_buffer + elems * _warp_rank() + i, data + i); + } + } + + // Synchronization inside block-uniform conditional is safe + tb.sync(); + + if (_warp_rank() == 0) { + if (warp_arg.thread_rank() < running_warps) { +#pragma unroll + for (int i = 0; i < elems; i++) { + mem_access::load_shared( + data + i, reduce_buffer + elems * warp_arg.thread_rank() + i); + } + } else { + init(data); + } + + _warp(warp_arg, data); + +#pragma unroll + for (int i = 0; i < elems; i++) { + mem_access::store_shared(reduce_buffer + elems * warp_arg.thread_rank() + i, + data + i); + } + } + + // Synchronization inside block-uniform conditional is safe + tb.sync(); + +#pragma unroll + for (int i = 0; i < elems; i++) { + mem_access::load_shared(data + i, reduce_buffer + _warp_rank() * elems + i); + } + } +} + +/* +Main API implementations. For the most part, they just convert the individual +variables into arrays, which makes working with them easier with a single +implementation. In theory, we could use the `_block` implementation as another +option, but the nature of using a pointer is a little less safe and this allows +us to obfuscate the details of the partitioned implementation. 
+*/ +template +DS_D_INLINE void block(cg::thread_block& tb, cg::thread_block_tile& warp, float& val) +{ + _block(tb, warp, &val); +} + +template +DS_D_INLINE void block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val1, + float& val2) +{ + float data[2] = {val1, val2}; + _block(tb, warp, data); + val1 = data[0]; + val2 = data[1]; +} + +template +DS_D_INLINE void block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val1, + float& val2, + float& val3) +{ + float data[3] = {val1, val2, val3}; + _block(tb, warp, data); + val1 = data[0]; + val2 = data[1]; + val3 = data[2]; +} + +template +DS_D_INLINE void block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val1, + float& val2, + float& val3, + float& val4) +{ + float data[4] = {val1, val2, val3, val4}; + _block(tb, warp, data); + val1 = data[0]; + val2 = data[1]; + val3 = data[2]; + val4 = data[3]; +} + +/* +Note: for the partitioned blocks, the implementation does not support non-power of 2 blocks in order +to shorten block scale reduction length. +*/ +template +DS_D_INLINE void partitioned_block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val) +{ + if (num_threads <= hw_warp_size) { + _warp(warp, &val); + } else { + constexpr int num_warps = num_threads / hw_warp_size; + _block(tb, warp, &val); + } +} + +template +DS_D_INLINE void partitioned_block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val1, + float& val2) +{ + float data[2] = {val1, val2}; + + if (num_threads <= hw_warp_size) { + _warp(warp, data); + } else { + constexpr int num_warps = num_threads / hw_warp_size; + _block(tb, warp, data); + } + + val1 = data[0]; + val2 = data[1]; +} + +template +DS_D_INLINE void partitioned_block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val1, + float& val2, + float& val3) +{ + float data[3] = {val1, val2, val3}; + + if (num_threads <= hw_warp_size) { + _warp(warp, data); + } else { + constexpr int num_warps = num_threads / hw_warp_size; + _block(tb, warp, data); + } + + val1 = data[0]; + val2 = data[1]; + val3 = data[2]; +} + +template +DS_D_INLINE void partitioned_block(cg::thread_block& tb, + cg::thread_block_tile& warp, + float& val1, + float& val2, + float& val3, + float& val4) +{ + float data[4] = {val1, val2, val3, val4}; + + if (num_threads <= hw_warp_size) { + _warp(warp, data); + } else { + constexpr int num_warps = num_threads / hw_warp_size; + _block(tb, warp, data); + } + + val1 = data[0]; + val2 = data[1]; + val3 = data[2]; + val4 = data[3]; +} + +/* +Arg-reduce is a specialization of the above. We only support this with a single reduction +parameter. This only works for max/min reductions. +*/ + +__align__(8) struct IdxReduceResult { + /* + NOTE: ORDERING MATTERS HERE! The idx is the least significant set of bits + and the val is the most significant. Changing the order of this declaration + will break the code. + */ + int idx; + float val; +}; + +template +DS_D_INLINE IdxReduceResult +idx_reduce(cg::thread_block& tb, cg::thread_block_tile& warp, float val, int idx) +{ + IdxReduceResult res = {idx, val}; + + // Clear out the nan. This shouldn't be an issue for our initial applications + if (isnan(val)) res.val = init(); + + // Can do float compares as integers. By packing the index into the lower bits + // we can just do a single int64 rather than a branch, compare, and select. + // One side benefit of this is that it is by nature a stable algorithm and + // will always bias ties to the higher index. 
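    // Worked illustration (values are hypothetical): val = 2.0f has bit pattern
    // 0x40000000, so with idx = 7 the packed word on a little-endian device is
    // 0x4000000000000007. Comparing packed words as integers therefore orders by
    // val first and, for equal vals, by the larger idx held in the low bits.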
+ int64_t* res_as_int = reinterpret_cast(&res); + + // The way floating point compare works is normally to perform a sign comparison + // and if they match, then do a comparison of the rest of the bits as unsigned + // integers. Since we are bundling these, that means for negative values we need + // to reverse the sort order, which we can do with an XOR. + if (val < 0) { *res_as_int ^= 0x7fffffff00000000; } + + _block(tb, warp, res_as_int); + + // Sign bit is preserved, so we can check if we need to invert the mantissa back + if (res.val < 0) { *res_as_int ^= 0x7fffffff00000000; } + + return res; +} + +} // namespace reduce diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..87b2b3d68777a38cfccadaa55740cc681bf45cf1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .blocked_flash import * diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a042aa903edfda7d49ba169559cb4b435c192ba4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__pycache__/blocked_flash.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__pycache__/blocked_flash.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe4ec7d1e0e36f96b1f1b43bf3a0b38b3cb39b18 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/__pycache__/blocked_flash.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/attention_atom.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/attention_atom.h new file mode 100644 index 0000000000000000000000000000000000000000..ed8eb9e19b3dafaa8ab4d5add697c53d95ea1c54 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/attention_atom.h @@ -0,0 +1,39 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include +#include "cuda.h" + +struct AttentionAtom { + /* + The attention atom describes the workload of a particular query. The attention + kernel will execute each ``AttentionAtom`` for each head of the model. + */ + + // Pointer to a list of KV block indices. + int32_t* block_idx_list; + + // Index of first token in the ragged batch associated with this atom. + int32_t q_start_idx; + + // Number of tokens in the ragged batch associated with this atom. + int32_t q_len; + + // Number of key/value blocks associated with this atom. All but the last are + // assumed to be fully dense. 
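    // (Illustration with a hypothetical KV block size of 64: a query attending over
    // 800 cached tokens spans ceil(800 / 64) = 13 blocks, of which only the last may
    // be partially filled.)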
+ int32_t kv_blocks; + + // Number of tokens in the last key/value block. + int32_t total_extent; + + // Global index of the first token in the atom. For example, in a prompt continuation + // in which we have already processed 768 tokens, this would be 768. + int32_t global_q_idx; + + // Unused + int32_t unused; +}; diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.cpp b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6cb60ad00f9b2b9322bc3a23b08b4d562292c991 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.cpp @@ -0,0 +1,101 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/****************************************************************************** + * Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +#include +#include +#include + +#include "blocked_flash.h" +#include "flash.h" + +#define CHECK_SHAPE(x, ...) \ + TORCH_CHECK(x.sizes() == torch::IntArrayRef({__VA_ARGS__}), \ + #x " must have shape (" #__VA_ARGS__ ")") + +void flash_attn_by_atoms(at::Tensor& out, + at::Tensor& q, + at::Tensor& k, + at::Tensor& v, + at::Tensor& attention_atoms, + const float softmax_scale, + const bool is_causal) +{ + auto dprops = at::cuda::getCurrentDeviceProperties(); + + bool is_sm8x = dprops->major == 8 && dprops->minor >= 0; + bool is_sm90 = dprops->major == 9 && dprops->minor == 0; + TORCH_CHECK(is_sm90 || is_sm8x, "FlashAttention only supports Ampere GPUs or newer."); + + auto q_dtype = q.dtype(); + TORCH_CHECK(q_dtype == torch::kFloat16 || q_dtype == torch::kBFloat16, + "FlashAttention only support fp16 and bf16 data type"); + if (q_dtype == torch::kBFloat16) { + TORCH_CHECK(is_sm90 || is_sm8x, "bfloat16 is only supported on Ampere GPUs or newer"); + } + TORCH_CHECK(k.dtype() == q_dtype, "query and key must have the same dtype"); + TORCH_CHECK(v.dtype() == q_dtype, "query and value must have the same dtype"); + + TORCH_CHECK(q.is_cuda(), "Input tensor must be on CUDA device"); + TORCH_CHECK(k.is_cuda(), "Input tensor must be on CUDA device"); + TORCH_CHECK(v.is_cuda(), "Input tensor must be on CUDA device"); + + TORCH_CHECK(q.stride(-1) == 1, "Input tensor must have contiguous last dimension"); + TORCH_CHECK(k.stride(-1) == 1, "Input tensor must have contiguous last dimension"); + TORCH_CHECK(v.stride(-1) == 1, "Input tensor must have contiguous last dimension"); + + const int total_q = q.size(0); + const int head_size = k.size(-1); + const int num_heads_kv = k.size(-2); + const int num_heads_q = q.size(-1) / head_size; + + TORCH_CHECK(head_size <= 256, "head_size must be <= 256"); + TORCH_CHECK(head_size % 8 == 0, "head_size must be divisible by 8"); + TORCH_CHECK(num_heads_q % num_heads_kv == 0, "num_heads_q must be divisible by num_heads_kv"); + + Flash_fwd_params params; + + params.is_bf16 = q.dtype() == torch::kBFloat16; + + // Set the pointers and strides. + params.q_ptr = q.data_ptr(); + params.k_ptr = k.data_ptr(); + params.v_ptr = v.data_ptr(); + params.o_ptr = out.data_ptr(); + params.atoms = reinterpret_cast(attention_atoms.data_ptr()); + + // All stride are in elements, not bytes. 
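    // q and out are [tokens, hidden_size], so their per-token row stride is stride(0);
    // k and v are [n_blocks, block_size, n_heads_kv, head_size] (see the Python
    // wrapper's docstring), so their per-token row stride is stride(1).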
+ params.q_row_stride = q.stride(0); + params.k_row_stride = k.stride(1); + params.v_row_stride = v.stride(1); + params.o_row_stride = out.stride(0); + + // Assume heads are contiguous. + params.q_head_stride = head_size; + params.k_head_stride = head_size; + params.v_head_stride = head_size; + params.o_head_stride = head_size; + + // Head params + params.h = num_heads_q; + params.h_k = num_heads_kv; + params.h_h_k_ratio = num_heads_q / num_heads_kv; + params.d = head_size; + auto round_multiple = [](int x, int m) { return (x + m - 1) / m * m; }; + params.d_rounded = round_multiple(head_size, 32); + params.num_atoms = attention_atoms.size(0); + + // Set the different scale values. + params.scale_softmax = softmax_scale; + params.scale_softmax_log2 = softmax_scale * M_LOG2E; + + params.is_causal = is_causal; + + auto stream = at::cuda::getCurrentCUDAStream().stream(); + run_mha_fwd(params, stream); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.h new file mode 100644 index 0000000000000000000000000000000000000000..68037b4251131747479ce93d6ac5df5a8af0b028 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.h @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include + +void flash_attn_by_atoms(at::Tensor& out, + at::Tensor& q, + at::Tensor& k, + at::Tensor& v, + at::Tensor& attention_atoms, + const float softmax_scale, + const bool is_causal); diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.py new file mode 100644 index 0000000000000000000000000000000000000000..54d465698b4edb5f9eddf66f711d2eba8f24b522 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/blocked_flash.py @@ -0,0 +1,107 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from deepspeed.accelerator import get_accelerator +from ....inference_utils import DtypeEnum +from deepspeed.ops.op_builder import RaggedOpsBuilder + +from ... import DSKernelBase + + +def get_q_block_size(head_size: int) -> int: + """ + Returns the query block size required by the kernel given a head size. + """ + cc_major, cc_minor = torch.cuda.get_device_capability(get_accelerator().current_device()) #ignore-cuda + + if cc_major < 8: + raise RuntimeError("Blocked attention requires CUDA compute capability >= 8.0") + + if head_size <= 64: + return 128 + elif head_size <= 160: + if cc_minor != 0: + return 64 + else: + return 128 + elif head_size == 192: + return 128 + elif head_size == 224: + if cc_minor != 0: + return 64 + else: + return 128 + else: + if cc_major == 8 and cc_minor == 0: + return 128 + else: + return 64 + + +def get_kv_block_size(head_size: int) -> int: + """ + Return preferred granulatity for blocked KV-cache implementation. 
+ """ + cc_major, cc_minor = torch.cuda.get_device_capability(get_accelerator().current_device()) #ignore-cuda + + if cc_major < 8: + raise RuntimeError("Blocked attention requires CUDA compute capability >= 8.0") + + if head_size <= 64: + return 128 + elif head_size != 160 or cc_minor != 0: + return 64 + else: + return 32 + + +class BlockedFlashAttn(DSKernelBase): + """ + Modified implementation of flash-attn-2 tuned for inference on blocked KV-cache and wider + range of input sequence lengths. + """ + + supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16] + + def __init__(self, head_size: int, dtype: DtypeEnum) -> None: + """ + Triggers any compilation of the kernels. + """ + if not isinstance(dtype, DtypeEnum): + dtype = DtypeEnum(dtype) + + if dtype not in BlockedFlashAttn.supported_dtypes: + raise ValueError("Unsupported data type: {}, supported data types are {}".format( + dtype, BlockedFlashAttn.supported_dtypes)) + + # For testing, need to revert to 32 + if head_size % 16 != 0: + raise ValueError("Head size must be divisible by 32 (configured with {})".format(head_size)) + + inf_module = RaggedOpsBuilder().load() + self.kernel = inf_module.flash_attn_by_atoms + + def __call__(self, out: torch.Tensor, q: torch.Tensor, k: torch.Tensor, v: torch.Tensor, atoms: torch.Tensor, + softmax_scale: float) -> torch.Tensor: + """ + Flash attention implementation atop a blocked KV-cache. Atoms should be pre-populated. + See attention_atom.h for further details on the structure of the information. + + Arguments: + out (torch.Tensor): Output tensor of shape [tokens, hidden_size] + q (torch.Tensor): Query tensor of shape [tokens, hidden_size] + k (torch.Tensor): Key cache tensor of shape [n_blocks, block_size, n_heads_kv, head_size]. This Tensor only needs to be contiguous on the final dimension. + v (torch.Tensor): Value cache tensor of shape [n_blocks, block_size, n_heads_kv, head_size]. This Tensor only needs to be contiguous on the final dimension. + atoms (torch.Tensor): Atom information tensor of shape [num_atoms, 8] and type int32. + Not all data is readable in this format. See attention_atom.h for further details. + softmax_scale (float): Softmax scale factor. + + Returns: + out (torch.Tensor): Output tensor of shape [tokens, hidden_size] + """ + self.kernel(out, q, k, v, atoms, softmax_scale, True) + return out diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/flash.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/flash.h new file mode 100644 index 0000000000000000000000000000000000000000..b4a53e6d7f52649b5d4ba81cfbc9e678d7edc1ac --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/blocked_flash/flash.h @@ -0,0 +1,74 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +/****************************************************************************** +Copyright (c) 2023, Tri Dao. + ******************************************************************************/ + +#pragma once + +#include +#include + +#include "attention_atom.h" + +constexpr int TOTAL_DIM = 0; +constexpr int H_DIM = 1; +constexpr int D_DIM = 2; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +struct Qkv_params { + using index_t = uint32_t; + // The QKV matrices. 
+ void* __restrict__ q_ptr; + void* __restrict__ k_ptr; + void* __restrict__ v_ptr; + + // The stride between rows of the Q, K and V matrices. + index_t q_row_stride; + index_t k_row_stride; + index_t v_row_stride; + index_t q_head_stride; + index_t k_head_stride; + index_t v_head_stride; + + // The number of heads. + int h, h_k; + // In the case of multi-query and grouped-query attention (MQA/GQA), nheads_k could be + // different from nheads (query). + int h_h_k_ratio; // precompute h / h_k, +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +struct Flash_fwd_params : public Qkv_params { + // The O matrix (output). + void* __restrict__ o_ptr; + + // The attention metadata + AttentionAtom* __restrict__ atoms; + + // Total attention atoms + int num_atoms; + + // The stride between rows of O. + index_t o_row_stride; + index_t o_head_stride; + + // The dimensions + int d, d_rounded; + + // The scaling factors for the kernel. + float scale_softmax; + float scale_softmax_log2; + + bool is_bf16; + bool is_causal; +}; + +//////////////////////////////////////////////////////////////////////////////////////////////////// + +void run_mha_fwd(Flash_fwd_params& params, cudaStream_t stream); diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d6b8e6047d74787c07286cd792bbbbba9978b9c5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .embed import RaggedEmbeddingKernel diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3590d14f599cbd68c1139e02f607df625a7cde7e Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/__pycache__/embed.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/__pycache__/embed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee9ca6e8f1fa8858d51a1d9088bf1eaee710aa14 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/__pycache__/embed.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.cpp b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.cpp new file mode 100644 index 0000000000000000000000000000000000000000..04b72bf948db6b2e86c74d19264def509ed26c70 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.cpp @@ -0,0 +1,101 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "embed.h" +#include "ragged_kernel_helpers.h" + +#ifdef BF16_AVAILABLE +#define DISPATCH_FOR_FLOAT(DTYPE, ...) 
\ + [&] { \ + if (DTYPE == torch::kFloat32) { \ + using float_t = float; \ + return __VA_ARGS__(); \ + } else if (DTYPE == torch::kFloat16) { \ + using float_t = __half; \ + return __VA_ARGS__(); \ + } else if (DTYPE == torch::kBFloat16) { \ + using float_t = __nv_bfloat16; \ + return __VA_ARGS__(); \ + } else { \ + TORCH_CHECK(false, "Unsupported dispatch type"); \ + } \ + }() +#else +#define DISPATCH_FOR_FLOAT(DTYPE, ...) \ + [&] { \ + if (DTYPE == torch::kFloat32) { \ + using float_t = float; \ + return __VA_ARGS__(); \ + } else if (DTYPE == torch::kFloat16) { \ + using float_t = __half; \ + return __VA_ARGS__(); \ + } else { \ + TORCH_CHECK(false, "Unsupported dispatch type"); \ + } \ + }() +#endif + +#define DISPATCH_FOR_INT(DTYPE, ...) \ + [&] { \ + if (DTYPE == torch::kInt32) { \ + using int_t = int32_t; \ + return __VA_ARGS__(); \ + } else if (DTYPE == torch::kInt64) { \ + using int_t = int64_t; \ + return __VA_ARGS__(); \ + } else { \ + TORCH_CHECK(false, "Unsupported dispatch type"); \ + } \ + }() + +/* +Embeddings kernel aware of ragged batch structure. +*/ +void ragged_embed(torch::Tensor& embedded_tokens, + torch::Tensor& input_ids, + torch::Tensor& embedding_weight, + c10::optional& position_embedding_weight, + int32_t pos_embed_offset, + torch::Tensor& batch_metadata, + torch::Tensor& seq_metadata, + torch::Tensor& tokens_to_seq, + torch::Tensor& kv_ptrs) +{ + // We don't care about KV cache here, so just hardcoding 0s for block_size/num_blocks + BatchWrapperCPP batch_wrapper = + make_cpp_batch_wrapper(batch_metadata, seq_metadata, tokens_to_seq, kv_ptrs, 0, 0); + + const int32_t n_tokens = input_ids.numel(); + const int32_t embed_dim = embedding_weight.size(1); + const int32_t vocab_size = embedding_weight.size(0); + + DISPATCH_FOR_INT(input_ids.scalar_type(), [&] { + DISPATCH_FOR_FLOAT(embedding_weight.scalar_type(), [&] { + float_t* pos_embed_ptr = nullptr; + int32_t max_position_embed_idx = 0; + if (position_embedding_weight.has_value()) { + TORCH_CHECK( + position_embedding_weight.value().options().dtype() == + embedding_weight.options().dtype(), + "position_embedding_weight and embedding_weight must have the same dtype"); + pos_embed_ptr = + reinterpret_cast(position_embedding_weight.value().data_ptr()); + max_position_embed_idx = position_embedding_weight.value().size(0) - 1; + } + + launch_ragged_embed_kernel((float_t*)embedded_tokens.data_ptr(), + (const int_t*)input_ids.data_ptr(), + (const float_t*)embedding_weight.data_ptr(), + pos_embed_ptr, + batch_wrapper, + n_tokens, + embed_dim, + vocab_size, + max_position_embed_idx, + pos_embed_offset, + at::cuda::getCurrentCUDAStream()); + }); + }); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.cuh b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.cuh new file mode 100644 index 0000000000000000000000000000000000000000..94c397439b80d386f6a928b3630072a3a86ec964 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.cuh @@ -0,0 +1,26 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include "ds_kernel_utils.h" +#include "ragged_dtypes.h" + +#ifdef BF16_AVAILABLE +#include +#endif + +template +void launch_ragged_embed_kernel(EmbedType* embedded_tokens, + const TokenType* input_ids, + const EmbedType* embedding_weight, + const EmbedType* position_weight, + const BatchWrapperCPP batch_desc, + const int32_t n_tokens, + const int32_t embed_dim, + const int32_t vocab_size, + const int32_t max_position_embed_idx, + const int32_t position_embed_offset, + cudaStream_t stream); diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.h new file mode 100644 index 0000000000000000000000000000000000000000..7897c1362669695133cf051c7a3dc319ed2a0783 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.h @@ -0,0 +1,23 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include +#include +#include "embed.cuh" + +/* +Embeddings kernel aware of ragged batch structure. +*/ +void ragged_embed(torch::Tensor& embedded_tokens, + torch::Tensor& input_ids, + torch::Tensor& embedding_weight, + c10::optional& position_weight, + int32_t position_embed_offset, + torch::Tensor& batch_metadata, + torch::Tensor& seq_metadata, + torch::Tensor& tokens_to_seq, + torch::Tensor& kv_ptrs); diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.py new file mode 100644 index 0000000000000000000000000000000000000000..0443ce3fdd8eee3787b16c3baa1604f3f63d211b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed.py @@ -0,0 +1,67 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Optional + +import torch + +from ... import DSKernelBase +from deepspeed.ops.op_builder import RaggedOpsBuilder +from ....inference_utils import elem_size +from ....ragged import RaggedBatchWrapper + + +class RaggedEmbeddingKernel(DSKernelBase): + """ + Ragged-aware CUDA kernel implementation for an embedding lookup. This will only lookup + the necessary tokens for a padded batch (i.e. if we are CGed and running with a slightly + larger batch size than the actual tokens). + """ + + supported_dtypes = [torch.float16, torch.bfloat16, torch.float32] + supported_token_dtypes = [torch.int32, torch.int64] + + def __init__(self, embed_dtype: torch.dtype, token_dtype: torch.dtype, embed_dim: int) -> None: + """ + Args: + fp_dtype (torch.dtype): Data type of the embedding table and output dtype. + Supported values are torch.float16, torch.bfloat16, and torch.float32. + token_dtype (torch.dtype): Data type of the token ids. Supported values are + torch.int32 and torch.int64. + embed_dim (int): Embedding dimension. Must be aligned to 16 bytes. 
+ """ + if embed_dtype not in RaggedEmbeddingKernel.supported_dtypes: + raise ValueError("Unsupported embedding data type: {}, supported_dtypes are {}".format( + embed_dtype, RaggedEmbeddingKernel.supported_dtypes)) + + if token_dtype not in RaggedEmbeddingKernel.supported_token_dtypes: + raise ValueError("Unsupported token data type: {}, supported_dtypes are {}".format( + token_dtype, RaggedEmbeddingKernel.supported_token_dtypes)) + + if elem_size(embed_dtype) * embed_dim % 16 != 0: + raise ValueError("Embedding dimension must be aligned to 16 bytes, got {}".format(embed_dim)) + + inf_module = RaggedOpsBuilder().load() + self.kernel = inf_module.ragged_embed + + def __call__(self, + embedded_tokens: torch.Tensor, + ragged_wrapper: RaggedBatchWrapper, + embedding_weight: torch.Tensor, + position_embed_weight: Optional[torch.Tensor] = None, + position_embed_offset: int = 0) -> torch.Tensor: + """ + Ragged aware embedding lookup. + + Args: + embedded_tokens (torch.Tensor): Output tensor of shape [num_tokens, embed_dim] + ragged_wrapper (RaggedBatchWrapper): Wrapper for the ragged batch. + embedding_weight (torch.Tensor): Embedding table of shape [vocab_size, embed_dim] + """ + self.kernel(embedded_tokens, ragged_wrapper.input_ids(), + embedding_weight, position_embed_weight, position_embed_offset, + ragged_wrapper.batch_metadata_buffer(), ragged_wrapper.inflight_seq_descriptors(), + ragged_wrapper.tokens_to_seq(), ragged_wrapper.kv_ptrs()) + return embedded_tokens diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed_cuda.cu b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..81d6d534ddf5b4c9415786ec22df17c834fb8a37 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/embed/embed_cuda.cu @@ -0,0 +1,137 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "ds_kernel_utils.h" +#include "embed.cuh" +#include "memory_access_utils.h" +#include "ragged_dtypes.h" + +namespace embed { + +constexpr int granularity = 16; +constexpr int threads = 512; + +} // namespace embed + +template +__global__ void ragged_embed_kernel(EmbedType* embedded_tokens, + const TokenType* input_ids, + const EmbedType* embedding_weight, + const EmbedType* position_weight, + const BatchWrapperCPP batch_desc, + const int32_t embed_dim, + const int32_t vocab_size, + const int32_t max_position_embed_idx, + const int32_t position_embed_offset) +{ + constexpr int T_vector = embed::granularity / sizeof(EmbedType); + + const int32_t token_idx = blockIdx.y; + + // It's possible our batch is padded (under CG conditions typically) + if (token_idx >= batch_desc.batch_metadata->n_tokens) return; + + TokenType token_value = input_ids[token_idx]; + + if (token_value >= vocab_size || token_value < 0) { + // TODO(cmikeh2): This is invalid, but not sure how we want to handle it being invalid + // yet. 
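        // As written, an out-of-range id falls through to the early return below,
        // leaving this token's row of embedded_tokens unwritten.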
+ return; + } + + const EmbedType* embedding_row = embedding_weight + token_value * embed_dim; + EmbedType* dest_row = embedded_tokens + token_idx * embed_dim; + + const int channel_offset = (threadIdx.x + embed::threads * blockIdx.x) * T_vector; + + if (channel_offset < embed_dim) { + EmbedType reg_buf[T_vector]; + + mem_access::load_global<embed::granularity>(reg_buf, embedding_row + channel_offset); + + if (position_weight != nullptr) { + // Map the token to its global idx (indirect memory accesses aren't great but whatever) + const int32_t seq_idx = batch_desc.tokens_to_seq[token_idx]; + const InflightSeqDescriptor seq_desc = batch_desc.seq_metadata[seq_idx]; + int32_t pos_emb_idx = seq_desc.seen_tokens + (token_idx - seq_desc.start_idx); + + // Position embed offset is an OPT-specific feature I think? + pos_emb_idx = pos_emb_idx + position_embed_offset; + + // Clamp the position embedding index into the valid range [0, max_position_embed_idx]. + pos_emb_idx = (pos_emb_idx < 0) ? 0 : pos_emb_idx; + pos_emb_idx = (pos_emb_idx >= max_position_embed_idx) ? max_position_embed_idx + : pos_emb_idx; + + const EmbedType* position_embedding_row = position_weight + pos_emb_idx * embed_dim; + + EmbedType pos_buf[T_vector]; + mem_access::load_global<embed::granularity>(pos_buf, + position_embedding_row + channel_offset); + +#pragma unroll + for (int i = 0; i < T_vector; i++) { reg_buf[i] += pos_buf[i]; } + } + + mem_access::store_global<embed::granularity>(dest_row + channel_offset, reg_buf); + } +} + +template <typename TokenType, typename EmbedType> +void launch_ragged_embed_kernel(EmbedType* embedded_tokens, + const TokenType* input_ids, + const EmbedType* embedding_weight, + const EmbedType* position_weight, + const BatchWrapperCPP batch_desc, + const int32_t n_tokens, + const int32_t embed_dim, + const int32_t vocab_size, + const int32_t max_position_embed_idx, + const int32_t position_embed_offset, + cudaStream_t stream) +{ + constexpr int T_vector = embed::granularity / sizeof(EmbedType); + constexpr int elems_per_block = embed::threads * T_vector; + const int parallel_blocks = (embed_dim + elems_per_block - 1) / elems_per_block; + + const dim3 grid_dim(parallel_blocks, n_tokens, 1); + const dim3 block_dim(embed::threads, 1, 1); + + ragged_embed_kernel<TokenType, EmbedType> + <<<grid_dim, block_dim, 0, stream>>>(embedded_tokens, + input_ids, + embedding_weight, + position_weight, + batch_desc, + embed_dim, + vocab_size, + max_position_embed_idx, + position_embed_offset); +} + +#define INSTANTIATE_EMBED_FOR_TYPES(TOKEN_TYPE, EMBED_TYPE) \ + template void launch_ragged_embed_kernel( \ + EMBED_TYPE * embedded_tokens, \ + const TOKEN_TYPE* input_ids, \ + const EMBED_TYPE* embedding_weight, \ + const EMBED_TYPE* position_weight, \ + const BatchWrapperCPP batch_descriptor, \ + const int32_t n_tokens, \ + const int32_t embed_dim, \ + const int32_t vocab_size, \ + const int32_t max_position_embed_idx, \ + const int32_t position_embed_offset, \ + cudaStream_t stream); + +INSTANTIATE_EMBED_FOR_TYPES(int32_t, float) +INSTANTIATE_EMBED_FOR_TYPES(int64_t, float) + +INSTANTIATE_EMBED_FOR_TYPES(int32_t, __half) +INSTANTIATE_EMBED_FOR_TYPES(int64_t, __half) + +#ifdef BF16_AVAILABLE +INSTANTIATE_EMBED_FOR_TYPES(int32_t, __nv_bfloat16) +INSTANTIATE_EMBED_FOR_TYPES(int64_t, __nv_bfloat16) +#endif diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/includes/top_k_utils.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/includes/top_k_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..abb9e15f8f6f9349088db58c9fbde4abc71d2737 --- /dev/null +++
b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/includes/top_k_utils.h @@ -0,0 +1,15 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#define TOP_K_SWITCH(N_TOP_K, ...) \ + [&] { \ + if (1 == N_TOP_K) { \ + constexpr int CONST_TOP_K = 1; \ + __VA_ARGS__(); \ + } else if (2 == N_TOP_K) { \ + constexpr int CONST_TOP_K = 2; \ + __VA_ARGS__(); \ + } \ + }() diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..72103a0d82a1270631dec4c60a62e67923fb8448 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .logits_gather import * diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f694fbc8726e0d8453e0d94528a8142ebfab0d80 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/__pycache__/logits_gather.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/__pycache__/logits_gather.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4dd6e8bc6db31ef9be8e54d54b299cf9d4d51bbb Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/__pycache__/logits_gather.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.cpp b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1a7e7c0a21678c2a7c3b7283c919fcaaeef00aab --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.cpp @@ -0,0 +1,45 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "logits_gather.h" + +#define DISPATCH_TO_LOGITS_GATHER(T_TYPE, C_TYPE) \ + if (all_acts.options().dtype() == torch::T_TYPE) { \ + launch_logits_gather((C_TYPE*)final_token_acts.data_ptr(), \ + (const C_TYPE*)all_acts.data_ptr(), \ + batch_metadata_raw, \ + seq_metadata_raw, \ + n_seqs, \ + embed_dim, \ + at::cuda::getCurrentCUDAStream()); \ + } + +/* +Logits gather will parse the ragged batch data structure and gather only the logits that +will be used for token sampling. 
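+For a batch with n_sequences in flight, final_token_acts is [n_sequences, embed_dim]: row i +receives the activations of the last token of sequence i, read out of the flat +[n_tokens, embed_dim] activation buffer.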
+*/ +void gather_for_logits(torch::Tensor& final_token_acts, + torch::Tensor& all_acts, + torch::Tensor& batch_metadata, + torch::Tensor& seq_metadata) +{ + const RaggedBatchDescriptor* batch_metadata_raw = + reinterpret_cast<const RaggedBatchDescriptor*>(batch_metadata.data_ptr()); + + const InflightSeqDescriptor* seq_metadata_raw = + reinterpret_cast<const InflightSeqDescriptor*>(seq_metadata.data_ptr()); + + const int n_seqs = final_token_acts.size(0); + const int embed_dim = final_token_acts.size(1); + + TORCH_CHECK(all_acts.scalar_type() == final_token_acts.scalar_type(), + "all_acts and final_token_acts must have the same scalar type"); + + DISPATCH_TO_LOGITS_GATHER(kFloat, float) + DISPATCH_TO_LOGITS_GATHER(kHalf, half) +#ifdef BF16_AVAILABLE + DISPATCH_TO_LOGITS_GATHER(kBFloat16, __nv_bfloat16) +#endif +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.cuh b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.cuh new file mode 100644 index 0000000000000000000000000000000000000000..c4e84c05e6d8d355d3af5f7f0b5f1fff60e117b2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.cuh @@ -0,0 +1,22 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include "ds_kernel_utils.h" +#include "ragged_dtypes.h" + +#ifdef BF16_AVAILABLE +#include <cuda_bf16.h> +#endif + +template <typename T> +void launch_logits_gather(T* final_token_acts, + const T* all_acts, + const RaggedBatchDescriptor* batch_metadata, + const InflightSeqDescriptor* seq_metadata, + const int32_t n_seqs, + const int32_t embed_dim, + cudaStream_t stream); diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.h new file mode 100644 index 0000000000000000000000000000000000000000..73a855984daadb42f04a84bca25d6d22fe3e993c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.h @@ -0,0 +1,20 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include <c10/cuda/CUDAStream.h> +#include <torch/extension.h> +#include "logits_gather.cuh" +#include "ragged_dtypes.h" + +/* +Logits gather will parse the ragged batch data structure and gather only the logits that +will be used for token sampling. +*/ +void gather_for_logits(torch::Tensor& final_token_acts, + torch::Tensor& all_acts, + torch::Tensor& batch_metadata, + torch::Tensor& seq_metadata); diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.py new file mode 100644 index 0000000000000000000000000000000000000000..64b453e9e9e3ef270c1e22d5b8bb7317ccdff128 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather.py @@ -0,0 +1,52 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from ...
import DSKernelBase +from deepspeed.ops.op_builder import RaggedOpsBuilder +from ....inference_utils import elem_size +from ....ragged import RaggedBatchWrapper + + +class RaggedLogitsGather(DSKernelBase): + """ + CUDA kernel implementation for gathering the hidden states of the final token + of each sequence. This is used to reduce the cost of performing the unembedding. + """ + + supported_dtypes = [torch.float16, torch.bfloat16, torch.float32] + + def __init__(self, model_dim: int, fp_dtype: torch.dtype): + """ + Args: + model_dim (int): Model hidden dimension. Must be aligned to 16 bytes. + fp_dtype (torch.dtype): Data type for the input/output. Supported values + are torch.float16, torch.bfloat16, and torch.float32. + """ + if fp_dtype not in RaggedLogitsGather.supported_dtypes: + raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format( + fp_dtype, RaggedLogitsGather.supported_dtypes)) + + if elem_size(fp_dtype) * model_dim % 16 != 0: + raise ValueError("Embedding dimension must be aligned to 16 bytes, got {}".format(model_dim)) + + inf_module = RaggedOpsBuilder().load() + self.kernel = inf_module.gather_for_logits + + def __call__(self, final_token_activations: torch.Tensor, all_activations: torch.Tensor, + ragged_wrapper: RaggedBatchWrapper) -> torch.Tensor: + """ + Gather the hidden states of the final token of each sequence from `all_activations` into + `final_token_activations`. + + Args: + final_token_activations (torch.Tensor): Output tensor of shape [num_seqs, model_dim] + all_activations (torch.Tensor): Input tensor of shape [num_tokens, model_dim] + ragged_wrapper (RaggedBatchWrapper): Wrapper for the ragged batch. + """ + + self.kernel(final_token_activations, all_activations, ragged_wrapper.batch_metadata_buffer(), + ragged_wrapper.inflight_seq_descriptors()) + return final_token_activations diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather_cuda.cu b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..a539888ff9045277014016d854251a86ef6c035d --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/logits_gather/logits_gather_cuda.cu @@ -0,0 +1,86 @@ +// Copyright (c) Microsoft Corporation.
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "ds_kernel_utils.h" +#include "logits_gather.cuh" +#include "memory_access_utils.h" +#include "ragged_dtypes.h" + +namespace logits_gather { + +constexpr int granularity = 16; +constexpr int threads = 512; + +} // namespace logits_gather + +template <typename T> +__global__ void logits_gather_kernel(T* final_token_acts, + const T* token_acts, + const RaggedBatchDescriptor* ragged_batch, + const InflightSeqDescriptor* inflight_batch, + const int32_t embed_dim) +{ + constexpr int T_vector = logits_gather::granularity / sizeof(T); + + const int32_t seq_id = blockIdx.y; + + // It's possible we've padded the output Tensor (under CG conditions) + if (seq_id >= ragged_batch->n_sequences) return; + + const InflightSeqDescriptor seq = inflight_batch[seq_id]; + const int final_token_idx = seq.start_idx + seq.n_tokens - 1; + + const int token_offset = final_token_idx * embed_dim; + const int thread_offset = + threadIdx.x * T_vector + blockIdx.x * logits_gather::threads * T_vector; + + const int final_token_offset = seq_id * embed_dim; + + T reg_buf[T_vector]; + + if (thread_offset < embed_dim) { + mem_access::load_global<logits_gather::granularity>( + reg_buf, token_acts + token_offset + thread_offset); + + mem_access::store_global<logits_gather::granularity>( + final_token_acts + final_token_offset + thread_offset, reg_buf); + } +} + +template <typename T> +void launch_logits_gather(T* final_token_acts, + const T* all_acts, + const RaggedBatchDescriptor* ragged_batch, + const InflightSeqDescriptor* inflight_batch, + const int32_t n_seqs, + const int32_t embed_dim, + cudaStream_t stream) +{ + constexpr int T_vector = logits_gather::granularity / sizeof(T); + constexpr int elems_per_block = logits_gather::threads * T_vector; + const int parallel_blocks = (embed_dim + elems_per_block - 1) / elems_per_block; + + const dim3 grid(parallel_blocks, n_seqs, 1); + const dim3 block(logits_gather::threads, 1, 1); + + logits_gather_kernel<T><<<grid, block, 0, stream>>>( + final_token_acts, all_acts, ragged_batch, inflight_batch, embed_dim); +} + +#define INSTANTIATE_FOR_TYPE(T) \ + template void launch_logits_gather(T * final_token_acts, \ + const T* all_acts, \ + const RaggedBatchDescriptor* ragged_batch, \ + const InflightSeqDescriptor* inflight_batch, \ + const int32_t n_seqs, \ + const int32_t embed_dim, \ + cudaStream_t stream); + +INSTANTIATE_FOR_TYPE(float) +INSTANTIATE_FOR_TYPE(__half) + +#ifdef BF16_AVAILABLE +INSTANTIATE_FOR_TYPE(__nv_bfloat16) +#endif diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..096c0d984a5a2aa21fb950627d609f5f9c2c30b5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .moe_gather import * diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/moe_gather.cpp b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/moe_gather.cpp new file mode 100644 index 0000000000000000000000000000000000000000..506629406f0db2a93ada1e44fef9b7cc794009c2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/moe_gather.cpp @@ -0,0 +1,59 @@ +// Copyright (c) Microsoft Corporation.
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "moe_gather.h" +#include <c10/cuda/CUDAStream.h> + +#define DISPATCH_MOE_GATHER(T_TYPE, C_TYPE) \ + if (layer_output.options().dtype() == torch::T_TYPE) { \ + launch_moe_gather((C_TYPE*)layer_output.data_ptr(), \ + (const C_TYPE*)moe_output.data_ptr(), \ + (const float*)scores.data_ptr(), \ + (const int32_t*)mapped_slots.data_ptr(), \ + (int32_t*)expert_count.data_ptr(), \ + n_channels, \ + n_experts, \ + n_tokens, \ + n_top_k, \ + normalize_scales, \ + at::cuda::getCurrentCUDAStream()); \ + return; \ + } + +/* +Re-gather the outputs of MoE and scale them by the gating score. +*/ +void moe_gather(torch::Tensor& layer_output, + const torch::Tensor& moe_output, + const torch::Tensor& scores, + const torch::Tensor& mapped_slots, + const torch::Tensor& expert_count, + const bool normalize_scales) +{ + const int32_t n_channels = layer_output.size(1); + const int32_t n_experts = expert_count.size(0); + const int32_t n_tokens = layer_output.size(0); + const int32_t n_top_k = mapped_slots.size(1); + + TORCH_CHECK(moe_output.size(0) == n_tokens * n_top_k); + TORCH_CHECK(moe_output.size(1) == n_channels); + TORCH_CHECK(scores.size(0) == n_tokens); + TORCH_CHECK(mapped_slots.size(0) == n_tokens); + + TORCH_CHECK(scores.size(1) == n_top_k); + + TORCH_CHECK(layer_output.scalar_type() == moe_output.scalar_type()); + TORCH_CHECK(scores.scalar_type() == torch::kFloat32); + TORCH_CHECK(mapped_slots.scalar_type() == torch::kInt32); + TORCH_CHECK(expert_count.scalar_type() == torch::kInt32); + + DISPATCH_MOE_GATHER(kHalf, __half); + +#ifdef BF16_AVAILABLE + DISPATCH_MOE_GATHER(kBFloat16, __nv_bfloat16); +#endif + + TORCH_CHECK(false, "Unsupported data type for MoE gather"); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/moe_gather.cuh b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/moe_gather.cuh new file mode 100644 index 0000000000000000000000000000000000000000..b348d0cfb330369089b53d2ed20dba6a14211487 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/moe_gather.cuh @@ -0,0 +1,22 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include "ds_kernel_utils.h" +#include "ragged_dtypes.h" + +template <typename T> +void launch_moe_gather(T* layer_output, + const T* moe_output, + const float* scores, + const int32_t* mapped_slots, + int32_t* expert_counts, + const int32_t n_channels, + const int32_t n_experts, + const int32_t n_tokens, + const int32_t n_top_k, + const bool normalize_scales, + cudaStream_t stream); diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/moe_gather.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/moe_gather.h new file mode 100644 index 0000000000000000000000000000000000000000..ec9e03057eb80b6a90ce5e0281df147cbf9c223d --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/moe_gather.h @@ -0,0 +1,20 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include <c10/cuda/CUDAStream.h> +#include <torch/extension.h> +#include "moe_gather.cuh" + +/* +Re-gather the outputs of MoE and scale them by the gating score.
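+Expected shapes: layer_output [n_tokens, n_channels], moe_output [n_tokens * n_top_k, n_channels], +scores and mapped_slots [n_tokens, n_top_k], expert_counts [n_experts].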
+*/ +void moe_gather(torch::Tensor& layer_output, + const torch::Tensor& moe_output, + const torch::Tensor& scores, + const torch::Tensor& mapped_slots, + const torch::Tensor& expert_counts, + const bool normalize_scales); diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/moe_gather.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/moe_gather.py new file mode 100644 index 0000000000000000000000000000000000000000..f03938171ba45f0977eeafaf75e104fd1d7fcb04 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/moe_gather.py @@ -0,0 +1,53 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from ... import DSKernelBase +from ....inference_utils import DtypeEnum +from deepspeed.ops.op_builder import RaggedOpsBuilder + + +class MoEGather(DSKernelBase): + """ + CUDA implementation of MoE gather. This will bring the tokens back + to their original indices and perform the output scaling. + """ + + supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16] + + def __init__(self, dtype: DtypeEnum, channels: int, normalize_scores: bool = False) -> None: + + if not isinstance(dtype, DtypeEnum): + dtype = DtypeEnum(dtype) + + if dtype not in MoEGather.supported_dtypes: + raise RuntimeError(f"Unsupported dtype {dtype}") + + if channels % 8 != 0: + raise RuntimeError(f"Channels {channels} must be divisible by 8") + + inf_module = RaggedOpsBuilder().load() + self.kernel = inf_module.moe_gather + self.normalize_scores = normalize_scores + + def __call__(self, layer_output: torch.Tensor, moe_output: torch.Tensor, scores: torch.Tensor, + mapped_slots: torch.Tensor, expert_counts: torch.Tensor) -> torch.Tensor: + """ + Reorders the moe_output tokens back into their original token order and scales them by + their gating scores. This is a no-op for padded tokens. + + Arguments: + layer_output (torch.Tensor): Output tensor of shape [n_tokens, hidden_size]. On return it + holds the gathered tokens, scaled by their gating scores. + moe_output (torch.Tensor): The output of the MoE of shape [n_tokens * n_top_k, hidden_size]. + scores (torch.Tensor): The gating scores of shape [n_tokens, n_top_k]. + mapped_slots (torch.Tensor): The indices of each token's copies in the expert-ordered + ``moe_output``, of shape [n_tokens, n_top_k]; the rows gathered for token ``i`` are + ``mapped_slots[i]``. + expert_counts (torch.Tensor): The number of tokens assigned to each expert of shape [n_experts]. This is passed to fuse the clearing of this data structure into the gather. + + Returns: + layer_output + """ + self.kernel(layer_output, moe_output, scores, mapped_slots, expert_counts, self.normalize_scores) + return layer_output diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/moe_gather_cuda.cu b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/moe_gather_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..4153a2a3636fa536c0884df636c77263441c5cf7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/moe_gather/moe_gather_cuda.cu @@ -0,0 +1,169 @@ +// Copyright (c) Microsoft Corporation.
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "conversion_utils.h" +#include "ds_kernel_utils.h" +#include "moe_gather.cuh" +#include "reduction_utils.h" +#include "top_k_gating.cuh" +#include "top_k_utils.h" + +namespace gather { + +constexpr int access_granularity = 16; +constexpr int threads = 256; + +} // namespace gather + +template <typename T, int copyUnroll, int N_TOP_K> +__global__ void moe_gather_kernel(T* layer_output, + const T* moe_output, + const float* scores, + const int32_t* mapped_slots, + int32_t* expert_counts, + const int32_t n_channels, + const int32_t n_experts, + const bool normalize_scales) +{ + constexpr int32_t vector_size = gather::access_granularity / sizeof(T); + constexpr int32_t stride = vector_size * gather::threads; + + const int32_t token_idx = blockIdx.x; + int32_t token_mapped_slots[N_TOP_K]; + + bool all_slots_invalid = true; + for (int i = 0; i < N_TOP_K; i++) { + token_mapped_slots[i] = mapped_slots[token_idx * N_TOP_K + i]; + all_slots_invalid &= (token_mapped_slots[i] == gating::unassigned); + } + + if (token_idx == 0) { + // Reset expert counts for their next use. + if (threadIdx.x < n_experts) { expert_counts[threadIdx.x] = 0; } + } + + if (all_slots_invalid) { + // This token was not assigned to anything. + // TODO(cmikeh2): It's possible we want different behavior here moving forward. + return; + } + + float token_scores[N_TOP_K]; + for (int i = 0; i < N_TOP_K; i++) { token_scores[i] = scores[token_idx * N_TOP_K + i]; } + + if (normalize_scales) { + // Normalize the scores so that they sum to 1. + float sum = 0.0f; + for (int i = 0; i < N_TOP_K; i++) { sum += token_scores[i]; } + + if (sum > 0.0f) { + for (int i = 0; i < N_TOP_K; i++) { token_scores[i] /= sum; } + } + } + + const int32_t channel_offset = threadIdx.x * vector_size; + + const T* moe_output_bases[N_TOP_K]; +#pragma unroll + for (int i = 0; i < N_TOP_K; i++) { + moe_output_bases[i] = moe_output + token_mapped_slots[i] * n_channels + channel_offset; + } + + T* layer_output_base = layer_output + token_idx * n_channels + channel_offset; + +#pragma unroll + for (int i = 0; i < copyUnroll; i++) { + if (i * stride + channel_offset < n_channels) { + float accum_buffer[vector_size]; + for (int j = 0; j < vector_size; j++) { + accum_buffer[j] = reduce::init<ROpType::Add, float>(); + } + +#pragma unroll + for (int j = 0; j < N_TOP_K; j++) { + T reg_buffer[vector_size]; + mem_access::load_global<gather::access_granularity>( + reg_buffer, moe_output_bases[j] + i * stride); + +#pragma unroll + for (int k = 0; k < vector_size; k++) { + float up_cast = conversion::to<float>(reg_buffer[k]); + accum_buffer[k] += up_cast * token_scores[j]; + } + } + + T store_buffer[vector_size]; +#pragma unroll + for (int j = 0; j < vector_size; j++) { + store_buffer[j] = conversion::to<T>(accum_buffer[j]); + } + + mem_access::store_global<gather::access_granularity>(layer_output_base + i * stride, + store_buffer); + } + } +} + +#define LAUNCH_FOR_UNROLL(COUNT) \ + case COUNT: \ + moe_gather_kernel<T, COUNT, CONST_TOP_K><<<grid, block, 0, stream>>>(layer_output, \ + moe_output, \ + scores, \ + mapped_slots, \ + expert_counts, \ + n_channels, \ + n_experts, \ + normalize_scales); \ + break; + +template <typename T> +void launch_moe_gather(T* layer_output, + const T* moe_output, + const float* scores, + const int32_t* mapped_slots, + int32_t* expert_counts, + const int32_t n_channels, + const int32_t n_experts, + const int32_t n_tokens, + const int32_t n_top_k, + const bool normalize_scales, + cudaStream_t stream) +{ + constexpr int vals_per_unroll = gather::threads * gather::access_granularity / sizeof(T); + const int copy_unroll = (n_channels + vals_per_unroll - 1) /
vals_per_unroll; + + const dim3 block(gather::threads); + const dim3 grid(n_tokens); + + TOP_K_SWITCH(n_top_k, [&] { + switch (copy_unroll) { + LAUNCH_FOR_UNROLL(1) + LAUNCH_FOR_UNROLL(2) + LAUNCH_FOR_UNROLL(3) + LAUNCH_FOR_UNROLL(4) + LAUNCH_FOR_UNROLL(5) + LAUNCH_FOR_UNROLL(6) + } + }); +} + +#define INSTANTIATE_GATHER_FOR_TYPE(TYPE) \ + template void launch_moe_gather(TYPE * layer_output, \ + const TYPE* moe_output, \ + const float* scores, \ + const int32_t* mapped_slots, \ + int32_t* expert_counts, \ + const int32_t n_channels, \ + const int32_t n_experts, \ + const int32_t n_tokens, \ + const int32_t n_top_k, \ + const bool normalize_scales, \ + cudaStream_t stream); + +INSTANTIATE_GATHER_FOR_TYPE(__half) + +#ifdef BF16_AVAILABLE +INSTANTIATE_GATHER_FOR_TYPE(__nv_bfloat16) +#endif diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/ragged_helpers/ragged_dtypes.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/ragged_helpers/ragged_dtypes.h new file mode 100644 index 0000000000000000000000000000000000000000..7876b354af0df4172e53d81422208a566832870b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/ragged_helpers/ragged_dtypes.h @@ -0,0 +1,48 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include <cstdint> + +struct +#ifdef __CUDA_CC__ + __align__(8) +#endif +{ + int32_t n_tokens; + int32_t n_sequences; +} +typedef RaggedBatchDescriptor; + +struct +#ifdef __CUDA_CC__ + __align__(16) +#endif +{ + int32_t start_idx; + int32_t n_tokens; + int32_t seen_tokens; + int32_t UNUSED; // Explicit padding to match the Python code pattern. +} +typedef InflightSeqDescriptor; + +struct +#ifdef __CUDA_CC__ + __align__(8) +#endif +{ + int32_t** block_lists; + int32_t block_size; + int32_t n_blocks; +} +typedef KVCacheDescriptor; + +struct { + const RaggedBatchDescriptor* batch_metadata; // Offset 0 + const InflightSeqDescriptor* seq_metadata; // Offset 8 + const int32_t* tokens_to_seq; // Offset 16 + const KVCacheDescriptor kv_desc; // Offset 24 +} typedef BatchWrapperCPP; diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/ragged_helpers/ragged_kernel_helpers.cpp b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/ragged_helpers/ragged_kernel_helpers.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a6cb7f2753665350ef85d31c92b4b62cb388f710 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/ragged_helpers/ragged_kernel_helpers.cpp @@ -0,0 +1,28 @@ +// Copyright (c) Microsoft Corporation.
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "ragged_kernel_helpers.h" + +BatchWrapperCPP make_cpp_batch_wrapper(torch::Tensor& batch_metadata, + torch::Tensor& seq_metadata, + torch::Tensor& tokens_to_seq, + torch::Tensor& kv_cache_desc, + int32_t block_size, + int32_t n_blocks) +{ + const RaggedBatchDescriptor* batch_metadata_raw = + reinterpret_cast<const RaggedBatchDescriptor*>(batch_metadata.data_ptr()); + + const InflightSeqDescriptor* seq_metadata_raw = + reinterpret_cast<const InflightSeqDescriptor*>(seq_metadata.data_ptr()); + + const int32_t* tokens_to_seq_raw = tokens_to_seq.data_ptr<int32_t>(); + + int32_t** kv_ptrs_raw = reinterpret_cast<int32_t**>(kv_cache_desc.data_ptr()); + KVCacheDescriptor kv_desc = {kv_ptrs_raw, block_size, n_blocks}; + + BatchWrapperCPP wrapper = {batch_metadata_raw, seq_metadata_raw, tokens_to_seq_raw, kv_desc}; + return wrapper; +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/ragged_helpers/ragged_kernel_helpers.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/ragged_helpers/ragged_kernel_helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..7ce082d31853011a711cbaeb98e01181ab54a0be --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/ragged_helpers/ragged_kernel_helpers.h @@ -0,0 +1,16 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include <torch/extension.h> +#include "ragged_dtypes.h" + +BatchWrapperCPP make_cpp_batch_wrapper(torch::Tensor& batch_metadata, + torch::Tensor& seq_metadata, + torch::Tensor& tokens_to_seq, + torch::Tensor& kv_cache_desc, + int32_t block_size, + int32_t n_blocks); diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/__pycache__/top_k_gating.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/__pycache__/top_k_gating.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7d4e5dbb8523e842979272428de6acec954ccff4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/__pycache__/top_k_gating.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_19_data.npz b/venv/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_19_data.npz new file mode 100644 index 0000000000000000000000000000000000000000..90168ad4e888fba29a772ee13798ec126016140e --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_19_data.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38e8fc7b041df0b23d7e5ca15ead1a065e6467611ef9a848cc7db93f80adfd87 +size 34050 diff --git a/venv/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_6_data.npz b/venv/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_6_data.npz new file mode 100644 index 0000000000000000000000000000000000000000..35d1681786c95602c4f0d5260fc5ad0ff4236189 --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/linalg/tests/data/carex_6_data.npz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1b2a0736b541ebf5c4b9b4c00d6dab281e73c9fb9913c6e2581a781b37b602f9 +size 15878 diff --git a/venv/lib/python3.10/site-packages/scipy/special/cython_special.cpython-310-x86_64-linux-gnu.so b/venv/lib/python3.10/site-packages/scipy/special/cython_special.cpython-310-x86_64-linux-gnu.so new file mode 100644 index
0000000000000000000000000000000000000000..2dbd8a2e2a77f3142fb42223085db323ce5b6daa --- /dev/null +++ b/venv/lib/python3.10/site-packages/scipy/special/cython_special.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1bf60cae96c56c239c91bca4071923bf09cfc4ff6da80ba883a28c33cb568bfd +size 3161976 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/data/diabetes_data_raw.csv.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/data/diabetes_data_raw.csv.gz new file mode 100644 index 0000000000000000000000000000000000000000..fc968bc750f5e995ed4092180e7434b2f780b9cf --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/data/diabetes_data_raw.csv.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3e94cc7cea00f8a84fa5f6345203913a68efa42df18f87ddf9bead721bfd503 +size 7105 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/data/diabetes_target.csv.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/data/diabetes_target.csv.gz new file mode 100644 index 0000000000000000000000000000000000000000..b11a1924f6085214fbedb70b19e689b05750cd11 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/data/diabetes_target.csv.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e53f65eb811df43c206f3534bb3af0e5fed213bc37ed6ba36310157d6023803 +size 1050 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/data/digits.csv.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/data/digits.csv.gz new file mode 100644 index 0000000000000000000000000000000000000000..b655e3ffa0818ef8048d461352aaa58599baa4e0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/data/digits.csv.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:09f66e6debdee2cd2b5ae59e0d6abbb73fc2b0e0185d2e1957e9ebb51e23aa22 +size 57523
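Note: the Python wrappers added in this diff (RaggedEmbeddingKernel, RaggedLogitsGather, MoEGather) are thin bindings over the CUDA ops above. The sketch below shows roughly how the first two fit together. It is illustrative only: it assumes a CUDA build of the ragged ops, an already-populated RaggedBatchWrapper named `batch` (construction not shown in this diff), and an import path matching the package layout above; sizes such as `vocab_size`, `model_dim` and `n_seqs` are hypothetical.

# Minimal usage sketch under the assumptions stated above (not part of the diff).
import torch
from deepspeed.inference.v2.kernels.ragged_ops import RaggedEmbeddingKernel, RaggedLogitsGather

vocab_size, model_dim, n_seqs = 32000, 4096, 4   # hypothetical; model_dim must be 16-byte aligned
embed = RaggedEmbeddingKernel(torch.float16, torch.int32, model_dim)
gather = RaggedLogitsGather(model_dim, torch.float16)

embedding_weight = torch.randn(vocab_size, model_dim, dtype=torch.float16, device="cuda")
# `batch` is an existing RaggedBatchWrapper describing the in-flight sequences.
hidden = torch.empty(batch.input_ids().shape[0], model_dim, dtype=torch.float16, device="cuda")
embed(hidden, batch, embedding_weight)   # ragged token embedding lookup (position table optional)

# ... the transformer stack would run over `hidden` here ...

final_acts = torch.empty(n_seqs, model_dim, dtype=torch.float16, device="cuda")
gather(final_acts, hidden, batch)        # one row per sequence: its final token's activations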