diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..01b7b0580073c731fdabb581a845aba14ca14837 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .ds_kernel import DSKernelBase diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ds_kernel.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ds_kernel.py new file mode 100644 index 0000000000000000000000000000000000000000..8dbfa1de86a649ab9083b902e64b0b3e4a9c4057 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ds_kernel.py @@ -0,0 +1,32 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from abc import ABC, abstractmethod + + +class DSKernelBase(ABC): + + @abstractmethod + def __init__(self, *args, **kwargs): + """ + If necessary trigger compilation and warmup + Autotuning of the kernel would happen at this stage to + eliminate any potential hangs that might occur mid-deployment + Validate that the desired run configuration is compatible. + + It is not necessary to call super on this method. + """ + raise NotImplementedError() + + @abstractmethod + def __call__(self, *args, **kwargs): + """ + However the kernel needs to be called, it can be called here. Auto-tuning + should never be performed here. + + All inputs/outputs should be passed as arguments to this function. No allocations + should be performed here. + """ + raise NotImplementedError() diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..38a4ebd6fba325b4a8ee4c185e2018a71f07cff0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/__init__.py @@ -0,0 +1,13 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .atom_builder import * +from .blocked_flash import * +from .embed import * +from .linear_blocked_kv_rotary import * +from .logits_gather import * +from .moe_gather import * +from .moe_scatter import * +from .top_k_gating import * diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0e239dd6b4c789c7a202d4c62c9e21f465aa75fb --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__init__.py @@ -0,0 +1,8 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .blocked_kv_rotary import * +from .blocked_trained_kv_rotary import * +from .linear_blocked_kv_copy import * diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e8f380a81ec8d189ff1fbd86faaf6c26876efdc Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__pycache__/blocked_kv_rotary.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__pycache__/blocked_kv_rotary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..669e8150ccf6b8effd996f9a28dbd2fbb3df90c9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__pycache__/blocked_kv_rotary.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__pycache__/blocked_trained_kv_rotary.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__pycache__/blocked_trained_kv_rotary.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c46b63701331d9af037cf31697c618609fc2ffb Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__pycache__/blocked_trained_kv_rotary.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__pycache__/linear_blocked_kv_copy.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__pycache__/linear_blocked_kv_copy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6faf889d3001511def6c71f930667fddc83ab175 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/__pycache__/linear_blocked_kv_copy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.cpp b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.cpp new file mode 100644 index 0000000000000000000000000000000000000000..634a63b81a316ee942536b0eca47773915939106 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.cpp @@ -0,0 +1,195 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "blocked_kv_rotary.h" +#include "ragged_kernel_helpers.h" + +#define DISPATCH_KV_ROTARY(T_TYPE, C_TYPE) \ + if (q.options().dtype() == torch::T_TYPE) { \ + launch_kv_rotary_kernel((C_TYPE*)kv_cache.data_ptr(), \ + (C_TYPE*)q.data_ptr(), \ + (C_TYPE*)k.data_ptr(), \ + (C_TYPE*)v.data_ptr(), \ + (C_TYPE*)inv_freq_ptr, \ + rotary_dim, \ + theta_base, \ + batch_wrapper, \ + qkv_stride, \ + kv_cache_stride, \ + v_offset, \ + inv_freq_stride, \ + q_ratio, \ + head_size, \ + n_tokens, \ + n_q_heads, \ + at::cuda::getCurrentCUDAStream()); \ + } + +/* +Rotary position embeddings + copy into KV cache. This implementation assumes +that the inverse frequencies should be ready from global memory rather than +synthesized in the kernel. + +Arguments: + kv_cache: [n_blocks, block_size, 2, n_kv_heads, head_size] + q: [n_tokens, n_q_heads * head_size] + k: [n_tokens, n_kv_heads * head_size] + v: [n_tokens, n_kv_heads * head_size] + inv_freq: [max_seq_len, head_size // 2] +*/ +void kv_trained_rotary_embeddings(torch::Tensor& kv_cache, + torch::Tensor& q, + torch::Tensor& k, + torch::Tensor& v, + torch::Tensor& inv_freq, + torch::Tensor& batch_metadata, + torch::Tensor& seq_metadata, + torch::Tensor& tokens_to_seq, + torch::Tensor& kv_ptrs) +{ + const int32_t n_tokens = q.size(0); + TORCH_CHECK(n_tokens == k.size(0)); + TORCH_CHECK(n_tokens == v.size(0)); + + const float theta_base = 0.f; + const int32_t rotary_dim = inv_freq.size(0) * 2; + + // Dimensions + const int32_t block_size = kv_cache.size(1); + const int32_t n_kv_heads = kv_cache.size(3); + const int32_t head_size = kv_cache.size(4); + + // Strides + const int32_t qkv_stride = q.stride(0); // Per token + const int32_t kv_cache_stride = kv_cache.stride(1); // Per token + const int32_t v_offset = kv_cache.stride(2); // From k_cache to v_cache + const int32_t inv_freq_stride = inv_freq.stride(0); // Per token idx + + const int n_q_heads = q.size(1) / head_size; + const int q_ratio = n_q_heads / n_kv_heads; + + void* inv_freq_ptr = (void*)inv_freq.data_ptr(); + + BatchWrapperCPP batch_wrapper = make_cpp_batch_wrapper( + batch_metadata, seq_metadata, tokens_to_seq, kv_ptrs, block_size, kv_cache.size(0)); + + DISPATCH_KV_ROTARY(kHalf, __half); + +#ifdef BF16_AVAILABLE + DISPATCH_KV_ROTARY(kBFloat16, __nv_bfloat16); +#endif +} + +/* +Rotary position embeddings + copy into KV cache. This implementation assumes +that the inverse frequencies should be synthesized in the kernel. 
+ +Arguments: + kv_cache: [n_blocks, block_size, 2, n_kv_heads, head_size] + q: [n_tokens, n_q_heads * head_size] + k: [n_tokens, n_kv_heads * head_size] + v: [n_tokens, n_kv_heads * head_size] +*/ +void kv_rotary_embeddings(torch::Tensor& kv_cache, + torch::Tensor& q, + torch::Tensor& k, + torch::Tensor& v, + const int32_t rotary_dim, + const float theta_base, + torch::Tensor& batch_metadata, + torch::Tensor& seq_metadata, + torch::Tensor& tokens_to_seq, + torch::Tensor& kv_ptrs) +{ + const int32_t n_tokens = q.size(0); + TORCH_CHECK(n_tokens == k.size(0)); + TORCH_CHECK(n_tokens == v.size(0)); + + // Dimensions + const int32_t block_size = kv_cache.size(1); + const int32_t n_kv_heads = kv_cache.size(3); + const int32_t head_size = kv_cache.size(4); + + // Strides + const int32_t qkv_stride = q.stride(0); // Per token + const int32_t kv_cache_stride = kv_cache.stride(1); // Per token + const int32_t v_offset = kv_cache.stride(2); // From k_cache to v_cache + const int32_t inv_freq_stride = 0; // Per token idx + + const int n_q_heads = q.size(1) / head_size; + const int q_ratio = n_q_heads / n_kv_heads; + + void* inv_freq_ptr = nullptr; + + BatchWrapperCPP batch_wrapper = make_cpp_batch_wrapper( + batch_metadata, seq_metadata, tokens_to_seq, kv_ptrs, block_size, kv_cache.size(0)); + + DISPATCH_KV_ROTARY(kHalf, __half); + +#ifdef BF16_AVAILABLE + DISPATCH_KV_ROTARY(kBFloat16, __nv_bfloat16); +#endif +} + +#define DISPATCH_KV_COPY(T_TYPE, C_TYPE) \ + if (q.options().dtype() == torch::T_TYPE) { \ + launch_kv_copy_kernel((C_TYPE*)kv_cache.data_ptr(), \ + (C_TYPE*)q.data_ptr(), \ + (C_TYPE*)k.data_ptr(), \ + (C_TYPE*)v.data_ptr(), \ + batch_wrapper, \ + qkv_stride, \ + kv_cache_stride, \ + v_offset, \ + q_ratio, \ + head_size, \ + n_tokens, \ + n_q_heads, \ + at::cuda::getCurrentCUDAStream()); \ + } + +/* +Copy into linear KV cache. 
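+
+Arguments:
+    kv_cache: [n_blocks, block_size, 2, n_kv_heads, head_size]
+    q: [n_tokens, n_q_heads * head_size]
+    k: [n_tokens, n_kv_heads * head_size]
+    v: [n_tokens, n_kv_heads * head_size]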
+*/ +void linear_kv_copy(torch::Tensor& kv_cache, + torch::Tensor& q, + torch::Tensor& k, + torch::Tensor& v, + torch::Tensor& batch_metadata, + torch::Tensor& seq_metadata, + torch::Tensor& tokens_to_seq, + torch::Tensor& kv_ptrs) +{ + const int32_t n_tokens = q.size(0); + TORCH_CHECK(n_tokens == k.size(0)); + TORCH_CHECK(n_tokens == v.size(0)); + + // Dimensions + const int32_t block_size = kv_cache.size(1); + const int32_t n_kv_heads = kv_cache.size(3); + const int32_t head_size = kv_cache.size(4); + + // Strides + const int32_t qkv_stride = q.stride(0); // Per token + TORCH_CHECK(qkv_stride == k.stride(0)); + TORCH_CHECK(qkv_stride == v.stride(0)); + + const int32_t kv_cache_stride = kv_cache.stride(1); // Per token + const int32_t v_offset = kv_cache.stride(2); // From k_cache to v_cache + + const int n_q_heads = q.size(1) / head_size; + + TORCH_CHECK(n_q_heads % n_kv_heads == 0); + const int q_ratio = n_q_heads / n_kv_heads; + + BatchWrapperCPP batch_wrapper = make_cpp_batch_wrapper( + batch_metadata, seq_metadata, tokens_to_seq, kv_ptrs, block_size, kv_cache.size(0)); + + DISPATCH_KV_COPY(kHalf, __half); + +#ifdef BF16_AVAILABLE + DISPATCH_KV_COPY(kBFloat16, __nv_bfloat16); +#endif +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.cuh b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.cuh new file mode 100644 index 0000000000000000000000000000000000000000..ff24b3f5bd80673c3625e2e7023fe28a2cf33b4b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.cuh @@ -0,0 +1,47 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include "ds_kernel_utils.h" +#include "ragged_dtypes.h" + +#ifdef BF16_AVAILABLE +#include +#endif + +template +void launch_kv_rotary_kernel(T* kv_cache, + T* q, + T* k, + T* v, + T* inv_freq, + const int32_t rotary_dim, + const float theta_base, + const BatchWrapperCPP batch_desc, + const int qkv_stride, + const int kv_cache_stride, + const int v_offset, + const int inv_freq_stride, + const int q_ratio, + const int head_size, + const int n_tokens, + const int n_q_heads, + cudaStream_t stream); + +template +void launch_kv_copy_kernel(T* kv_cache, + T* q, + T* k, + T* v, + const BatchWrapperCPP batch_desc, + const int qkv_stride, + const int kv_cache_stride, + const int v_offset, + const int q_ratio, + const int head_size, + const int n_tokens, + const int n_q_heads, + cudaStream_t stream); diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.h new file mode 100644 index 0000000000000000000000000000000000000000..c0700eda714706e29692dc2f82613cf992a64cfd --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.h @@ -0,0 +1,65 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include +#include +#include "blocked_kv_rotary.cuh" + +/* +Rotary position embeddings + copy into KV cache. This implementation assumes +that the inverse frequencies should be ready from global memory rather than +synthesized in the kernel. 
+ +Arguments: + kv_cache: [n_blocks, block_size, 2, n_kv_heads, head_size] + q: [n_tokens, n_q_heads * head_size] + k: [n_tokens, n_kv_heads * head_size] + v: [n_tokens, n_kv_heads * head_size] + inv_freq: [max_seq_len, head_size // 2] +*/ +void kv_trained_rotary_embeddings(torch::Tensor& kv_cache, + torch::Tensor& q, + torch::Tensor& k, + torch::Tensor& v, + torch::Tensor& inv_freq, + torch::Tensor& batch_metadata, + torch::Tensor& seq_metadata, + torch::Tensor& tokens_to_seq, + torch::Tensor& kv_ptrs); + +/* +Rotary position embeddings + copy into KV cache. This implementation assumes +that the inverse frequencies should be synthesized in the kernel. + +Arguments: + kv_cache: [n_blocks, block_size, 2, n_kv_heads, head_size] + q: [n_tokens, n_q_heads * head_size] + k: [n_tokens, n_kv_heads * head_size] + v: [n_tokens, n_kv_heads * head_size] +*/ +void kv_rotary_embeddings(torch::Tensor& kv_cache, + torch::Tensor& q, + torch::Tensor& k, + torch::Tensor& v, + const int32_t rotary_dim, + const float theta_base, + torch::Tensor& batch_metadata, + torch::Tensor& seq_metadata, + torch::Tensor& tokens_to_seq, + torch::Tensor& kv_ptrs); + +/* +Copy into linear KV cache. +*/ +void linear_kv_copy(torch::Tensor& kv_cache, + torch::Tensor& q, + torch::Tensor& k, + torch::Tensor& v, + torch::Tensor& batch_metadata, + torch::Tensor& seq_metadata, + torch::Tensor& tokens_to_seq, + torch::Tensor& kv_ptrs); diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.py new file mode 100644 index 0000000000000000000000000000000000000000..7fe38d258e6c2bf7b181b48db27239609558df0e --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary.py @@ -0,0 +1,73 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from ....inference_utils import DtypeEnum +from deepspeed.ops.op_builder import RaggedOpsBuilder +from ....ragged import RaggedBatchWrapper +from ... import DSKernelBase + + +class BlockedRotaryEmbeddings(DSKernelBase): + """ + CUDA Kernel implementation that will perform rotary position embeddings on the queries and keys + before copying into a blocked KV cache. + """ + + supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16] + supported_head_sizes = [64, 80, 128] + supported_q_ratios = [1, 2, 4, 5, 8, 16, 29, 35, 36, 71] + + def __init__(self, head_size: int, n_q_heads: int, n_kv_heads: int, dtype: torch.dtype, rotary_dim: int, + theta_base: float) -> None: + """ + Args: + head_size: The size of the attention head. + q_ratio: Ratio of q heads to kv heads (for GQA) + dtype: Data type for the input/output. Supported values are torch.float16 and torch.bfloat16. 
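+            rotary_dim: Number of elements of each head to which the rotary embedding is applied.
+            theta_base: Base value (theta) used to synthesize the rotary inverse frequencies.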
+ """ + + q_ratio = n_q_heads // n_kv_heads + + if head_size not in BlockedRotaryEmbeddings.supported_head_sizes: + raise ValueError("Unsupported head size: {}, supported_head_sizes are {}".format( + head_size, BlockedRotaryEmbeddings.supported_head_sizes)) + + if q_ratio not in BlockedRotaryEmbeddings.supported_q_ratios: + raise ValueError("Unsupported q_ratio: {}, supported_q_ratios are {}".format( + q_ratio, BlockedRotaryEmbeddings.supported_q_ratios)) + + if not isinstance(dtype, DtypeEnum): + dtype = DtypeEnum(dtype) + + if dtype not in BlockedRotaryEmbeddings.supported_dtypes: + raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format( + dtype, BlockedRotaryEmbeddings.supported_dtypes)) + + inf_module = RaggedOpsBuilder().load() + self.kernel = inf_module.kv_rotary_embeddings + self.head_size = head_size + self.n_q_heads = n_q_heads + self.n_kv_heads = n_kv_heads + self.rotary_dim = rotary_dim + self.theta_base = theta_base + + def __call__(self, kv_cache: torch.Tensor, qkv: torch.Tensor, ragged_batch: RaggedBatchWrapper) -> None: + """ + Perform rotary embeddings on the queries and keys before copying into a blocked KV cache. + + Args: + kv_cache (torch.Tensor): Pre-allocated KV cache of [num_blocks, block_size, 2, n_kv_heads, head_size] + qkv: Input tensor of shape [num_tokens, head_size * (n_q_heads + 2 * n_kv_heads)] + ragged_batch: Wrapper for the ragged batch. + """ + + q = qkv[:, :self.head_size * self.n_q_heads] + k = qkv[:, self.head_size * self.n_q_heads:self.head_size * (self.n_q_heads + self.n_kv_heads)] + v = qkv[:, self.head_size * (self.n_q_heads + self.n_kv_heads):] + + self.kernel(kv_cache, q, k, v, self.rotary_dim, self.theta_base, ragged_batch.batch_metadata_buffer(), + ragged_batch.inflight_seq_descriptors(), ragged_batch.tokens_to_seq(), ragged_batch.kv_ptrs()) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary_cuda.cu b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..6f94838f55e9c6e2cfb5f0a45d12a362b4c4646d --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_kv_rotary_cuda.cu @@ -0,0 +1,385 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include "blocked_kv_rotary.cuh" +#include "conversion_utils.h" +#include "ds_kernel_utils.h" +#include "memory_access_utils.h" + +namespace cg = cooperative_groups; + +namespace kv_rot { + +constexpr int granularity = 16; +constexpr int threads = 256; + +} // namespace kv_rot + +/* +Supports head size 32, 64, 128, 256 +*/ + +template +__global__ void kv_rotary_pos_kernel(T* kv_cache, + T* q, + T* k, + T* v, + const T* inv_freq, + const int32_t rotary_dim, + const float theta_base, + const BatchWrapperCPP batch_desc, + const int qkv_stride, + const int kv_cache_stride, + const int v_offset, + const int inv_freq_stride) +{ + // Derived constexpr + constexpr int vector_T = kv_rot::granularity / sizeof(T); + constexpr int real_threads_per_head = headSize / vector_T; + constexpr int threads_per_head = paddedHeadSize / vector_T; + + constexpr int tokens_per_block = kv_rot::threads / threads_per_head; + + // CG helpers + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + cg::thread_block_tile head_group = cg::tiled_partition(tb); + + // Parallelize on the head dimension for X blocks + const int head_idx = blockIdx.x; + + const int block_seq_idx = threadIdx.x / threads_per_head; + const int base_neuron_idx = head_group.thread_rank() * vector_T; + const int half_rotary_size = rotary_dim / 2; + const int half_dim_lanes = half_rotary_size / vector_T; + const int half_idx = base_neuron_idx % half_rotary_size; + + // Multiple tokens processed by the same threadblock + const int token_idx = blockIdx.y * tokens_per_block + block_seq_idx; + const bool valid_token = token_idx < batch_desc.batch_metadata->n_tokens; + + const bool valid_thread = valid_token && (head_group.thread_rank() < real_threads_per_head); + const bool load_inv_freq = (inv_freq != nullptr) && valid_thread; + + // If we have GQA, then only one of the Q heads needs to do rotary + copy + // for each of the heads in the group. + bool need_kv = head_idx % qRatio == 0; + // Make sure the following code is warp uniform + need_kv = warp.shfl(need_kv, 0); + + const int kv_head_idx = head_idx / qRatio; + + // Ensure we don't access invalid portions of the seq_metadata + const int32_t seq_id = (valid_thread) ? batch_desc.tokens_to_seq[token_idx] : 0; + const InflightSeqDescriptor seq_desc = batch_desc.seq_metadata[seq_id]; + // This will give an invalid index if valid_thread is false, but should never affect memory. + const int32_t global_token_idx = seq_desc.seen_tokens + (token_idx - seq_desc.start_idx); + + T* q_row = q + token_idx * qkv_stride + head_idx * headSize; + T q_reg[vector_T]; + + if (need_kv) { + // The following logic assumes a linearly blocked KV cache. This means that no sparsity has + // been introduced into cache history. + const KVCacheDescriptor kv_desc = batch_desc.kv_desc; + const int32_t seq_kv_block_idx = global_token_idx / kv_desc.block_size; + const int32_t mapped_kv_block_idx = + (valid_thread) ? 
kv_desc.block_lists[seq_id][seq_kv_block_idx] : 0; + + const int32_t kv_block_offset = global_token_idx % kv_desc.block_size; + const int32_t kv_offset = + (mapped_kv_block_idx * kv_desc.block_size + kv_block_offset) * kv_cache_stride + + kv_head_idx * headSize; + + // Load indices from QKV output + T* k_row = k + token_idx * qkv_stride + kv_head_idx * headSize; + T* v_row = v + token_idx * qkv_stride + kv_head_idx * headSize; + + T k_reg[vector_T], v_reg[vector_T], inv_freq_reg[vector_T]; + + mem_access::load_global(q_reg, q_row + base_neuron_idx, valid_thread); + mem_access::load_global(k_reg, k_row + base_neuron_idx, valid_thread); + mem_access::load_global(v_reg, v_row + base_neuron_idx, valid_thread); + mem_access::load_global( + inv_freq_reg, inv_freq + half_idx, load_inv_freq); + if constexpr (doRotary) { +#pragma unroll + for (int i = 0; i < vector_T; i++) { + const int head_neuron_idx = base_neuron_idx + i; + + float inv_freq_flt; + if (inv_freq != nullptr) { + inv_freq_flt = conversion::to(inv_freq_reg[i]) * (float)global_token_idx; + } else { + inv_freq_flt = + (float)((head_neuron_idx % half_rotary_size) * 2) / (float)rotary_dim; + // Conversion to T and back means that both branches of this if statement + // will produce the same results if using the same algo for producing the + // freqs. + T trunc_freq = conversion::to(1.0 / powf(theta_base, inv_freq_flt)); + inv_freq_flt = conversion::to(trunc_freq) * (float)global_token_idx; + } + + float rotary_sign = (head_neuron_idx >= half_rotary_size) ? -1.0f : 1.0f; + float q_f = conversion::to(q_reg[i]); + float k_f = conversion::to(k_reg[i]); + float q_rot = q_f * rotary_sign; + float k_rot = k_f * rotary_sign; + + const int target_lane = (head_neuron_idx < half_rotary_size) + ? head_group.thread_rank() + half_dim_lanes + : head_group.thread_rank() - half_dim_lanes; + + const float q_rot_temp = head_group.shfl(q_rot, target_lane); + const float k_rot_temp = head_group.shfl(k_rot, target_lane); + + if (base_neuron_idx < rotary_dim) { + q_reg[i] = conversion::to(q_f * cosf(inv_freq_flt) + + q_rot_temp * sinf(inv_freq_flt)); + k_reg[i] = conversion::to(k_f * cosf(inv_freq_flt) + + k_rot_temp * sinf(inv_freq_flt)); + } + } + } + + if (valid_thread) { + mem_access::store_global(kv_cache + kv_offset + base_neuron_idx, + k_reg); + mem_access::store_global( + kv_cache + kv_offset + base_neuron_idx + v_offset, v_reg); + } + } else { + T inv_freq_reg[vector_T]; + + mem_access::load_global(q_reg, q_row + base_neuron_idx, valid_thread); + mem_access::load_global( + inv_freq_reg, inv_freq + half_idx, load_inv_freq); + + if constexpr (doRotary) { +#pragma unroll + for (int i = 0; i < vector_T; i++) { + const int head_neuron_idx = base_neuron_idx + i; + + float inv_freq_flt; + if (inv_freq != nullptr) { + inv_freq_flt = conversion::to(inv_freq_reg[i]) * (float)global_token_idx; + } else { + inv_freq_flt = + (float)((head_neuron_idx % half_rotary_size) * 2) / (float)rotary_dim; + inv_freq_flt = 1.0 / powf(theta_base, inv_freq_flt) * (float)global_token_idx; + } + + float rotary_sign = (head_neuron_idx >= half_rotary_size) ? -1.0f : 1.0f; + float q_f = conversion::to(q_reg[i]); + float q_rot = q_f * rotary_sign; + + const int target_lane = (head_neuron_idx < half_rotary_size) + ? 
head_group.thread_rank() + half_dim_lanes + : head_group.thread_rank() - half_dim_lanes; + + const float q_rot_temp = head_group.shfl(q_rot, target_lane); + if (base_neuron_idx < rotary_dim) + q_reg[i] = conversion::to(q_f * cosf(inv_freq_flt) + + q_rot_temp * sinf(inv_freq_flt)); + } + } + } + + if (valid_thread && doRotary) { + mem_access::store_global(q_row + base_neuron_idx, q_reg); + } +} + +#define DISPATCH_KV_ROTARY_IMPL(Q_RATIO, HEAD_SIZE, PADDED_HEAD_SIZE) \ + if (q_ratio == Q_RATIO && head_size == HEAD_SIZE) \ + kv_rotary_pos_kernel \ + <<>>(kv_cache, \ + q, \ + k, \ + v, \ + inv_freq, \ + rotary_dim, \ + theta_base, \ + batch_desc, \ + qkv_stride, \ + kv_cache_stride, \ + v_offset, \ + inv_freq_stride); + +#define LAUNCH_KV_ROTARY_FOR_Q_RATIO_HEAD_SIZE(Q_RATIO, HEAD_SIZE) \ + if (padded_head_size == 64) { \ + DISPATCH_KV_ROTARY_IMPL(Q_RATIO, HEAD_SIZE, 64); \ + } else if (padded_head_size == 128) { \ + DISPATCH_KV_ROTARY_IMPL(Q_RATIO, HEAD_SIZE, 128); \ + } else { \ + assert(false); \ + } + +#define LAUNCH_KV_ROTARY_FOR_Q_RATIO(Q_RATIO) \ + if (head_size == 64) { \ + LAUNCH_KV_ROTARY_FOR_Q_RATIO_HEAD_SIZE(Q_RATIO, 64); \ + } else if (head_size == 80) { \ + LAUNCH_KV_ROTARY_FOR_Q_RATIO_HEAD_SIZE(Q_RATIO, 80); \ + } else if (head_size == 128) { \ + LAUNCH_KV_ROTARY_FOR_Q_RATIO_HEAD_SIZE(Q_RATIO, 128); \ + } else { \ + assert(false); \ + } + +template +void launch_kv_rotary_kernel(T* kv_cache, + T* q, + T* k, + T* v, + T* inv_freq, + const int32_t rotary_dim, + const float theta_base, + const BatchWrapperCPP batch_desc, + const int qkv_stride, + const int kv_cache_stride, + const int v_offset, + const int inv_freq_stride, + const int q_ratio, + const int head_size, + const int n_tokens, + const int n_q_heads, + cudaStream_t stream) +{ + constexpr int vector_T = kv_rot::granularity / sizeof(T); + + const int padded_head_size = next_pow2(head_size); + const int threads_per_head = padded_head_size / vector_T; + + const int tokens_per_block = kv_rot::threads / threads_per_head; + + const dim3 block(kv_rot::threads); + const int token_blocks = (n_tokens + tokens_per_block - 1) / tokens_per_block; + const dim3 grid(n_q_heads, token_blocks); + + LAUNCH_KV_ROTARY_FOR_Q_RATIO(1) + LAUNCH_KV_ROTARY_FOR_Q_RATIO(2) + LAUNCH_KV_ROTARY_FOR_Q_RATIO(4) + LAUNCH_KV_ROTARY_FOR_Q_RATIO(5) + LAUNCH_KV_ROTARY_FOR_Q_RATIO(8) + LAUNCH_KV_ROTARY_FOR_Q_RATIO(16) + LAUNCH_KV_ROTARY_FOR_Q_RATIO(29) + LAUNCH_KV_ROTARY_FOR_Q_RATIO(35) + LAUNCH_KV_ROTARY_FOR_Q_RATIO(36) + LAUNCH_KV_ROTARY_FOR_Q_RATIO(71) +} + +#define INSTANTIATE_KV_ROTARY_KERNEL(TYPE) \ + template void launch_kv_rotary_kernel(TYPE * kv_cache, \ + TYPE * q, \ + TYPE * k, \ + TYPE * v, \ + TYPE * inv_freq, \ + const int32_t rotary_dim, \ + const float theta_base, \ + const BatchWrapperCPP batch_desc, \ + const int qkv_stride, \ + const int kv_cache_stride, \ + const int v_offset, \ + const int inv_freq_stride, \ + const int q_ratio, \ + const int head_size, \ + const int n_tokens, \ + const int n_q_heads, \ + cudaStream_t stream); + +INSTANTIATE_KV_ROTARY_KERNEL(__half) + +#ifdef BF16_AVAILABLE +INSTANTIATE_KV_ROTARY_KERNEL(__nv_bfloat16) +#endif + +#define DISPATCH_KV_COPY_IMPL(Q_RATIO, HEAD_SIZE, PADDED_HEAD_SIZE) \ + if (q_ratio == Q_RATIO && head_size == HEAD_SIZE) \ + kv_rotary_pos_kernel \ + <<>>(kv_cache, \ + q, \ + k, \ + v, \ + nullptr, \ + -1, \ + 0.f, \ + batch_desc, \ + qkv_stride, \ + kv_cache_stride, \ + v_offset, \ + 0); + +#define LAUNCH_KV_COPY_FOR_Q_RATIO_HEAD_SIZE(Q_RATIO, HEAD_SIZE) \ + if (padded_head_size == 64) { \ + 
DISPATCH_KV_COPY_IMPL(Q_RATIO, HEAD_SIZE, 64); \ + } else if (padded_head_size == 128) { \ + DISPATCH_KV_COPY_IMPL(Q_RATIO, HEAD_SIZE, 128); \ + } else { \ + assert(false); \ + } + +#define LAUNCH_KV_COPY_FOR_Q_RATIO(Q_RATIO) \ + if (head_size == 64) { \ + LAUNCH_KV_COPY_FOR_Q_RATIO_HEAD_SIZE(Q_RATIO, 64); \ + } else if (head_size == 80) { \ + LAUNCH_KV_COPY_FOR_Q_RATIO_HEAD_SIZE(Q_RATIO, 80); \ + } else if (head_size == 128) { \ + LAUNCH_KV_COPY_FOR_Q_RATIO_HEAD_SIZE(Q_RATIO, 128); \ + } else { \ + assert(false); \ + } + +template +void launch_kv_copy_kernel(T* kv_cache, + T* q, + T* k, + T* v, + const BatchWrapperCPP batch_desc, + const int qkv_stride, + const int kv_cache_stride, + const int v_offset, + const int q_ratio, + const int head_size, + const int n_tokens, + const int n_q_heads, + cudaStream_t stream) +{ + constexpr int vector_T = kv_rot::granularity / sizeof(T); + const int padded_head_size = next_pow2(head_size); + const int threads_per_head = padded_head_size / vector_T; + const int tokens_per_block = kv_rot::threads / threads_per_head; + + const dim3 block(kv_rot::threads); + const int token_blocks = (n_tokens + tokens_per_block - 1) / tokens_per_block; + const dim3 grid(n_q_heads, token_blocks); + + LAUNCH_KV_COPY_FOR_Q_RATIO(1) + LAUNCH_KV_COPY_FOR_Q_RATIO(2) + LAUNCH_KV_COPY_FOR_Q_RATIO(4) + LAUNCH_KV_COPY_FOR_Q_RATIO(5) + LAUNCH_KV_COPY_FOR_Q_RATIO(8) +} + +#define INSTANTIATE_KV_COPY_KERNEL(TYPE) \ + template void launch_kv_copy_kernel(TYPE * kv_cache, \ + TYPE * q, \ + TYPE * k, \ + TYPE * v, \ + const BatchWrapperCPP batch_desc, \ + const int qkv_stride, \ + const int kv_cache_stride, \ + const int v_offset, \ + const int q_ratio, \ + const int head_size, \ + const int n_tokens, \ + const int n_q_heads, \ + cudaStream_t stream); + +INSTANTIATE_KV_COPY_KERNEL(__half) + +#ifdef BF16_AVAILABLE +INSTANTIATE_KV_COPY_KERNEL(__nv_bfloat16) +#endif diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_trained_kv_rotary.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_trained_kv_rotary.py new file mode 100644 index 0000000000000000000000000000000000000000..f8c5b2b138048817b00e21caba1949fe6d233f3d --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/blocked_trained_kv_rotary.py @@ -0,0 +1,76 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from ....inference_utils import DtypeEnum +from deepspeed.ops.op_builder import RaggedOpsBuilder +from ....ragged import RaggedBatchWrapper +from ... import DSKernelBase + + +class BlockedTrainedRotaryEmbeddings(DSKernelBase): + """ + CUDA Kernel implementation that will perform rotary position embeddings on the queries and keys + before copying into a blocked KV cache. + """ + + supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16] + supported_head_sizes = [64, 80, 128] + supported_q_ratios = [1, 2, 4, 5, 8] + + def __init__(self, head_size: int, n_q_heads: int, n_kv_heads: int, dtype: torch.dtype) -> None: + """ + Args: + head_size: The size of the attention head. + dtype: Data type for the input/output. Supported values are torch.float16 and torch.bfloat16. 
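+            n_q_heads: Number of query heads in the fused QKV activation.
+            n_kv_heads: Number of key/value heads (may be smaller than n_q_heads for GQA).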
+ """ + + q_ratio = n_q_heads // n_kv_heads + + if head_size not in BlockedTrainedRotaryEmbeddings.supported_head_sizes: + raise ValueError("Unsupported head size: {}, supported_head_sizes are {}".format( + head_size, BlockedTrainedRotaryEmbeddings.supported_head_sizes)) + + if q_ratio not in BlockedTrainedRotaryEmbeddings.supported_q_ratios: + raise ValueError("Unsupported q_ratio: {}, supported_q_ratios are {}".format( + q_ratio, BlockedTrainedRotaryEmbeddings.supported_q_ratios)) + + if not isinstance(dtype, DtypeEnum): + dtype = DtypeEnum(dtype) + + if dtype not in BlockedTrainedRotaryEmbeddings.supported_dtypes: + raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format( + dtype, BlockedTrainedRotaryEmbeddings.supported_dtypes)) + + inf_module = RaggedOpsBuilder().load() + self.kernel = inf_module.kv_trained_rotary_embeddings + self.head_size = head_size + self.n_q_heads = n_q_heads + self.n_kv_heads = n_kv_heads + + def __call__(self, kv_cache: torch.Tensor, qkv: torch.Tensor, ragged_batch: RaggedBatchWrapper, + inverse_freqs: torch.Tensor) -> None: + """ + Perform rotary embeddings on the queries and keys before copying into a blocked KV cache. + + Args: + kv_cache (torch.Tensor): Pre-allocated KV cache of [num_blocks, block_size, 2, n_kv_heads, head_size] + qkv: Input tensor of shape [num_tokens, head_size * (n_q_heads + 2 * n_kv_heads)] + ragged_batch: Wrapper for the ragged batch. + inverse_freqs: Inverse frequencies for the rotary embeddings. Shape [max_seq_len, rotary_dim // 2] + """ + + q = qkv[:, :self.head_size * self.n_q_heads] + k = qkv[:, self.head_size * self.n_q_heads:self.head_size * (self.n_q_heads + self.n_kv_heads)] + v = qkv[:, self.head_size * (self.n_q_heads + self.n_kv_heads):] + + self.kernel(kv_cache, q, k, v, inverse_freqs, ragged_batch.batch_metadata_buffer(), + ragged_batch.inflight_seq_descriptors(), ragged_batch.tokens_to_seq(), ragged_batch.kv_ptrs()) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/linear_blocked_kv_copy.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/linear_blocked_kv_copy.py new file mode 100644 index 0000000000000000000000000000000000000000..a885eadd78a195df504a96adc12dc7b6a518c096 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/linear_blocked_kv_rotary/linear_blocked_kv_copy.py @@ -0,0 +1,74 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from ....inference_utils import DtypeEnum +from ....ragged import RaggedBatchWrapper +from deepspeed.ops.op_builder import RaggedOpsBuilder +from ... import DSKernelBase + + +class LinearBlockedKVCopy(DSKernelBase): + """ + CUDA Kernel implementation that will perform rotary position embeddings on the queries and keys + before copying into a blocked KV cache. + """ + + supported_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16] + supported_head_sizes = [64, 80, 128] + supported_q_ratios = [1, 2, 4, 5, 8] + + def __init__(self, head_size: int, n_q_heads: int, n_kv_heads: int, dtype: torch.dtype) -> None: + """ + Args: + head_size: The size of the attention head. + dtype: Data type for the input/output. Supported values are torch.float16 and torch.bfloat16. 
+ """ + + q_ratio = n_q_heads // n_kv_heads + + if head_size not in LinearBlockedKVCopy.supported_head_sizes: + raise ValueError("Unsupported head size: {}, supported_head_sizes are {}".format( + head_size, LinearBlockedKVCopy.supported_head_sizes)) + + if q_ratio not in LinearBlockedKVCopy.supported_q_ratios: + raise ValueError("Unsupported q_ratio: {}, supported_q_ratios are {}".format( + q_ratio, LinearBlockedKVCopy.supported_q_ratios)) + + if not isinstance(dtype, DtypeEnum): + dtype = DtypeEnum(dtype) + + if dtype not in LinearBlockedKVCopy.supported_dtypes: + raise ValueError("Unsupported data type: {}, supported_dtypes are {}".format( + dtype, LinearBlockedKVCopy.supported_dtypes)) + + inf_module = RaggedOpsBuilder().load() + self.kernel = inf_module.linear_kv_copy + self.head_size = head_size + self.n_q_heads = n_q_heads + self.n_kv_heads = n_kv_heads + + def __call__(self, kv_cache: torch.Tensor, qkv: torch.Tensor, ragged_batch: RaggedBatchWrapper) -> None: + """ + Perform rotary embeddings on the queries and keys before copying into a blocked KV cache. + + Args: + kv_cache (torch.Tensor): Pre-allocated KV cache of [num_blocks, block_size, 2, n_kv_heads, head_size] + qkv: Input tensor of shape [num_tokens, head_size * (n_q_heads + 2 * n_kv_heads)] + ragged_batch: Wrapper for the ragged batch. + """ + + q = qkv[:, :self.head_size * self.n_q_heads] + k = qkv[:, self.head_size * self.n_q_heads:self.head_size * (self.n_q_heads + self.n_kv_heads)] + v = qkv[:, self.head_size * (self.n_q_heads + self.n_kv_heads):] + + self.kernel(kv_cache, q, k, v, ragged_batch.batch_metadata_buffer(), ragged_batch.inflight_seq_descriptors(), + ragged_batch.tokens_to_seq(), ragged_batch.kv_ptrs()) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/ragged_ops.cpp b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/ragged_ops.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f320f46e2620408f97ef5eb2c12855aaa68e51fe --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/ragged_ops.cpp @@ -0,0 +1,48 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include + +#include "atom_builder.h" +#include "blocked_flash.h" +#include "blocked_kv_rotary.h" +#include "embed.h" +#include "logits_gather.h" +#include "moe_gather.h" +#include "moe_scatter.h" +#include "top_k_gating.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + // atom_builder.h + m.def("build_atoms", &build_atoms, "Host kernel for building the atoms."); + + // blocked_flash.h + m.def("flash_attn_by_atoms", + &flash_attn_by_atoms, + "Blocked flash attention scheduled with atoms"); + + // blocked_kv_rotary.h + m.def("kv_rotary_embeddings", &kv_rotary_embeddings, "KV rotary embedding for blocked KV"); + m.def("kv_trained_rotary_embeddings", + &kv_trained_rotary_embeddings, + "KV rotary embeddings for blocked KV"); + m.def("linear_kv_copy", &linear_kv_copy, "Linear copy for blocked KV"); + + // embed.h + m.def("ragged_embed", &ragged_embed, "Embedding lookup for ragged batch"); + + // logits_gather.h + m.def("gather_for_logits", &gather_for_logits, "Sparse gather from ragged batch"); + + // moe_gather.h + m.def("moe_gather", &moe_gather, "MoE gather for top-1-gating."); + + // moe_scatter.h + m.def("moe_scatter", &moe_scatter, "MoE scatter for top-1-gating."); + + // top_k_gating.h + m.def("top_k_gating", &top_k_gating, "Top-1 gating for MoE with ragged batch awareness."); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..487735b015b0f724de0fc2d33a5db19fd0ae4655 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .top_k_gating import RaggedTopKGating diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f09240da0961908a2ddca6a9f6d7d5f2c49dce0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.cpp b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5eec7e2b955fa096fb3d44c4eace2b649cfee0ac --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.cpp @@ -0,0 +1,61 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "top_k_gating.h" +#include + +#define DISPATCH_TOP_K_GATING(T_TYPE, C_TYPE) \ + if (logits.options().dtype() == torch::T_TYPE) { \ + launch_top_k_gating((int32_t*)expert_counts.data_ptr(), \ + (float*)scores.data_ptr(), \ + (int32_t*)assignments.data_ptr(), \ + (int32_t*)offsets.data_ptr(), \ + (const C_TYPE*)logits.data_ptr(), \ + batch_metadata_ptr, \ + n_tokens, \ + n_experts, \ + n_top_k, \ + at::cuda::getCurrentCUDAStream()); \ + return; \ + } + +/* +Perform softmax plus atomics in order to do first pass of top_k_gating. +*/ +void top_k_gating(torch::Tensor& expert_counts, + torch::Tensor& scores, + torch::Tensor& assignments, + torch::Tensor& offsets, + torch::Tensor& logits, + torch::Tensor& batch_metadata) +{ + const int32_t n_tokens = scores.size(0); + const int32_t n_top_k = scores.size(1); + + // Should have the same buffer size for scores, offsets, and assignments + TORCH_CHECK(n_tokens == offsets.size(0)); + TORCH_CHECK(n_tokens == logits.size(0)); + TORCH_CHECK(n_tokens == assignments.size(0)); + + TORCH_CHECK(n_top_k == offsets.size(1)); + TORCH_CHECK(n_top_k == assignments.size(1)); + + TORCH_CHECK(expert_counts.scalar_type() == torch::kInt32); + TORCH_CHECK(scores.scalar_type() == torch::kFloat); + TORCH_CHECK(assignments.scalar_type() == torch::kInt32); + TORCH_CHECK(offsets.scalar_type() == torch::kInt32); + + const int32_t n_experts = logits.size(1); + const RaggedBatchDescriptor* batch_metadata_ptr = + reinterpret_cast(batch_metadata.data_ptr()); + + DISPATCH_TOP_K_GATING(kFloat, float) + DISPATCH_TOP_K_GATING(kHalf, __half) +#ifdef BF16_AVAILABLE + DISPATCH_TOP_K_GATING(kBFloat16, __nv_bfloat16) +#endif + + TORCH_CHECK(false, "Unsupported dtype for logits in top_k_gating"); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.cuh b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.cuh new file mode 100644 index 0000000000000000000000000000000000000000..c525cc5f524eac1dcc2c61979582b70f6d8f4c12 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.cuh @@ -0,0 +1,25 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include "ds_kernel_utils.h" +#include "ragged_dtypes.h" + +namespace gating { +constexpr int unassigned = -1; +} // namespace gating + +template +void launch_top_k_gating(int32_t* expert_counts, + float* scores, + int32_t* assignments, + int32_t* offsets, + const T* logits, + const RaggedBatchDescriptor* batch_metadata, + const int32_t n_tokens, + const int32_t n_experts, + const int32_t n_top_k, + cudaStream_t stream); diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.h b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.h new file mode 100644 index 0000000000000000000000000000000000000000..00840c3c93b50ee0059ad5d17c6af241ad045cb6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.h @@ -0,0 +1,21 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include +#include +#include "ragged_dtypes.h" +#include "top_k_gating.cuh" + +/* +Perform softmax plus atomics to get token mapping. +*/ +void top_k_gating(torch::Tensor& expert_counts, + torch::Tensor& scores, + torch::Tensor& assignments, + torch::Tensor& offsets, + torch::Tensor& logits, + torch::Tensor& batch_metadata); diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.py new file mode 100644 index 0000000000000000000000000000000000000000..72ba2b6019bb9e9f1ce58bd7ae36671a07a5bda2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating.py @@ -0,0 +1,59 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from typing import Tuple + +from ... import DSKernelBase +from ....inference_utils import DtypeEnum +from ....ragged import RaggedBatchWrapper +from deepspeed.ops.op_builder import RaggedOpsBuilder + + +class RaggedTopKGating(DSKernelBase): + """ + CUDA implementation of top-1 gating. This will perform a softmax on the logits, + and return the scale as well as its idx within that expert's allocation. + """ + + supported_logit_dtypes = [DtypeEnum.fp16, DtypeEnum.bf16, DtypeEnum.fp32] + + def __init__(self, logit_dtype: DtypeEnum) -> None: + + if not isinstance(logit_dtype, DtypeEnum): + logit_dtype = DtypeEnum(logit_dtype) + + if logit_dtype not in RaggedTopKGating.supported_logit_dtypes: + raise RuntimeError(f"Unsupported logit dtype {logit_dtype}") + + inf_module = RaggedOpsBuilder().load() + self.kernel = inf_module.top_k_gating + + def __call__(self, expert_counts: torch.Tensor, scores: torch.Tensor, assignments: torch.Tensor, + offsets: torch.Tensor, logits: torch.Tensor, + batch: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + """ + Perform the ragged top_k_gating. + + Arguments: + expert_counts (torch.Tensor): Tensor of 0s of shape [n_experts] to be filled with + number of tokens assigned to each expert. This must be filled with 0s else + the copy kernel will buffer overflow. In order to minimize the zero-fill cost, + it is recommended to write to 0 during the MoE output remapping. + scores (torch.Tensor): Preallocated output of shape [n_tokens, n_top_k] to place expert scaling + value. + expert_assignment (torch.Tensor): Preallocated output of shape [n_tokens, n_top_k] to place + which expert a token has been assigned to. + expert_offset (torch.Tensor): Preallocated output of shape [n_tokens, n_top_k] to place which + offset within an experts group a token is. + logits (torch.Tensor): Raw logits of gating function. + batch (RaggedBatchWrapper): Batch information for ragged tensor. 
+ + Returns: + tuple of (expert_counts, scores, expert_assignment, expert_offset) + """ + self.kernel(expert_counts, scores, assignments, offsets, logits, batch.batch_metadata_buffer()) + return expert_counts, scores, assignments, offsets diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating_cuda.cu b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..58f95c0455933f83b5f913973f89d25c77ae7570 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/kernels/ragged_ops/top_k_gating/top_k_gating_cuda.cu @@ -0,0 +1,125 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "conversion_utils.h" +#include "memory_access_utils.h" +#include "reduction_utils.h" +#include "top_k_gating.cuh" +#include "top_k_utils.h" + +using ROp = reduce::ROpType; + +template +__global__ void top_k_gating_kernel(int32_t* expert_counts, + float* scores, + int32_t* assignments, + int32_t* offsets, + const T* logits, + const RaggedBatchDescriptor* batch_metadata, + const int32_t n_experts) +{ + const int32_t token_idx = blockIdx.x; + const int32_t expert_idx = threadIdx.x; + const int32_t max_warps = 1024 / hw_warp_size; + + // CG helpers + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + + // Padding tokens do not require + if (token_idx >= batch_metadata->n_tokens) { + if (threadIdx.x == 0) { +#pragma unroll + for (int i = 0; i < TOP_K; i++) { + assignments[token_idx * TOP_K + i] = gating::unassigned; + offsets[token_idx * TOP_K + i] = gating::unassigned; + } + } + return; + } + + const T* token_logits = logits + token_idx * n_experts; + + float logit_val; + if (expert_idx < n_experts) { + logit_val = conversion::to(token_logits[expert_idx]); + } else { + reduce::init(&logit_val); + } + float reduce_val = logit_val; + + int32_t local_assigned_experts[TOP_K]; + float local_assigned_logits[TOP_K]; + + // Training code tends to use ``torch.argmax`` to select the expert, which + // which has ties broken by the lower index. Since our fused comparison algorithm + // breaks ties by the higher index (since it's the lower 32-bits of the 64-bit + // comparison), we invert the expert index to break ties by the lower index. 
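+    // Illustrative example: with n_experts = 8 and a tie between experts 2 and 5,
+    // the inverted indices are 5 and 2. The reduction's higher-index tie-break then
+    // selects inverted index 5, which maps back to expert 2 (the lower original
+    // index), matching the torch.argmax behavior described above.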
+ int32_t inverted_expert = n_experts - expert_idx - 1; + + // Find the top k logits + for (int i = 0; i < TOP_K; ++i) { + const reduce::IdxReduceResult res = + reduce::idx_reduce(tb, warp, reduce_val, inverted_expert); + local_assigned_experts[i] = n_experts - res.idx - 1; + local_assigned_logits[i] = res.val; + + // Set the max logit to -inf so that it is not selected again + if (threadIdx.x == n_experts - res.idx - 1) { reduce::init(&reduce_val); } + } + + const float max_logit = local_assigned_logits[0]; + float softmax_sum = __expf(logit_val - max_logit); + reduce::block(tb, warp, softmax_sum); + + for (int i = 0; i < TOP_K; ++i) { + const float softmax = __expf(local_assigned_logits[i] - max_logit) / softmax_sum; + + if (threadIdx.x == 0) { + scores[token_idx * TOP_K + i] = softmax; + assignments[token_idx * TOP_K + i] = local_assigned_experts[i]; + offsets[token_idx * TOP_K + i] = + atomicAdd(expert_counts + local_assigned_experts[i], 1); + } + } +} + +template +void launch_top_k_gating(int32_t* expert_counts, + float* scores, + int32_t* assignments, + int32_t* offsets, + const T* logits, + const RaggedBatchDescriptor* batch_metadata, + const int32_t n_tokens, + const int32_t n_experts, + const int32_t n_top_k, + cudaStream_t stream) +{ + const dim3 grid(n_tokens); + const dim3 block(((n_experts + hw_warp_size - 1) / hw_warp_size) * hw_warp_size); + + TOP_K_SWITCH(n_top_k, [&] { + top_k_gating_kernel<<>>( + expert_counts, scores, assignments, offsets, logits, batch_metadata, n_experts); + }); +} + +#define INSTANTIATE_top_k_KERNEL(T) \ + template void launch_top_k_gating(int32_t * expert_counts, \ + float* scores, \ + int32_t* assignments, \ + int32_t* offsets, \ + const T* logits, \ + const RaggedBatchDescriptor* batch_metadata, \ + const int32_t n_tokens, \ + const int32_t n_experts, \ + const int32_t n_top_k, \ + cudaStream_t stream); + +INSTANTIATE_top_k_KERNEL(float) INSTANTIATE_top_k_KERNEL(__half) +#ifdef BF16_AVAILABLE + INSTANTIATE_top_k_KERNEL(__nv_bfloat16) +#endif diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..79605a76a4c28151336040e33e5a87f6ab7ec64b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .policy import Llama2Policy diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__pycache__/container.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__pycache__/container.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..423081b5f5dc3b33e47e7a24a67f67a039436f44 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/__pycache__/container.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/container.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/container.py new file mode 100644 index 0000000000000000000000000000000000000000..9de9bdb345743c413841424b3d5ed8755152832a --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/container.py @@ -0,0 +1,78 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# Create a container object to save model-specific tensors using the policy file above. + +from ..common_parameters import * +from ..layer_container_base import LayerContainer +''' + # HF Llama model looks like this: + +LlamaForCausalLM( + (model): LlamaModel( + (embed_tokens): Embedding(32000, 4096, padding_idx=0) + (layers): ModuleList( + (0-31): 32 x LlamaDecoderLayer( + (self_attn): LlamaAttention( + (q_proj): Linear(in_features=4096, out_features=4096, bias=False) + (k_proj): Linear(in_features=4096, out_features=4096, bias=False) + (v_proj): Linear(in_features=4096, out_features=4096, bias=False) + (o_proj): Linear(in_features=4096, out_features=4096, bias=False) + (rotary_emb): LlamaRotaryEmbedding() + ) + (mlp): LlamaMLP( + (gate_proj): Linear(in_features=4096, out_features=11008, bias=False) + (up_proj): Linear(in_features=4096, out_features=11008, bias=False) + (down_proj): Linear(in_features=11008, out_features=4096, bias=False) + (act_fn): SiLUActivation() + ) + (input_layernorm): LlamaRMSNorm() + (post_attention_layernorm): LlamaRMSNorm() + ) + ) + (norm): LlamaRMSNorm() + ) + (lm_head): Linear(in_features=4096, out_features=32000, bias=False) +) +''' + + +class Llama2TransformerContainer(LayerContainer): + """ + Transformer layer container for the Llama-2 model. + """ + qkv_w: UnfusedQKVParameter + attn_out_w: AttentionOutputParameter + mlp_1_w: GatedMLPParameter + mlp_2_w: MLP2Parameter + attn_norm_gamma: NormParameter + mlp_norm_gamma: NormParameter + + PARAM_MAPPING = { + "self_attn.q_proj.weight": "qkv_w.q_params", + "self_attn.k_proj.weight": "qkv_w.k_params", + "self_attn.v_proj.weight": "qkv_w.v_params", + "self_attn.o_proj.weight": "attn_out_w.params", + "mlp.gate_proj.weight": "mlp_1_w.gate_params", + "mlp.up_proj.weight": "mlp_1_w.up_params", + "mlp.down_proj.weight": "mlp_2_w.params", + "input_layernorm.weight": "attn_norm_gamma.params", + "post_attention_layernorm.weight": "mlp_norm_gamma.params", + } + + +class Llama2NonTransformerContainer(LayerContainer): + """ + Non-Transformer layer container for the Llama-2 model. 
+ """ + word_emb: EmbeddingParameter + word_unembed: UnembedParameter + final_norm: NormParameter + + PARAM_MAPPING = { + "model.embed_tokens.weight": "word_emb.params", + "model.norm.weight": "final_norm.params", + "lm_head.weight": "word_unembed.params", + } diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/model.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/model.py new file mode 100644 index 0000000000000000000000000000000000000000..a0c81f4d749e13709e44fd2a10a6912d54f04310 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/model.py @@ -0,0 +1,209 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Iterable, Optional, Tuple + +import torch + +import deepspeed.comm as dist + +from ...allocator import empty_from +from ...inference_utils import ActivationType, DtypeEnum +from .. import * +from ...modules.configs import * +from ...modules.interfaces import * +from ...ragged import RaggedBatchWrapper + +from .container import Llama2NonTransformerContainer, Llama2TransformerContainer + + +class Llama2InferenceModel(DSTransformerModelBase): + """ + Inference model implementation for ragged batching for Llama-2 models. + """ + + _non_transformer: Optional[Llama2NonTransformerContainer] + """ + Embed + unembed container. Specializing the type annotation. + """ + + _transformer: Optional[Iterable[Llama2TransformerContainer]] + """ + Per-layer transformer container. Specializing the type annotation. + """ + """ + Properties ineherited from `DSInferenceModelBase` + """ + + @property + def max_sequence_length(self) -> int: + return self._config.max_seq_length + + """ + Properties ineherited from `DSTransformerModelBase` + """ + + @property + def num_layers(self) -> int: + return self._config.num_hidden_layers + + @property + def model_dim(self) -> int: + return self._config.hidden_size + + @property + def vocab_size(self) -> int: + return self._config.vocab_size + + @property + def head_size(self) -> int: + return self.model_dim // self.n_heads + + @property + def n_heads(self) -> int: + return self._config.num_attention_heads + + @property + def intermediate_dim(self) -> int: + return self._config.intermediate_size + + @property + def n_heads_kv(self) -> int: + return self._config.num_key_value_heads + + @property + def activation_dtype(self) -> DtypeEnum: + if self._config.torch_dtype == torch.float16: + return DtypeEnum.fp16 + elif self._config.torch_dtype == torch.bfloat16: + return DtypeEnum.bf16 + else: + raise NotImplementedError("Only fp16 and bf16 are supported") + + @property + def mlp_activation_fn(self) -> ActivationType: + activation = self._config.hidden_act.lower() + # llama model family is special and is always gated so force gated versions of relu, gelu, silu + if activation == "gelu": + return ActivationType.GEGLU + elif activation == "relu": + return ActivationType.ReGLU + elif activation == "gegelu": + return ActivationType.GEGLU + elif activation == "silu": + return ActivationType.SiGLU + else: + raise NotImplementedError(f"Activation {activation} not supported") + + @property + def norm_type(self) -> NormTypeEnum: + return NormTypeEnum.RMSNorm + + @property + def positional_embedding_type(self) -> PositionalEmbeddingType: + return PositionalEmbeddingType.rotate_half + + @property + def positional_embedding_config(self) -> Optional[RotateHalfConfig]: + 
return RotateHalfConfig(theta_base=self._config.rope_theta) + + """ + Forward implementations + """ + + def _forward_embed(self, ragged_batch: RaggedBatchWrapper) -> torch.Tensor: + """ + Performs the embedding lookup prior to running the transformer of the model. + + Arguments: + ragged_batch (RaggedBatchWrapper): The batch to embed. + + Returns: + torch.Tensor: The embedded batch. + """ + embed = self.embed(ragged_batch, self._non_transformer.word_emb) + + if embed.shape[-1] != self.model_dim: + raise ValueError(f"Embedding output shape {embed.shape} does not match model_dim {self.model_dim}") + + return embed + + def _forward_transformer_layer(self, layer_idx: int, residual: torch.Tensor, hidden_states: torch.Tensor, + ragged_batch_info: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Executes one (slightly offset) layer of the transformer. This implementation does a peak-ahead + optimization to fuse the layer norm of the next layer into the current layer. + + Arguments: + layer_idx (int): The index of the layer to execute. + residual (torch.Tensor): The residual tensor from the previous layer. + hidden_states (torch.Tensor): The hidden states from the previous layer. This is the + hidden states after pre normalization. + ragged_batch_info (RaggedBatchWrapper): The batch metadata. + """ + # TODO(cmikeh2): Distribute ragged_batch_info to all modules + + cur_params = self._transformer[layer_idx] + kv_cache = self.state_manager.get_cache(layer_idx) + + hidden_states = self.qkv(hidden_states, cur_params.qkv_w, b=None) + hidden_states = self.attn(hidden_states, kv_cache, ragged_batch_info) + hidden_states = self.attn_out(hidden_states, cur_params.attn_out_w, b=None) + + if self.tp_size > 1: + dist.all_reduce(hidden_states, group=self._base_mp_group) + + residual, hidden_states = self.norm(residual, hidden_states, cur_params.mlp_norm_gamma, beta=None) + + # Should be configurable in the future + hidden_states = self.mlp_1(hidden_states, cur_params.mlp_1_w, b=None) + hidden_states = self.mlp_2(hidden_states, cur_params.mlp_2_w, b=None) + + if self.tp_size > 1: + dist.all_reduce(hidden_states, group=self._base_mp_group) + + if layer_idx != self.num_layers - 1: + next_params = self._transformer[layer_idx + 1] + residual, hidden_states = self.norm(residual, hidden_states, next_params.attn_norm_gamma, beta=None) + else: + # On last layer, we just need to perform the residual add. Adding into the residual + # here is safe. + residual.add_(hidden_states) + + return residual, hidden_states + + def _forward_unembed(self, hidden_states: torch.Tensor, ragged_batch_info: RaggedBatchWrapper) -> torch.Tensor: + """ + Performs unembedding of the hidden states to logits. This will only sample the final + token of each sequence. 
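+        Under tensor parallelism, each rank holds a vocab-sharded slice of the logits;
+        the slices are all-gathered and reassembled into the full vocabulary below.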
+ """ + logits = self.unembed(hidden_states, + self._non_transformer.word_unembed, + ragged_batch_info, + gamma=self._non_transformer.final_norm) + + if self.tp_size > 1: + comm_buffer = empty_from(self._comm_logits, (self.tp_size, logits.shape[0], logits.shape[1])) + full_logits = empty_from(self._return_logits, (logits.shape[0], self.vocab_size)) + + dist.all_gather_into_tensor(comm_buffer, logits, group=self._base_mp_group) + + full_logits.copy_(comm_buffer.permute(1, 0, 2).reshape(logits.shape[0], self.vocab_size)) + + return full_logits + else: + return logits + + def forward(self, wrapped_batch: RaggedBatchWrapper) -> torch.Tensor: + + residual = self._forward_embed(wrapped_batch) + + residual, hidden_states = self.norm(residual, None, self._transformer[0].attn_norm_gamma, beta=None) + + for layer_idx in range(self.num_layers): + residual, hidden_states = self._forward_transformer_layer(layer_idx, residual, hidden_states, + wrapped_batch) + + return self._forward_unembed(residual, wrapped_batch) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/policy.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/policy.py new file mode 100644 index 0000000000000000000000000000000000000000..bb13ab6d5bf46fc5f834f69a97d08ca008f7e9c8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/llama_v2/policy.py @@ -0,0 +1,31 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Any + +from ...config_v2 import RaggedInferenceEngineConfig +from ..inference_policy_base import ContainerMap, InferenceV2Policy +from .container import Llama2NonTransformerContainer, Llama2TransformerContainer +from .model import Llama2InferenceModel + + +class Llama2Policy(InferenceV2Policy): + + def instantiate_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> Llama2InferenceModel: + return Llama2InferenceModel(config=self._model_config, engine_config=engine_config, base_mp_group=mp_group) + + def build_container_map(self) -> ContainerMap: + map = ContainerMap() + + transformer_containers = [Llama2TransformerContainer(self.model) for _ in range(self.model.num_layers)] + + map.set_transformer_params(['model.layers'], transformer_containers) + + map.set_non_transformer_params(Llama2NonTransformerContainer(self.model)) + + map.set_unmapped_params( + [f'model.layers.{i}.self_attn.rotary_emb.inv_freq' for i in range(self.model.num_layers)]) + + return map diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..60d636693ef3ad9d82af98310ffb7206ec23a781 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__init__.py @@ -0,0 +1,6 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .policy import MistralPolicy diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e5f38e3cdf081403a1b5dce2615e0a17bf331bf Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/container.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/container.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9ef11f9702cdcce9e138f71a0ae61dc13d904156 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/container.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/model.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9df29fa1281be2bf5e08915966ca005cafb7855b Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/model.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/policy.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/policy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27734deee032c88366c15f26f36fc254cd01a4f4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/__pycache__/policy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/container.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/container.py new file mode 100644 index 0000000000000000000000000000000000000000..b4c0956f4049bd1515eb9d95cb865468eb6740b8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/container.py @@ -0,0 +1,77 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# Create a container object to save model-specific tensors using the policy file above. 
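+#
+# Illustrative sketch (hypothetical usage, not part of this module): the policy
+# builds one MistralTransformerContainer per decoder layer and routes Hugging Face
+# parameter names through PARAM_MAPPING, e.g.
+#
+#   container = MistralTransformerContainer(model)
+#   # "...self_attn.q_proj.weight" -> container.qkv_w.q_params
+#   # "...mlp.down_proj.weight"    -> container.mlp_2_w.params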
+ +from deepspeed.inference.v2.model_implementations.common_parameters import * +from deepspeed.inference.v2.model_implementations.layer_container_base import LayerContainer +''' + # HF Mistral model (mistralai/Mistral-7B-v0.1) looks like this: +MistralForCausalLM( + (model): MistralModel( + (embed_tokens): Embedding(32000, 4096) + (layers): ModuleList( + (0-31): 32 x MistralDecoderLayer( + (self_attn): MistralAttention( + (q_proj): Linear(in_features=4096, out_features=4096, bias=False) + (k_proj): Linear(in_features=4096, out_features=1024, bias=False) + (v_proj): Linear(in_features=4096, out_features=1024, bias=False) + (o_proj): Linear(in_features=4096, out_features=4096, bias=False) + (rotary_emb): MistralRotaryEmbedding() + ) + (mlp): MistralMLP( + (gate_proj): Linear(in_features=4096, out_features=14336, bias=False) + (up_proj): Linear(in_features=4096, out_features=14336, bias=False) + (down_proj): Linear(in_features=14336, out_features=4096, bias=False) + (act_fn): SiLUActivation() + ) + (input_layernorm): MistralRMSNorm() + (post_attention_layernorm): MistralRMSNorm() + ) + ) + (norm): MistralRMSNorm() + ) + (lm_head): Linear(in_features=4096, out_features=32000, bias=False) +) +''' + + +class MistralTransformerContainer(LayerContainer): + """ + Transformer layer container for the Mistral model. + """ + qkv_w: UnfusedQKVParameter + attn_out_w: AttentionOutputParameter + mlp_1_w: GatedMLPParameter + mlp_2_w: MLP2Parameter + attn_norm_gamma: NormParameter + mlp_norm_gamma: NormParameter + + PARAM_MAPPING = { + "self_attn.q_proj.weight": "qkv_w.q_params", + "self_attn.k_proj.weight": "qkv_w.k_params", + "self_attn.v_proj.weight": "qkv_w.v_params", + "self_attn.o_proj.weight": "attn_out_w.params", + "mlp.gate_proj.weight": "mlp_1_w.gate_params", + "mlp.up_proj.weight": "mlp_1_w.up_params", + "mlp.down_proj.weight": "mlp_2_w.params", + "input_layernorm.weight": "attn_norm_gamma.params", + "post_attention_layernorm.weight": "mlp_norm_gamma.params", + } + + +class MistralNonTransformerContainer(LayerContainer): + """ + Non-Transformer layer container for the Mistral model. + """ + word_emb: EmbeddingParameter + word_unembed: UnembedParameter + final_norm: NormParameter + + PARAM_MAPPING = { + "model.embed_tokens.weight": "word_emb.params", + "model.norm.weight": "final_norm.params", + "lm_head.weight": "word_unembed.params", + } diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/model.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/model.py new file mode 100644 index 0000000000000000000000000000000000000000..318d362f1a64acc117a3e266d88afdd0b336ba56 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/model.py @@ -0,0 +1,207 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Iterable, Optional, Tuple + +import torch + +import deepspeed.comm as dist + +from ...allocator import empty_from +from ...inference_utils import ActivationType, DtypeEnum +from ...model_implementations import * +from ...modules.configs import * +from ...modules.interfaces import * +from ...ragged import RaggedBatchWrapper + +from .container import MistralNonTransformerContainer, MistralTransformerContainer + + +class MistralInferenceModel(DSTransformerModelBase): + """ + Inference model implementation for ragged batching for Mistral models. 
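+    Mirrors the Llama-2 implementation; the main architectural difference surfaced
+    here is grouped-query attention (num_key_value_heads < num_attention_heads,
+    e.g. 8 KV heads vs. 32 query heads for Mistral-7B).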
+ """ + + _non_transformer: Optional[MistralNonTransformerContainer] + """ + Embed + unembed container. Specializing the type annotation. + """ + + _transformer: Optional[Iterable[MistralTransformerContainer]] + """ + Per-layer transformer container. Specializing the type annotation. + """ + """ + Properties ineherited from `DSInferenceModelBase` + """ + + @property + def max_sequence_length(self) -> int: + return self._config.max_seq_length + + """ + Properties ineherited from `DSTransformerModelBase` + """ + + @property + def num_layers(self) -> int: + return self._config.num_hidden_layers + + @property + def model_dim(self) -> int: + return self._config.hidden_size + + @property + def vocab_size(self) -> int: + return self._config.vocab_size + + @property + def head_size(self) -> int: + return self.model_dim // self.n_heads + + @property + def n_heads(self) -> int: + return self._config.num_attention_heads + + @property + def intermediate_dim(self) -> int: + return self._config.intermediate_size + + @property + def n_heads_kv(self) -> int: + return self._config.num_key_value_heads + + @property + def activation_dtype(self) -> DtypeEnum: + if self._config.torch_dtype == torch.float16: + return DtypeEnum.fp16 + elif self._config.torch_dtype == torch.bfloat16: + return DtypeEnum.bf16 + else: + raise NotImplementedError("Only fp16 and bf16 are supported") + + @property + def mlp_activation_fn(self) -> ActivationType: + activation = self._config.hidden_act.lower() + if activation == "gelu": + return ActivationType.GEGLU + elif activation == "relu": + return ActivationType.ReGLU + elif activation == "gegelu": + return ActivationType.GEGLU + elif activation == "silu": + return ActivationType.SiGLU + else: + raise NotImplementedError(f"Activation {activation} not supported") + + @property + def norm_type(self) -> NormTypeEnum: + return NormTypeEnum.RMSNorm + + @property + def positional_embedding_type(self) -> PositionalEmbeddingType: + return PositionalEmbeddingType.rotate_half + + @property + def positional_embedding_config(self) -> Optional[RotateHalfConfig]: + return RotateHalfConfig(theta_base=self._config.rope_theta) + + """ + Forward implementations + """ + + def _forward_embed(self, ragged_batch: RaggedBatchWrapper) -> torch.Tensor: + """ + Performs the embedding lookup prior to running the transformer of the model. + + Arguments: + ragged_batch (RaggedBatchWrapper): The batch to embed. + + Returns: + torch.Tensor: The embedded batch. + """ + embed = self.embed(ragged_batch, self._non_transformer.word_emb) + + if embed.shape[-1] != self.model_dim: + raise ValueError(f"Embedding output shape {embed.shape} does not match model_dim {self.model_dim}") + + return embed + + def _forward_transformer(self, layer_idx: int, residual: torch.Tensor, hidden_states: torch.Tensor, + ragged_batch_info: RaggedBatchWrapper) -> Tuple[torch.Tensor, torch.Tensor]: + """ + Executes one (slightly offset) layer of the transformer. This implementation does a peak-ahead + optimization to fuse the layer norm of the next layer into the current layer. + + Arguments: + layer_idx (int): The index of the layer to execute. + residual (torch.Tensor): The residual tensor from the previous layer. + hidden_states (torch.Tensor): The hidden states from the previous layer. This is the + hidden states after pre normalization. + ragged_batch_info (RaggedBatchWrapper): The batch metadata. 
+ """ + # TODO(cmikeh2): Distribute ragged_batch_info to all modules + + cur_params = self._transformer[layer_idx] + kv_cache = self.state_manager.get_cache(layer_idx) + + hidden_states = self.qkv(hidden_states, cur_params.qkv_w, b=None) + hidden_states = self.attn(hidden_states, kv_cache, ragged_batch_info) + hidden_states = self.attn_out(hidden_states, cur_params.attn_out_w, b=None) + + if self.tp_size > 1: + dist.all_reduce(hidden_states, group=self._base_mp_group) + + residual, hidden_states = self.norm(residual, hidden_states, cur_params.mlp_norm_gamma, beta=None) + + # Should be configurable in the future + hidden_states = self.mlp_1(hidden_states, cur_params.mlp_1_w, b=None) + hidden_states = self.mlp_2(hidden_states, cur_params.mlp_2_w, b=None) + + if self.tp_size > 1: + dist.all_reduce(hidden_states, group=self._base_mp_group) + + if layer_idx != self.num_layers - 1: + next_params = self._transformer[layer_idx + 1] + residual, hidden_states = self.norm(residual, hidden_states, next_params.attn_norm_gamma, beta=None) + else: + # On last layer, we just need to perform the residual add. Adding into the residual + # here is safe. + residual.add_(hidden_states) + + return residual, hidden_states + + def _forward_unembed(self, hidden_states: torch.Tensor, ragged_batch_info: RaggedBatchWrapper) -> torch.Tensor: + """ + Performs unembedding of the hidden states to logits. This will only sample the final + token of each sequence. + """ + logits = self.unembed(hidden_states, + self._non_transformer.word_unembed, + ragged_batch_info, + gamma=self._non_transformer.final_norm) + + if self.tp_size > 1: + comm_buffer = empty_from(self._comm_logits, (self.tp_size, logits.shape[0], logits.shape[1])) + full_logits = empty_from(self._return_logits, (logits.shape[0], self.vocab_size)) + + dist.all_gather_into_tensor(comm_buffer, logits, group=self._base_mp_group) + + full_logits.copy_(comm_buffer.permute(1, 0, 2).reshape(logits.shape[0], self.vocab_size)) + + return full_logits + else: + return logits + + def forward(self, wrapped_batch: RaggedBatchWrapper) -> torch.Tensor: + + residual = self._forward_embed(wrapped_batch) + + residual, hidden_states = self.norm(residual, None, self._transformer[0].attn_norm_gamma, beta=None) + + for layer_idx in range(self.num_layers): + residual, hidden_states = self._forward_transformer(layer_idx, residual, hidden_states, wrapped_batch) + + return self._forward_unembed(residual, wrapped_batch) diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/policy.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/policy.py new file mode 100644 index 0000000000000000000000000000000000000000..b67ec311c952df8f1de538952f4d922cbba6b2ba --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/mistral/policy.py @@ -0,0 +1,30 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from typing import Any + +from ...config_v2 import RaggedInferenceEngineConfig +from ..inference_policy_base import ContainerMap, InferenceV2Policy +from .container import MistralNonTransformerContainer, MistralTransformerContainer +from .model import MistralInferenceModel + + +class MistralPolicy(InferenceV2Policy): + + def instantiate_model(self, engine_config: RaggedInferenceEngineConfig, mp_group: Any) -> MistralInferenceModel: + return MistralInferenceModel(config=self._model_config, engine_config=engine_config, base_mp_group=mp_group) + + def build_container_map(self) -> ContainerMap: + map = ContainerMap() + + transformer_containers = [MistralTransformerContainer(self.model) for _ in range(self.model.num_layers)] + + map.set_transformer_params(['model.layers'], transformer_containers) + + map.set_non_transformer_params(MistralNonTransformerContainer(self.model)) + + map.set_unmapped_params([]) + + return map diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..42875b7e9cfea8dd3d4e66d5e66636bd3076f8e5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/container.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/container.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd33639999765f87eae1c7fc60216804b6a320b0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/container.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/model.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d9706aaf9862085221e343dfd8b4599e71ca6ab Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/model.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/policy.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/policy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..962acb9535bbf0ec4a4626ad1a0035b639d3ff2f Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/opt/__pycache__/policy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0230713d277c154542e56b687c9bff12d102e9f Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/attn.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/attn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df5765e48c1665412cb3ef992f0dd1ae6bc1fe53 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/attn.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/attn_out.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/attn_out.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..727474d8835656bcd523b1f83def070ceb64aa82 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/attn_out.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/embedding.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/embedding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d73ea5cebce9c833034d54430414ae17bdc60d82 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/embedding.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/mlp.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/mlp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57c07884a633d98a914a903d048a2ee10df1efe0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/mlp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/qkv.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/qkv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6a2fc549d3727ff93f888208ede450f9809b56c Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/qkv.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/types.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/types.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..512c039ce266c08f57ab1852fd32294e5c5595b0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/types.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/unembed.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/unembed.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..811f02276c490354a20d000d225a8f61bad75e1e Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/unembed.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/utils.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97c61c33fd791add8b3860bf358596d2c7caf7b1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/__pycache__/utils.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/embedding.py b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/embedding.py new file mode 100644 index 0000000000000000000000000000000000000000..00d335768ae69b1d897dac92c4c222fdedd1e609 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/inference/v2/model_implementations/sharding/embedding.py @@ -0,0 +1,34 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + +from .types import ShardingType +from .utils import shard_param, get_shard_endpoints + + +def shard_embedding_param(param: torch.Tensor, shard_rank: int, num_shards: int) -> torch.Tensor: + """ + Utility method for sharding an embedding parameter. + + Args: + param (torch.Tensor): The parameter to shard. Should be of shape [vocab_size, model_dim] + shard_rank (int): Which shard of the partitioned tensor to return. + num_shards (int): The total number of shards the parameter is distributed across. + """ + return shard_param(param, ShardingType.INNER_DIMENSION, shard_rank, num_shards) + + +def sharded_embedding_dim(embedding_size: int, shard_rank: int, num_shards: int) -> int: + """ + Utility method for getting the size of the embedding dimension of a sharded embedding. + + Args: + embedding_size (int): The size of the embedding. + shard_rank (int): Which shard of the partitioned tensor to return. + num_shards (int): The total number of shards the parameter is distributed across. + """ + start_idx, end_idx = get_shard_endpoints(embedding_size, shard_rank, num_shards) + return end_idx - start_idx diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/attention.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/attention.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ac3364539ff1e47bceffcf79298f2bbc91845ac8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/attention.cpp @@ -0,0 +1,62 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include + +void attention_impl(torch::Tensor& q, + torch::Tensor& k, + torch::Tensor& v, + torch::Tensor& bias1, + torch::Tensor& bias2, + torch::Tensor& o, + torch::Tensor& lse); +void attention(torch::Tensor& q, + torch::Tensor& k, + torch::Tensor& v, + torch::Tensor& bias1, + torch::Tensor& bias2, + torch::Tensor& o, + torch::Tensor& lse) +{ + attention_impl(q, k, v, bias1, bias2, o, lse); +} + +void attention_back_impl(torch::Tensor& go, + torch::Tensor& q, + torch::Tensor& k, + torch::Tensor& v, + torch::Tensor& o, + torch::Tensor& lse, + torch::Tensor& delta, + torch::Tensor& bias1, + torch::Tensor& bias2, + torch::Tensor& gq, + torch::Tensor& gk, + torch::Tensor& gv, + torch::Tensor& gb1, + torch::Tensor& gb2); +void attention_bwd(torch::Tensor& go, + torch::Tensor& q, + torch::Tensor& k, + torch::Tensor& v, + torch::Tensor& o, + torch::Tensor& lse, + torch::Tensor& delta, + torch::Tensor& bias1, + torch::Tensor& bias2, + torch::Tensor& gq, + torch::Tensor& gk, + torch::Tensor& gv, + torch::Tensor& gb1, + torch::Tensor& gb2) +{ + attention_back_impl(go, q, k, v, o, lse, delta, bias1, bias2, gq, gk, gv, gb1, gb2); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("attention", &attention, ""); + m.def("attention_bwd", &attention_bwd, ""); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/attention_back.cu b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/attention_back.cu new file mode 100644 index 0000000000000000000000000000000000000000..a82c4ec68a13fccfe63c684a481afeebfe378031 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/attention_back.cu @@ -0,0 +1,218 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include +#include +#include "gemm_kernel_utils.h" +#include "kernel_backward.h" +#include "transform/bias_broadcast.h" + +constexpr auto kBlockSizeI = 64; +constexpr auto kBlockSizeJ = 64; + +template + class Broadcast1_, + template + class Broadcast2_> +typename std::enable_if::value>::type attention_back_impl_template( + torch::Tensor& go, + torch::Tensor& q, + torch::Tensor& k, + torch::Tensor& v, + torch::Tensor& o, + torch::Tensor& lse, + torch::Tensor& delta, + torch::Tensor& bias1, + torch::Tensor& bias2, + torch::Tensor& gq, + torch::Tensor& gk, + torch::Tensor& gv, + torch::Tensor& gb1, + torch::Tensor& gb2) +{ + EVOFORMER_CHECK(false, "Unsupported GPU and data type combination") +} + +template + class Broadcast1_, + template + class Broadcast2_> +typename std::enable_if::value>::type attention_back_impl_template( + torch::Tensor& go, + torch::Tensor& q, + torch::Tensor& k, + torch::Tensor& v, + torch::Tensor& o, + torch::Tensor& lse, + torch::Tensor& delta, + torch::Tensor& bias1, + torch::Tensor& bias2, + torch::Tensor& gq, + torch::Tensor& gk, + torch::Tensor& gv, + torch::Tensor& gb1, + torch::Tensor& gb2) +{ + constexpr bool kPreload_ = arch::kMinComputeCapability >= 80; + using Kernel = AttentionBackwardKernel; + int head_size = q.size(-1); + int head_number = q.size(-2); + int seq_length = q.size(-3); + auto q_view = q.view({-1, seq_length, head_number, head_size}); + auto k_view = k.view({-1, seq_length, head_number, head_size}); + auto v_view = v.view({-1, seq_length, head_number, head_size}); + auto o_view = o.view({-1, seq_length, head_number, head_size}); + auto do_view = go.view({-1, seq_length, head_number, head_size}); + auto dk_view = gk.view({-1, seq_length, head_number, head_size}); + auto dv_view = gv.view({-1, seq_length, head_number, head_size}); + auto dq_view = gq.view({-1, seq_length, head_number, head_size}); + auto q_ptr = reinterpret_cast(q.data_ptr()); + auto k_ptr = reinterpret_cast(k.data_ptr()); + auto v_ptr = reinterpret_cast(v.data_ptr()); + auto o_ptr = reinterpret_cast(o.data_ptr()); + auto do_ptr = reinterpret_cast(go.data_ptr()); + auto dk_ptr = reinterpret_cast(gk.data_ptr()); + auto dv_ptr = reinterpret_cast(gv.data_ptr()); + auto dq_ptr = reinterpret_cast(gq.data_ptr()); + auto db1_ptr = gb1.size(0) > 0 ? reinterpret_cast(gb1.data_ptr()) : nullptr; + auto db2_ptr = gb2.size(0) > 0 ? reinterpret_cast(gb2.data_ptr()) : nullptr; + auto lse_ptr = reinterpret_cast(lse.data_ptr()); + auto delta_ptr = reinterpret_cast(delta.data_ptr()); + auto bias1_ptr = reinterpret_cast(bias1.data_ptr()); + auto bias2_ptr = reinterpret_cast(bias2.data_ptr()); + static_assert(Kernel::kKernelComputesDelta, "Kernel must compute delta"); + + typename Kernel::Params p; + p.query_ptr = q_ptr; + p.key_ptr = k_ptr; + p.value_ptr = v_ptr; + p.logsumexp_ptr = lse_ptr; + p.output_ptr = o_ptr; + p.grad_output_ptr = do_ptr; + p.delta_ptr = delta_ptr; + p.grad_query_ptr = dq_ptr; + p.grad_key_ptr = dk_ptr; + p.grad_value_ptr = dv_ptr; + + p.grad_bias1_ptr = db1_ptr; + p.grad_bias2_ptr = db2_ptr; + p.B = q.size(0); + p.N = q.size(1); + p.bias1_ptr = bias1.size(0) ? bias1_ptr : nullptr; + p.bias2_ptr = bias2.size(0) ? 
bias2_ptr : nullptr; + + p.scale = 1.0f / sqrtf(head_size); + + p.head_dim = head_size; + p.head_dim_value = head_size; + p.num_queries = seq_length; + p.num_keys = seq_length; + p.num_heads = head_number; + + p.q_strideM = q_view.stride(-3); + p.k_strideM = k_view.stride(-3); + p.v_strideM = v_view.stride(-3); + p.gO_strideM = do_view.stride(-3); + p.o_strideH = o_view.stride(-2); + p.q_strideH = q_view.stride(-2); + p.k_strideH = k_view.stride(-2); + p.v_strideH = v_view.stride(-2); + p.o_strideB = o_view.stride(-4); + p.q_strideB = q_view.stride(-4); + p.k_strideB = k_view.stride(-4); + p.v_strideB = v_view.stride(-4); + p.lse_strideB = lse.stride(-3); + p.lse_strideH = lse.stride(-2); + p.delta_strideB = delta.stride(-3); + p.delta_strideH = delta.stride(-2); + p.num_batches = q_view.size(-4); + + p.gO_strideB = do_view.stride(-4); + p.gQ_strideB = dq_view.stride(-4); + p.gK_strideB = dk_view.stride(-4); + p.gV_strideB = dv_view.stride(-4); + p.gO_strideH = do_view.stride(-2); + p.gQ_strideH = dq_view.stride(-2); + p.gK_strideH = dk_view.stride(-2); + p.gV_strideH = dv_view.stride(-2); + + torch::Tensor workspace = torch::empty(p.workspace_size() / 4, lse.options()); + p.workspace = workspace.data_ptr(); + + auto kernel_fn = attention_kernel_backward_batched_impl; + size_t smem_bytes = sizeof(typename Kernel::SharedStorage); + cudaFuncSetAttribute(kernel_fn, cudaFuncAttributeMaxDynamicSharedMemorySize, int(smem_bytes)); + if (!Kernel::check_supported(p)) { throw std::runtime_error("Unsupported parameters"); } + kernel_fn<<>>(p); +} + +#define CODE(scalar_t, torch_scalar_t) \ + do { \ + if (bias1.size(0) == 0 && bias2.size(0) == 0) { \ + attention_back_impl_template( \ + go, q, k, v, o, lse, delta, bias1, bias2, gq, gk, gv, gb1, gb2); \ + } else if (bias1.size(0) > 0 && bias2.size(0) > 0) { \ + attention_back_impl_template( \ + go, q, k, v, o, lse, delta, bias1, bias2, gq, gk, gv, gb1, gb2); \ + } else if (bias1.size(0) > 0) { \ + attention_back_impl_template( \ + go, q, k, v, o, lse, delta, bias1, bias2, gq, gk, gv, gb1, gb2); \ + } else { \ + attention_back_impl_template( \ + go, q, k, v, o, lse, delta, bias1, bias2, gq, gk, gv, gb1, gb2); \ + } \ + } while (0) + +void attention_back_impl(torch::Tensor& go, + torch::Tensor& q, + torch::Tensor& k, + torch::Tensor& v, + torch::Tensor& o, + torch::Tensor& lse, + torch::Tensor& delta, + torch::Tensor& bias1, + torch::Tensor& bias2, + torch::Tensor& gq, + torch::Tensor& gk, + torch::Tensor& gv, + torch::Tensor& gb1, + torch::Tensor& gb2) +{ + cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties(); + DISPATCH_ARCHTAG(prop->major * 10 + prop->minor, + DISPATCH_TYPES(q, { CODE(scalar_t, torch_scalar_t); })); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/attention_cu.cu b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/attention_cu.cu new file mode 100644 index 0000000000000000000000000000000000000000..37636c4bf98842543fe1bd055a848cb52d0a6449 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/attention_cu.cu @@ -0,0 +1,160 @@ +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include +#include "gemm_kernel_utils.h" +#include "kernel_forward.h" +#include "transform/bias_broadcast.h" + +template + class Broadcast1_, + template + class Broadcast2_> +typename std::enable_if::value>::type attention_impl_template( + torch::Tensor& q, + torch::Tensor& k, + torch::Tensor& v, + torch::Tensor& bias1, + torch::Tensor& bias2, + torch::Tensor& o, + float* lse_ptr) +{ + EVOFORMER_CHECK(false, "Unsupported GPU and data type combination") +} + +template + class Broadcast1_, + template + class Broadcast2_> +typename std::enable_if::value>::type attention_impl_template( + torch::Tensor& q, + torch::Tensor& k, + torch::Tensor& v, + torch::Tensor& bias1, + torch::Tensor& bias2, + torch::Tensor& o, + float* lse_ptr) +{ + // Attention definition goes here, replaced with BroadcastType1 and + // BroadcastType2 + using Attention = AttentionKernel; + + static_assert(!Attention::kNeedsOutputAccumulatorBuffer, + "This test does not support output accumulator buffer"); + int head_size = q.size(-1); + int head_number = q.size(-2); + int seq_length = q.size(-3); + auto q_view = q.view({-1, seq_length, head_number, head_size}); + auto k_view = k.view({-1, seq_length, head_number, head_size}); + auto v_view = v.view({-1, seq_length, head_number, head_size}); + auto o_view = o.view({-1, seq_length, head_number, head_size}); + int batch_size = q_view.size(0); + auto q_ptr = reinterpret_cast(q.data_ptr()); + auto k_ptr = reinterpret_cast(k.data_ptr()); + auto v_ptr = reinterpret_cast(v.data_ptr()); + auto o_ptr = reinterpret_cast(o.data_ptr()); + + auto bias1_ptr = reinterpret_cast(bias1.data_ptr()); + auto bias2_ptr = reinterpret_cast(bias2.data_ptr()); + + typename Attention::Params p; + { // set parameters + p.query_ptr = q_ptr; + p.key_ptr = k_ptr; + p.value_ptr = v_ptr; + p.logsumexp_ptr = lse_ptr; // Only needed for bw + p.output_accum_ptr = nullptr; + p.output_ptr = o_ptr; + p.scale = 1.0f / sqrt(float(head_size)); + + p.bias1_ptr = bias1_ptr; + p.bias2_ptr = bias2_ptr; + p.B = q.size(0); + p.N = q.size(1); + + p.num_heads = head_number; + p.num_batches = batch_size; + p.head_dim = head_size; + p.head_dim_value = head_size; + p.num_queries = seq_length; + p.num_keys = seq_length; + + // All tensors are in BMHK shapes + p.q_strideH = q_view.stride(-2); + p.k_strideH = k_view.stride(-2); + p.v_strideH = v_view.stride(-2); + p.q_strideM = q_view.stride(-3); + p.k_strideM = k_view.stride(-3); + p.v_strideM = v_view.stride(-3); + p.o_strideM = o_view.stride(-3); + p.q_strideB = q_view.stride(-4); + p.k_strideB = k_view.stride(-4); + p.v_strideB = v_view.stride(-4); + } + + constexpr auto kernel_fn = attention_kernel_batched_impl; + int smem_bytes = sizeof(typename Attention::SharedStorage); + if (smem_bytes > 0xc000) { + cudaFuncSetAttribute(kernel_fn, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_bytes); + } + if (!Attention::check_supported(p)) { throw std::runtime_error("Parameters not supported"); } + kernel_fn<<>>(p); +} + +#define CODE(scalar_t, torch_scalar_t) \ + do { \ + if (bias1.size(0) == 0 && bias2.size(0) == 0) { \ + attention_impl_template(q, k, v, bias1, bias2, o, lse_ptr); \ + } else if (bias1.size(0) == 0) { \ + attention_impl_template(q, k, v, bias1, bias2, o, lse_ptr); \ + } else if (bias2.size(0) == 0) { \ + attention_impl_template(q, k, v, bias1, bias2, o, lse_ptr); \ + } else { \ + attention_impl_template( \ + q, k, v, bias1, bias2, o, lse_ptr); \ + } \ + } while (0) + +// Function to select 
and call the correct template based on biases sizes +void attention_impl(torch::Tensor& q, + torch::Tensor& k, + torch::Tensor& v, + torch::Tensor& bias1, + torch::Tensor& bias2, + torch::Tensor& o, + torch::Tensor& lse) +{ + auto lse_ptr = lse.size(0) == 0 ? nullptr : reinterpret_cast(lse.data_ptr()); + cudaDeviceProp* prop = at::cuda::getCurrentDeviceProperties(); + DISPATCH_ARCHTAG(prop->major * 10 + prop->minor, + DISPATCH_TYPES(q, { CODE(scalar_t, torch_scalar_t); })); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm_kernel_utils.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm_kernel_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..2a4300c5cac13f38ca2a1659e7b0d3ed7a6adc11 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/gemm_kernel_utils.h @@ -0,0 +1,254 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holdvr nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include +#include "cutlass/arch/mma.h" + +template +struct CheckArch { + static constexpr bool isPreVolta = arch::kMinComputeCapability < 70; + static constexpr bool isPreAmpere = + arch::kMinComputeCapability < 80 && arch::kMinComputeCapability >= 70; + static constexpr bool isAmpere = arch::kMinComputeCapability >= 80; +#if defined(__CUDA_ARCH__) + static constexpr bool compiler_cc = arch::kMinComputeCapability * 10 <= __CUDA_ARCH__; +#else + static constexpr bool compiler_cc = true; +#endif + static constexpr bool value = (isPreVolta && std::is_same_v) || + (isPreAmpere && !std::is_same_v) || + isAmpere && compiler_cc; +}; + +#define DISPATCH_ARCHTAG(CC, func) \ + { \ + if constexpr (GPU_ARCH >= 80) { \ + if (CC >= 80) { \ + using ArchTag = cutlass::arch::Sm80; \ + func; \ + } else { \ + EVOFORMER_CHECK(false, "Compile flag error. Unexpected GPU"); \ + } \ + } else if constexpr (GPU_ARCH >= 75) { \ + if (CC >= 75) { \ + using ArchTag = cutlass::arch::Sm75; \ + func; \ + } else { \ + EVOFORMER_CHECK(false, "Compile flag error. Unexpected GPU"); \ + } \ + } else if constexpr (GPU_ARCH >= 70) { \ + if (CC >= 70) { \ + using ArchTag = cutlass::arch::Sm70; \ + func; \ + } else { \ + EVOFORMER_CHECK(false, "Compile flag error. Unexpected GPU"); \ + } \ + } else { \ + EVOFORMER_CHECK(false, "Only GPUs with Tensor Core are supported for now"); \ + } \ + } + +#define DISPATCH_TYPES(tensor, func) \ + { \ + if (tensor.scalar_type() == at::ScalarType::Half) { \ + using scalar_t = cutlass::half_t; \ + using torch_scalar_t = at::Half; \ + func; \ + } else if (tensor.scalar_type() == at::ScalarType::BFloat16) { \ + using scalar_t = cutlass::bfloat16_t; \ + using torch_scalar_t = at::BFloat16; \ + func; \ + } else { \ + EVOFORMER_CHECK(false, "Only fp16 and bf16 supported at the moment"); \ + } \ + } + +#define DISPATCH_BOOL(BOOL_V, BOOL_NAME, F) \ + { \ + if (BOOL_V) { \ + constexpr bool BOOL_NAME = true; \ + F(); \ + } else { \ + constexpr bool BOOL_NAME = false; \ + F(); \ + } \ + } + +#ifdef TORCH_CHECK +#define CHECK_ALIGNED_PTR(PTR, ALIGNMENT) \ + EVOFORMER_CHECK(uint64_t(PTR) % ALIGNMENT == 0, #PTR " is not correctly aligned") +#define EVOFORMER_CHECK TORCH_CHECK +#elif defined(__CUDACC_RTC__) +#define CHECK_ALIGNED_PTR(PTR, ALIGNMENT) \ + if (!(uint64_t(PTR) % ALIGNMENT == 0)) { return false; } +#define EVOFORMER_CHECK(COND, ERR) \ + if (!(COND)) { return false; } +#else +#include +#define CHECK_ALIGNED_PTR(PTR, ALIGNMENT) \ + if (!(uint64_t(PTR) % ALIGNMENT == 0)) { \ + std::cerr << #PTR " is not correctly aligned\n"; \ + return false; \ + } +#define EVOFORMER_CHECK(COND, ERR) \ + if (!(COND)) { \ + std::cerr << "[Evoformer Attention]" \ + << "'" #COND "' failed: " << ERR << "\n"; \ + return false; \ + } +#endif + +namespace gemm_kernel_utils { + +template +constexpr CUTLASS_HOST_DEVICE integer ceil_div(integer n, integer m) +{ + return (n + m - 1) / m; +} + +template +constexpr CUTLASS_HOST_DEVICE integer align_up(integer n, integer m) +{ + return ((n + m - 1) / m) * m; +} + +//////////////////////////////////////////////////////////////////////////////// +// Determine the type of GEMM we do (TensorCores or not, Shapes ...) 
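+// The specializations below choose OpClassSimt (FMA on CUDA cores) as the generic
+// fallback, and OpClassTensorOp for f32 on Sm80+ and for 16-bit types on Sm75+
+// (plus a dedicated Volta f16 case), each with its own instruction shape and
+// minimum alignment.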
+// TODO: Maybe we could rely on Cutlass's DefaultGemm templates +//////////////////////////////////////////////////////////////////////////////// + +// Fallback to Simt (FMA on cuda cores) if not in a special case below +template +struct DefaultGemmType { + static constexpr int ThreadK = 8; + static constexpr int WarpK = 8; + static constexpr int kMinimumAlignment = 1; + using InstructionShape = cutlass::gemm::GemmShape<1, 1, 1>; + using OpClass = cutlass::arch::OpClassSimt; + using Operator = cutlass::arch::OpMultiplyAdd; +}; + +// Specialization for tensorcores with f32 +template +struct DefaultGemmType< + ArchTag, + float, + typename cutlass::platform::enable_if= 80>::type> { + static constexpr int ThreadK = 32; + static constexpr int WarpK = 32; + static constexpr int kMinimumAlignment = 4; + using OpClass = cutlass::arch::OpClassTensorOp; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Operator = cutlass::arch::OpMultiplyAddFastF32; +}; + +// Specialization for tensorcores with f16/bf16 - Sm75+ +template +struct DefaultGemmType< + ArchTag, + scalar_t, + typename cutlass::platform::enable_if= 75 && + cutlass::sizeof_bits::value == 16>::type> { + static constexpr int ThreadK = 32; + static constexpr int WarpK = 32; + static constexpr int kMinimumAlignment = 4; + using OpClass = cutlass::arch::OpClassTensorOp; + using InstructionShape = cutlass::gemm::GemmShape<16, 8, 8>; + using Operator = cutlass::arch::OpMultiplyAdd; +}; + +// Specialization for tensorcores with f16 - Volta +template <> +struct DefaultGemmType { + static constexpr int ThreadK = 32; + static constexpr int WarpK = 32; + static constexpr int kMinimumAlignment = 2; + using OpClass = cutlass::arch::OpClassTensorOp; + using InstructionShape = cutlass::gemm::GemmShape<8, 8, 4>; + using Operator = cutlass::arch::OpMultiplyAdd; +}; + +// Enables to do +// `auto x = kCondition ? 
fa(arg) : fb(arg)` +// when `fa` and `fb` have different types +template +struct call_conditional; + +template +struct call_conditional { + template + static CUTLASS_HOST_DEVICE auto apply(TA ta, TB tb, Arg arg) -> decltype(ta(arg)) + { + return ta(arg); + } +}; + +template +struct call_conditional { + template + static CUTLASS_HOST_DEVICE auto apply(TA ta, TB tb, Arg arg) -> decltype(tb(arg)) + { + return tb(arg); + } +}; + +//////////////////////////////////////////////////////////////////////////////// +// Mark a variable as warp-uniform - enables some compiler optimizations +// The cheapest way to do it is just to broadcast it from lane 0 +//////////////////////////////////////////////////////////////////////////////// + +CUTLASS_DEVICE int32_t warp_uniform(int32_t value) +{ + return (int32_t)__shfl_sync(0xffffffff, (unsigned)value, 0); +} + +template +CUTLASS_DEVICE T* warp_uniform(T* ptr) +{ + struct { + union { + T* ptr; + uint32_t asInt[2]; + }; + } p; + p.ptr = ptr; + p.asInt[0] = warp_uniform(p.asInt[0]); + p.asInt[1] = warp_uniform(p.asInt[1]); + return p.ptr; +} +} // namespace gemm_kernel_utils diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/kernel_backward.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/kernel_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..87e6df18bb04ad52278062f6bb03f40c36edc601 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/kernel_backward.h @@ -0,0 +1,1965 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holdvr nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include +#include +#include + +#include + +#include "cutlass/cutlass.h" +#include "cutlass/epilogue/thread/linear_combination.h" +#include "cutlass/epilogue/thread/scale_type.h" +#include "cutlass/fast_math.h" +#include "cutlass/functional.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/vector.h" +#include "cutlass/numeric_conversion.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_ref.h" + +#include "gemm_kernel_utils.h" + +#include "cutlass/epilogue/thread/linear_combination_relu.h" +#include "cutlass/epilogue/threadblock/epilogue_smem_accumulator.h" +#include "cutlass/epilogue/warp/fragment_iterator_tensor_op.h" +#include "cutlass/epilogue/warp/tile_iterator_tensor_op.h" +#include "cutlass/gemm/device/default_gemm_configuration.h" +#include "cutlass/gemm/kernel/default_gemm.h" +#include "cutlass/gemm/threadblock/default_mma.h" +#include "cutlass/gemm/threadblock/default_mma_core_simt.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm70.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm75.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm80.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/platform/platform.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" +#include "cutlass/transform/threadblock/vector_iterator.h" +#include "epilogue/epilogue_pipelined.h" +#include "iterators/epilogue_predicated_tile_iterator.h" + +#include "epilogue/epilogue_grad_bias.h" +#include "gemm/custom_mma.h" +#include "gemm/find_default_mma.h" +#include "gemm/mma_accum_lambda_iterator.h" +#include "gemm/mma_from_smem.h" +#include "transform/bias_broadcast.h" +#include "transform/tile_smem_loader.h" + +#include + +using namespace gemm_kernel_utils; + +namespace { + +template +struct GmemTile { + /* + Helper functions to efficient store/load RF to gmem + + GEMM accumulators have a particular format on A100, and + it takes some compute/shared-memory to rearrange them to + a RowMajor or ColumnMajor format in global memory through + an Epilogue. The same complexity goes for loading into RF. + + This class loads/stores RF as they are, and can be used for + efficient accumulation across gemms for instance: + + ``` + GmemTile tile; + for (int i = 0; i < N; ++i) { + // ... + + Fragment accum; + if (i == 0) { + accum.clear(); + } else { + tile.load(accum); + } + mma(accum, ...); + if (i < N-1) { + // Store for next GEMM + tile.store(accum); + } else { + // Store in tensor (eg RowMajor) + epilogue(accum); + } + + // ... 
+ } + ``` + */ + + // 128bits per thread + using AccessType = cutlass::Array; + static constexpr int32_t kBytes = sizeof(AccessType); + static constexpr int32_t kStride = kNumThreads * AccessType::kElements; + static constexpr int32_t kNumIters = FragmentType::kElements / AccessType::kElements; + static constexpr int32_t kElementsStored = kNumThreads * FragmentType::kElements; + static_assert(FragmentType::kElements % AccessType::kElements == 0, + "fragment not aligned on 128 bits"); + + float* ptr; + + CUTLASS_DEVICE void load(FragmentType& fragment, int thread_id) + { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kNumIters; ++i) { + AccessType* __restrict__ gmem_ptr = reinterpret_cast( + ptr + thread_id * AccessType::kElements + i * kStride); + AccessType sub_fragment; + cutlass::arch::global_load(sub_fragment, gmem_ptr, true); + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < AccessType::kElements; ++j) { + fragment[i * AccessType::kElements + j] = sub_fragment[j]; + } + } + } + + CUTLASS_DEVICE void store(FragmentType const& fragment, int thread_id) + { + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < kNumIters; ++i) { + AccessType* __restrict__ gmem_ptr = reinterpret_cast( + ptr + thread_id * AccessType::kElements + i * kStride); + AccessType sub_fragment; + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < AccessType::kElements; ++j) { + sub_fragment[j] = fragment[i * AccessType::kElements + j]; + } + cutlass::arch::global_store(sub_fragment, gmem_ptr, true); + } + } +}; + +template +constexpr int getWarpsPerSm() +{ + constexpr bool is_half = !cutlass::platform::is_same::value; + if (Arch::kMinComputeCapability >= 80) { return is_half ? 12 : 8; } + return 8; +} +} // namespace + +template < + // which arch we target (eg `cutlass::arch::Sm80`) + typename ArchTag_, + // input/output type + typename scalar_t_, + // run optimized kernel because memory accesses will be aligned + bool kIsAligned_, + // use dropout if enabled + bool kApplyDropout_, + // when doing a GEMM, preload the next one (uses more shmem) + bool kPreload_, + // block dimensions + int kBlockSizeI_, + int kBlockSizeJ_, + // upperbound on `max(value.shape[-1], query.shape[-1])` + int kMaxK_ = (int)cutlass::platform::numeric_limits::max(), + template class Broadcast1_ = BroadcastNoLoad, + template class Broadcast2_ = BroadcastNoLoad> +struct AttentionBackwardKernel { + using scalar_t = scalar_t_; + using output_t = scalar_t; + using output_accum_t = float; + using lse_scalar_t = float; + using accum_t = float; + using ArchTag = ArchTag_; + static constexpr bool kIsAligned = kIsAligned_; + static constexpr bool kApplyDropout = kApplyDropout_; + static constexpr bool kPreload = kPreload_; + static constexpr int kBlockSizeI = kBlockSizeI_; + static constexpr int kBlockSizeJ = kBlockSizeJ_; + static constexpr int kMaxK = kMaxK_; + + struct Params { + // Input tensors + scalar_t* query_ptr; // [Mq, nH, K] + scalar_t* key_ptr; // [Mk, nH, K] + scalar_t* value_ptr; // [Mk, nH, Kv] + lse_scalar_t* logsumexp_ptr; // [nH, Mq] + scalar_t* output_ptr; // [Mq, nH, Kv] + scalar_t* grad_output_ptr; // [Mq, nH, Kv] + accum_t* delta_ptr; // [nH, Mq] + int32_t* cu_seqlens_q_ptr = nullptr; + int32_t* cu_seqlens_k_ptr = nullptr; + + // Output tensors + output_t* grad_query_ptr; // [Mq, nH, K] + output_t* grad_key_ptr; // [Mk, nH, K] + output_t* grad_value_ptr; // [Mk, nH, Kv] + + accum_t* grad_bias1_ptr = nullptr; + accum_t* grad_bias2_ptr = nullptr; + int32_t B = 0; + int32_t N = 0; + scalar_t* bias1_ptr = nullptr; + scalar_t* bias2_ptr = nullptr; + + // 
Accumulators + union { + output_accum_t* workspace = nullptr; // [Mq, Kq] + [Mkv, Kq] + [Mkv, Kv] + output_accum_t* workspace_gk; + }; + output_accum_t* workspace_gv; // (will be calculated by the kernel) + output_accum_t* workspace_gq; // (will be calculated by the kernel) + + // Scale + accum_t scale; + + // Dimensions/strides + int32_t head_dim = -1; + int32_t head_dim_value = -1; + int32_t num_queries = -1; + int32_t num_keys = -1; + int32_t num_heads = -1; + + int32_t q_strideM; + int32_t k_strideM; + int32_t v_strideM; + int32_t gO_strideM; + int32_t gB_strideM; + int8_t gQKV_strideM_multiplier = 1; // 3 for packed, 1 otherwise + + // RNG sequence offset based on batch_id and head_id + unsigned long long dropout_batch_head_rng_offset; + float dropout_prob = 0.0f; + + CUTLASS_HOST_DEVICE int32_t o_strideM() const { return head_dim_value * num_heads; } + CUTLASS_HOST_DEVICE int32_t gQ_strideM() const + { + return gQKV_strideM_multiplier * num_heads * head_dim; + } + CUTLASS_HOST_DEVICE int32_t gK_strideM() const + { + return gQKV_strideM_multiplier * num_heads * head_dim; + } + CUTLASS_HOST_DEVICE int32_t gV_strideM() const + { + return gQKV_strideM_multiplier * num_heads * head_dim_value; + } + + // Everything below is only used in `advance_to_block` + // and shouldn't use registers + int64_t o_strideH; + int32_t q_strideH; + int32_t k_strideH; + int32_t v_strideH; + int64_t o_strideB; + int64_t q_strideB; + int64_t k_strideB; + int64_t v_strideB; + int64_t lse_strideB; + int64_t lse_strideH; + int64_t delta_strideB; + int64_t delta_strideH; + int32_t num_batches; + + int64_t gO_strideB = 0; + int64_t gQ_strideB = 0; + int64_t gK_strideB = 0; + int64_t gV_strideB = 0; + int64_t gB_strideB = 0; + int64_t gO_strideH = 0; + int64_t gQ_strideH = 0; + int64_t gK_strideH = 0; + int64_t gV_strideH = 0; + int64_t gB_strideH = 0; + + CUTLASS_DEVICE bool advance_to_block() + { + int64_t batch_id = blockIdx.z; + int32_t head_id = blockIdx.y; + + if (kNeedsAccumGradQ || kNeedsAccumGradK || kNeedsAccumGradV) { + assert(workspace_size() == 0 || workspace != nullptr); + + workspace += (batch_id * num_heads + head_id) * workspace_strideBH(); + workspace = warp_uniform(workspace); + workspace_gv = workspace + workspace_elements_gk(); + workspace_gq = workspace_gv + workspace_elements_gv(); + } else { + workspace = nullptr; + } + + // Advance pointers that depend on the total concatenated + // number of queries, as `num_queries` is modified in the block + // below + dropout_batch_head_rng_offset = batch_id * (num_heads * num_queries * num_keys) + + head_id * (num_queries * num_keys); + logsumexp_ptr += batch_id * lse_strideB + head_id * lse_strideH; + + query_ptr += batch_id * q_strideB + head_id * q_strideH; + key_ptr += batch_id * k_strideB + head_id * k_strideH; + value_ptr += batch_id * v_strideB + head_id * v_strideH; + output_ptr += batch_id * o_strideB + head_id * o_strideH; + grad_output_ptr += batch_id * gO_strideB + head_id * gO_strideH; + delta_ptr += batch_id * delta_strideB + head_id * delta_strideH; + + grad_query_ptr += batch_id * gQ_strideB + head_id * gQ_strideH; + grad_key_ptr += batch_id * gK_strideB + head_id * gK_strideH; + grad_value_ptr += batch_id * gV_strideB + head_id * gV_strideH; + using broadcast_1 = Broadcast1_; + using broadcast_2 = Broadcast2_; + + if (broadcast_1::kEnable && grad_bias1_ptr) { + grad_bias1_ptr += batch_id * num_queries; + } + if (broadcast_2::kEnable && grad_bias2_ptr) { + auto strideB = num_heads * num_queries * num_keys; + auto strideH = num_queries * 
num_keys; + grad_bias2_ptr += (batch_id / N) * strideB + head_id * strideH; + } + if (broadcast_1::kEnable && bias1_ptr) { + bias1_ptr = broadcast_1::advance(bias1_ptr, + batch_id / N, + batch_id % N, + head_id, + num_queries * N, + num_queries, + 0); + } + if (broadcast_2::kEnable && bias2_ptr) { + auto strideB = num_heads * num_queries * num_keys; + auto strideH = num_queries * num_keys; + bias2_ptr = broadcast_2::advance( + bias2_ptr, batch_id / N, batch_id % N, head_id, strideB, 0, strideH); + } + + num_queries = warp_uniform(num_queries); + num_keys = warp_uniform(num_keys); + + query_ptr = warp_uniform(query_ptr); + key_ptr = warp_uniform(key_ptr); + value_ptr = warp_uniform(value_ptr); + logsumexp_ptr = warp_uniform(logsumexp_ptr); + output_ptr = warp_uniform(output_ptr); + grad_output_ptr = warp_uniform(grad_output_ptr); + delta_ptr = warp_uniform(delta_ptr); + + grad_query_ptr = warp_uniform(grad_query_ptr); + grad_key_ptr = warp_uniform(grad_key_ptr); + grad_value_ptr = warp_uniform(grad_value_ptr); + if (broadcast_1::kEnable) { + grad_bias1_ptr = warp_uniform(grad_bias1_ptr); + bias1_ptr = warp_uniform(bias1_ptr); + } + if (broadcast_2::kEnable) { + grad_bias2_ptr = warp_uniform(grad_bias2_ptr); + bias2_ptr = warp_uniform(bias2_ptr); + } + + return true; + } + + __host__ dim3 getBlocksGrid() const { return dim3(1, num_heads, num_batches); } + __host__ dim3 getThreadsGrid() const { return dim3(kWarpSize * kNumWarpsPerBlock, 1, 1); } + CUTLASS_HOST_DEVICE int64_t workspace_elements_gk() const + { + if (!kNeedsAccumGradK) { return 0; } + return align_up(num_keys, (int32_t)kBlockSizeJ) * + align_up(head_dim, (int32_t)kBlockSizeI); + } + CUTLASS_HOST_DEVICE int64_t workspace_elements_gv() const + { + if (!kNeedsAccumGradV) { return 0; } + return align_up(num_keys, (int32_t)kBlockSizeJ) * + align_up(head_dim_value, (int32_t)kBlockSizeI); + } + CUTLASS_HOST_DEVICE int64_t workspace_elements_gq() const + { + if (!kNeedsAccumGradQ) { return 0; } + if (num_keys <= kBlockSizeJ) { return 0; } + return align_up(num_queries, (int32_t)kBlockSizeI) * + align_up(head_dim, (int32_t)kBlockSizeJ); + } + CUTLASS_HOST_DEVICE int64_t workspace_strideBH() const + { + // Aligned on 128bits + return align_up( + workspace_elements_gk() + workspace_elements_gv() + workspace_elements_gq(), + int64_t(4)); + } + CUTLASS_HOST_DEVICE int64_t workspace_size() const + { + // Returns size of buffer we need to run this kernel + return num_batches * num_heads * workspace_strideBH() * sizeof(float); + } + }; + + static constexpr int64_t kWarpSize = 32; + + // If this is true, we store and accumulate dK/dV in RF + // rather than going back to gmem every time + static constexpr bool kIsHalf = cutlass::sizeof_bits::value <= 16; + static constexpr bool kOutputInRF = kIsHalf && kMaxK <= kBlockSizeI; + static_assert(!kPreload || (kIsHalf && ArchTag::kMinComputeCapability >= 80 && kOutputInRF), + "preload MMA not supported"); + static constexpr bool kPrologueQK = kPreload; + static constexpr bool kPrologueGV = kPreload; + static constexpr bool kPrologueDOV = kPreload; + static constexpr bool kPrologueGQ = kPreload; + static constexpr bool kPrologueGK = kPreload; + + static constexpr int64_t kNumWarpsPerBlock = (kBlockSizeI * kBlockSizeJ) / (32 * 32); + + // Compute delta for the f16 kernels + // TODO: Figure out why it's slower on the f32 kernels + // (something due to RF pressure?) 
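+    // Note: "delta" here is Di = (do_i * o_i).sum(-1) per query row; it is consumed later in dSij = Pij * (dPij - Di).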
+ // TODO: Remove condition on `kOutputInRF` - this is needed to work + // around a compiler bug on V100, not exactly sure why but I spent + // too much time on this already. Reproducible with + // (B, Mq, Mkv, K) = (1, 1, 1, 136) for instance + static constexpr bool kKernelComputesDelta = + kIsHalf && (kOutputInRF || ArchTag::kMinComputeCapability != 70); + + static constexpr bool kNeedsAccumGradQ = + !cutlass::platform::is_same::value; + static constexpr bool kNeedsAccumGradK = + !kOutputInRF && !cutlass::platform::is_same::value; + static constexpr bool kNeedsAccumGradV = + !kOutputInRF && !cutlass::platform::is_same::value; + + // Launch bounds + static constexpr int64_t kNumThreads = kWarpSize * kNumWarpsPerBlock; + static constexpr int64_t kMinBlocksPerSm = + getWarpsPerSm() / kNumWarpsPerBlock; + + using GemmType = DefaultGemmType; + using DefaultConfig = + typename cutlass::gemm::device::DefaultGemmConfiguration; + static constexpr auto kOptimalAlignement = + cutlass::platform::max(DefaultConfig::kAlignmentA, DefaultConfig::kAlignmentB); + static constexpr auto kMinimumAlignment = GemmType::kMinimumAlignment; + + struct MatmulQK { + /* + attn_T = k_j @ q_i.transpose(-2, -1) # matmul + attn_T = (attn_T - logsumexp[i_start:i_end].unsqueeze(1).transpose(-2, + -1)).exp() # epilogue + + with attn_T.shape = (kBlockSizeJ, kBlockSizeI) + */ + using ThreadblockShape = + cutlass::gemm::GemmShape; + using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>; + using DefaultMma = typename cutlass::gemm::threadblock::DefaultMma< + scalar_t, // ElementA + cutlass::layout::RowMajor, // LayoutA + kIsAligned ? DefaultConfig::kAlignmentA : GemmType::kMinimumAlignment, + scalar_t, // ElementB + cutlass::layout::ColumnMajor, // LayoutB + kIsAligned ? DefaultConfig::kAlignmentB : GemmType::kMinimumAlignment, + accum_t, // ElementC + cutlass::layout::RowMajor, // LayoutC + typename GemmType::OpClass, + ArchTag, + ThreadblockShape, + WarpShape, + typename GemmType::InstructionShape, + DefaultConfig::kStages, + typename GemmType::Operator, + false, // AccumulatorsInRowMajor = false, + cutlass::gemm::SharedMemoryClearOption::kNone>; + using MmaCore = typename DefaultMma::MmaCore; + using Mma = typename MakeCustomMma::Mma; + + // used for efficient load of bias tile (Bij) from global memory to shared + // memory + using BiasLoader = + TileSmemLoader, + MmaCore::kThreads, + // input restriction: kv_len has to be a multiple of this value + 128 / cutlass::sizeof_bits::value>; + + // Epilogue to store to shared-memory in a format that we can use later for + // the second matmul + using B2bGemm = + typename cutlass::gemm::threadblock::B2bGemm; + using AccumLambdaIterator = + typename DefaultMmaAccumLambdaIterator::Iterator; + using AccumulatorSharedStorage = typename B2bGemm::AccumulatorSharedStorage; + }; + + struct MatmulGradV { + /* + grad_v[j_start:j_end] += attn_T @ do_i # matmul + + Dimensions: (kBlockSizeJ * kNumWarpsPerBlock, kBlockSizeI, K) + (we might need to iterate multiple times on K) + */ + using ThreadblockShape = + cutlass::gemm::GemmShape; + using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>; + using InstructionShape = typename GemmType::InstructionShape; + + using DefaultGemm = + cutlass::gemm::kernel::DefaultGemm; + + // if dropout: + // for computing dVj += (Pij.T * Zij) @ dOi + // Pij_dropped.T = Pij.T * Zij is computed on the fly as fragments of + // Pij.T are loaded in. 
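// (Zij is the dropout mask tile held in shared memory.)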
The reason we do it this way is because Pij.T and + // Zij are reused in later steps, while Pij_dropped.T is only needed in + // this step. computing Pij_dropped.T on the fly allows us to avoid + // keeping all 3 of Pij_dropped.T, Pij.T, and Zij in shared memory at the + // same time. + // if no dropout: + // for computing dVj += Pij.T @ dOi + using DefaultMmaFromSmem = typename cutlass::gemm::threadblock::DefaultMmaFromSharedMemory< + typename DefaultGemm::Mma, + typename MatmulQK::AccumulatorSharedStorage, + kApplyDropout>; // kScaleOperandA + + using Mma = typename DefaultMmaFromSmem::Mma; + using WarpIteratorA = typename DefaultMmaFromSmem::WarpIteratorA; + using IteratorB = typename Mma::IteratorB; + using WarpCount = typename Mma::WarpCount; + + // Epilogue + using DefaultOutputOp = typename DefaultConfig::EpilogueOutputOp; + using DefaultEpilogue = typename DefaultGemm::Epilogue; + using OutputTileIterator = + typename cutlass::epilogue::threadblock::MakePrefetchableIterator< + typename DefaultEpilogue::OutputTileIterator>::Iterator; + using AccumTileGmem = GmemTile; + }; + + struct MatmulDOIVJ { + /* + doi_t_vj = do_i @ v_j.transpose(-2, -1) # matmul + tmp = (doi_t_vj - Di.unsqueeze(1)) * attn # inplace / epilogue? + */ + using ThreadblockShape = + cutlass::gemm::GemmShape; + using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>; + + using ElementC = accum_t; // CSY: Change it for better accuracy + using ElementAccum = accum_t; + + // no-op output op - epilogue just stores result to global memory + using BiasGradEpilogueOutputOp = typename cutlass::epilogue::thread::LinearCombination< + ElementC, + DefaultConfig::EpilogueOutputOp::kCount, + typename DefaultConfig::EpilogueOutputOp::ElementAccumulator, + typename DefaultConfig::EpilogueOutputOp::ElementCompute, + cutlass::epilogue::thread::ScaleType::Nothing>; + + using DefaultGemm = typename cutlass::gemm::kernel::DefaultGemm< + scalar_t, // ElementA + cutlass::layout::RowMajor, // LayoutA + kIsAligned ? DefaultConfig::kAlignmentA : GemmType::kMinimumAlignment, + scalar_t, // ElementB + cutlass::layout::ColumnMajor, // LayoutB + kIsAligned ? DefaultConfig::kAlignmentB : GemmType::kMinimumAlignment, + ElementC, // ElementC + cutlass::layout::RowMajor, // LayoutC + ElementAccum, // ElementAccumulator + typename GemmType::OpClass, + ArchTag, + ThreadblockShape, + WarpShape, + typename GemmType::InstructionShape, + BiasGradEpilogueOutputOp, // EpilogueOutputOp + void, // ThreadblockSwizzle (not used) + // multiple preloads, dropout Zij tile, and 3 stages push us over shared + // memory capacity on A100. set a ceiling on number of stages to save + // shared memory if dropout is in use. + kPreload && kApplyDropout && (kBlockSizeI * kBlockSizeJ > 64 * 64) + ? 
cutlass::const_min(2, DefaultConfig::kStages) + : DefaultConfig::kStages, // Stages + false, // SplitKSerial + typename GemmType::Operator, + cutlass::gemm::SharedMemoryClearOption::kNone>; + using Mma = typename MakeCustomMma::Mma; + + // epilogue used to write bias gradient, which is just the output of this + // matmul with some operations applied to the fragment + using BiasGradEpilogue = typename DefaultGemm::Epilogue; + + // Epilogue to store to shared-memory in a format that we can use later for + // the second matmul + using B2bGemm = + typename cutlass::gemm::threadblock::B2bGemm; + using AccumulatorSharedStorage = typename B2bGemm::AccumulatorSharedStorage; + }; + + struct MatmulGradQ { + // grad_q <- tmp @ k_j + using ThreadblockShape = + cutlass::gemm::GemmShape; + using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>; + using InstructionShape = typename GemmType::InstructionShape; + + using DefaultGemm = + cutlass::gemm::kernel::DefaultGemm; + + using DefaultMmaFromSmem = typename cutlass::gemm::threadblock::DefaultMmaFromSharedMemory< + typename DefaultGemm::Mma, + typename MatmulDOIVJ::AccumulatorSharedStorage, + false>; // kScaleOperandA + using Mma = typename DefaultMmaFromSmem::Mma; + using IteratorB = typename Mma::IteratorB; + using WarpCount = typename Mma::WarpCount; + + // Epilogue + using DefaultOutputOp = typename DefaultConfig::EpilogueOutputOp; + using DefaultEpilogue = typename DefaultGemm::Epilogue; + using OutputTileIterator = + typename cutlass::epilogue::threadblock::MakePrefetchableIterator< + typename DefaultEpilogue::OutputTileIterator>::Iterator; + using AccumTileGmem = GmemTile; + }; + struct MatmulGradK { + // grad_k <- tmp.transpose(-2, -1) @ q_i + using ThreadblockShape = + cutlass::gemm::GemmShape; + using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>; + using InstructionShape = typename GemmType::InstructionShape; + + using DefaultGemm = + cutlass::gemm::kernel::DefaultGemm; + + using DefaultMmaFromSmemN = typename cutlass::gemm::threadblock::DefaultMmaFromSharedMemory< + typename DefaultGemm::Mma, + typename MatmulQK::AccumulatorSharedStorage, + false>; // kScaleOperandA + using DefaultMmaFromSmemT = typename cutlass::gemm::threadblock::DefaultMmaFromSharedMemory< + typename DefaultGemm::Mma, + typename MatmulDOIVJ::AccumulatorSharedStorage, + false, // kScaleOperandA + kPreload>; // kTransposeA + using DefaultMmaFromSmem = + typename cutlass::platform::conditional::type; + using Mma = typename DefaultMmaFromSmem::Mma; + using IteratorB = typename Mma::IteratorB; + using WarpCount = typename Mma::WarpCount; + + // Epilogue + using DefaultOutputOp = typename DefaultConfig::EpilogueOutputOp; + using DefaultEpilogue = typename DefaultGemm::Epilogue; + using OutputTileIterator = + typename cutlass::epilogue::threadblock::MakePrefetchableIterator< + typename DefaultEpilogue::OutputTileIterator>::Iterator; + using AccumTileGmem = GmemTile; + }; + + using broadcast_1 = Broadcast1_; + using broadcast_2 = Broadcast2_; + + // shared storage for keeping Zij matrix. not needed if we aren't using + // dropout, in which case we use an empty array to save shared memory + using ZijSharedStorage = typename cutlass::platform::conditional< + kApplyDropout, + typename MatmulQK::AccumulatorSharedStorage, + // dummy shared storage object that takes up no space. 
+ typename cutlass::gemm::threadblock::AccumulatorSharedStorage< +#ifdef _WIN32 + // windows builds throw the error: + // "type containing an unknown-size array is not allowed" + // if we try to make Zij shared storage zero-sized. + // To get around this just make it sized 1 on windows. + typename cutlass::gemm::GemmShape<1, 1, 0>, +#else + typename cutlass::gemm::GemmShape<0, 0, 0>, +#endif + typename MatmulQK::AccumulatorSharedStorage::Element, + typename MatmulQK::AccumulatorSharedStorage::Layout, + typename cutlass::MatrixShape<0, 0>>>::type; + + struct SharedStoragePrologue { + struct { + cutlass::Array di; // (do_i * o_i).sum(-1) + typename MatmulQK::Mma::SharedStorageA mm_qk_k; + } persistent; + union { + struct { + // part1 - after Q.K / dV / dO.V + union { + // 1. efficient load of bias tile Bij, which is then applied to Pij + // typename MatmulQK::BiasLoader::SmemTile bias; + cutlass::AlignedBuffer bias; + // 4. store Pij. it is needed: + // - in dVj += (Pij.T * Zij) @ dOi + // - in dSij = Pij * (dPij - Di) + // 6. dVj += (Pij.T * Zij) @ dOi + // 10. write to fragment + typename MatmulQK::AccumulatorSharedStorage attn_shared_storage; + }; + // 5. store Zij. it is needed: + // - to compute Pij_dropped = Pij * Zij on the fly as fragments of Pij + // are loaded for the computation of dVj. + // - to compute dPij = (dOi @ Vj.T) * Zij + // 6. used in dVj += (Pij.T * Zij) @ dOi + // 9. used in dPij = dPij_dropped * Zij + ZijSharedStorage zij; + + union { + // 2. prologue for dVj + // 6. workspace for dVj += (Pij.T * Zij) @ dOi + typename MatmulGradV::Mma::SharedStorage mm_gradV; + // 7. dVj epilogue + typename MatmulGradV::DefaultEpilogue::SharedStorage gradV_epilogue; + }; + + // 3. prologue for dPij_dropped + // 8. used in dPij_dropped = dOi @ Vj.T + typename MatmulDOIVJ::Mma::SharedStorage mm_doivj; + } part1; + + struct { + // part2 - dQ + union { + typename MatmulQK::AccumulatorSharedStorage + tmpT_shared_storage; // (from part1) + typename MatmulDOIVJ::AccumulatorSharedStorage tmp_shared_storage; + }; + typename MatmulGradK::Mma::SharedStorage mm_gradK; // (preload) + typename MatmulGradQ::Mma::SharedStorage mm_gradQ; // (preload) + union { + // store dB = dSij to global memory + typename MatmulDOIVJ::BiasGradEpilogue::SharedStorage gradB_epilogue; + typename MatmulGradQ::DefaultEpilogue::SharedStorage gradQ_epilogue; + }; + + } part2; + + struct { + // part3 - after last iteration on dQ's epilogue / dK + union { + typename MatmulQK::AccumulatorSharedStorage + tmpT_shared_storage; // (from part1) + typename MatmulDOIVJ::AccumulatorSharedStorage tmp_shared_storage; + }; + typename MatmulGradK::Mma::SharedStorage mm_gradK; // (preload) + typename MatmulGradQ::DefaultEpilogue::SharedStorage gradQ_epilogue_lastIter; + + typename MatmulGradK::DefaultEpilogue::SharedStorage gradK_epilogue; + } part3; + + struct { + // part4 - after last iteration on dK's epilogue / preload next K.Q_t + typename MatmulQK::Mma::SharedStorageB mm_qk_q; + + // If we reach end of current key, dump RF->gmem with "final" epilogues + typename MatmulGradK::DefaultEpilogue::SharedStorage gradK_epilogue_final; + typename MatmulGradV::DefaultEpilogue::SharedStorage gradV_epilogue_final; + } part4; + }; +// =========================================== +#define FIELD(INSIDE_STRUCT, FIELDNAME) \ + CUTLASS_DEVICE auto& FIELDNAME() { return INSIDE_STRUCT.FIELDNAME; } + + FIELD(persistent, di) + FIELD(persistent, mm_qk_k) + FIELD(part1, bias) + FIELD(part1, attn_shared_storage) + FIELD(part1, zij) + FIELD(part1, mm_gradV) 
+ FIELD(part1, gradV_epilogue) + FIELD(part1, mm_doivj) + FIELD(part2, mm_gradK) + FIELD(part2, mm_gradQ) + FIELD(part2, gradB_epilogue) + FIELD(part2, gradQ_epilogue) + FIELD(part2, tmp_shared_storage) + FIELD(part3, tmpT_shared_storage) + FIELD(part3, gradQ_epilogue_lastIter) + FIELD(part3, gradK_epilogue) + FIELD(part4, mm_qk_q) + FIELD(part4, gradK_epilogue_final) + FIELD(part4, gradV_epilogue_final) + }; + + struct SharedStorageNoPrologue { + struct { + cutlass::Array di; // (do_i * o_i).sum(-1) + } persistent; + union { + struct { + // part1 - Q.K matmul + typename MatmulQK::Mma::SharedStorageA mm_qk_k; + typename MatmulQK::Mma::SharedStorageB mm_qk_q; + } part1; + + struct { + // part2 - compute gradV + union { + // 1. efficient load of bias tile Bij, which is then applied to Pij + cutlass::AlignedBuffer bias; + // 2. store Pij to shared memory. it is needed: + // - in this step, where it is used in dVj += (Pij.T * Zij) @ dOi + // - in next step where it is used in dSij = Pij * (dPij - Di) + typename MatmulQK::AccumulatorSharedStorage attn_shared_storage; + }; + // 3. store Zij. it is needed: + // - in this step, where it is used to compute Pij_dropped = Pij * Zij + // on the + // fly as fragments of Pij are loaded for the computation of dVj. + // - later to compute dPij = (dOi @ Vj.T) * Zij + ZijSharedStorage zij; + + union { + typename MatmulGradV::Mma::SharedStorage mm_gradV; + typename MatmulGradV::DefaultEpilogue::SharedStorage gradV_epilogue; + }; + } part2; + + struct { + // part3 - DO.V matmul + union { + // first compute dPij = (dOi @ Vj.T) * Zij + // and dSij = Pij * (dPij - Di) + struct { + // (from part2) - Pij for computing dSij = Pij * (dPij - Di) + typename MatmulQK::AccumulatorSharedStorage attn_shared_storage; + // (from part2) - Zij for computing dPij = dPij_dropped * Zij + ZijSharedStorage zij; + // matmul to compute dOiVj + typename MatmulDOIVJ::Mma::SharedStorage mm_doivj; + }; + // then store dB = dSij to global memory + typename MatmulDOIVJ::BiasGradEpilogue::SharedStorage gradB_epilogue; + }; + } part3; + + struct { + // part4 - compute gradQ + typename MatmulQK::AccumulatorSharedStorage tmpT_shared_storage; // (from part2) + typename MatmulDOIVJ::AccumulatorSharedStorage tmp_shared_storage; + union { + typename MatmulGradQ::Mma::SharedStorage mm_gradQ; + typename MatmulGradQ::DefaultEpilogue::SharedStorage gradQ_epilogue; + typename MatmulGradQ::DefaultEpilogue::SharedStorage gradQ_epilogue_lastIter; + }; + } part4; + + struct { + // part5 - compute gradK + typename MatmulQK::AccumulatorSharedStorage tmpT_shared_storage; // (from part2) + typename MatmulDOIVJ::AccumulatorSharedStorage tmp_shared_storage; + union { + typename MatmulGradK::Mma::SharedStorage mm_gradK; + typename MatmulGradK::DefaultEpilogue::SharedStorage gradK_epilogue; + }; + } part5; + + struct { + // part6 - store RF accumulated into gmem + typename MatmulGradK::DefaultEpilogue::SharedStorage gradK_epilogue_final; + typename MatmulGradV::DefaultEpilogue::SharedStorage gradV_epilogue_final; + } part6; + }; +// =========================================== +#define FIELD(INSIDE_STRUCT, FIELDNAME) \ + CUTLASS_DEVICE auto& FIELDNAME() { return INSIDE_STRUCT.FIELDNAME; } + + FIELD(persistent, di) + FIELD(part1, mm_qk_k) + FIELD(part1, mm_qk_q) + FIELD(part2, bias) + FIELD(part2, attn_shared_storage) + FIELD(part2, zij) + FIELD(part2, mm_gradV) + FIELD(part2, gradV_epilogue) + FIELD(part3, mm_doivj) + FIELD(part3, gradB_epilogue) + FIELD(part4, tmpT_shared_storage) + FIELD(part4, 
tmp_shared_storage) + FIELD(part4, mm_gradQ) + FIELD(part4, gradQ_epilogue) + FIELD(part4, gradQ_epilogue_lastIter) + FIELD(part5, mm_gradK) + FIELD(part5, gradK_epilogue) + FIELD(part6, gradK_epilogue_final) + FIELD(part6, gradV_epilogue_final) + }; + + using SharedStorage = typename cutlass::platform:: + conditional::type; + + struct OutputFragments { + typename MatmulGradV::Mma::FragmentC gradV; + typename MatmulGradK::Mma::FragmentC gradK; + + CUTLASS_DEVICE void clear() + { + gradV.clear(); + gradK.clear(); + } + }; + + static bool __host__ check_supported(Params const& p) + { + CHECK_ALIGNED_PTR(p.query_ptr, kMinimumAlignment); + CHECK_ALIGNED_PTR(p.key_ptr, kMinimumAlignment); + CHECK_ALIGNED_PTR(p.value_ptr, kMinimumAlignment); + CHECK_ALIGNED_PTR(p.output_ptr, kMinimumAlignment); + CHECK_ALIGNED_PTR(p.grad_output_ptr, kMinimumAlignment); + EVOFORMER_CHECK(p.lse_strideH % 8 == 0, "LSE is not correctly aligned"); + EVOFORMER_CHECK(p.lse_strideB % 8 == 0, "LSE is not correctly aligned"); + EVOFORMER_CHECK(p.num_heads <= 1 || p.q_strideH % kMinimumAlignment == 0, + "query is not correctly aligned (strideH)"); + EVOFORMER_CHECK(p.num_heads <= 1 || p.k_strideH % kMinimumAlignment == 0, + "key is not correctly aligned (strideH)"); + EVOFORMER_CHECK(p.num_heads <= 1 || p.v_strideH % kMinimumAlignment == 0, + "value is not correctly aligned (strideH)"); + EVOFORMER_CHECK(p.num_batches <= 1 || p.q_strideB % kMinimumAlignment == 0, + "query is not correctly aligned (strideB)"); + EVOFORMER_CHECK(p.num_batches <= 1 || p.k_strideB % kMinimumAlignment == 0, + "key is not correctly aligned (strideB)"); + EVOFORMER_CHECK(p.num_batches <= 1 || p.v_strideB % kMinimumAlignment == 0, + "value is not correctly aligned (strideB)"); + EVOFORMER_CHECK(p.q_strideM % kMinimumAlignment == 0, + "query is not correctly aligned (strideM)"); + EVOFORMER_CHECK(p.k_strideM % kMinimumAlignment == 0, + "key is not correctly aligned (strideM)"); + EVOFORMER_CHECK(p.v_strideM % kMinimumAlignment == 0, + "value is not correctly aligned (strideM)"); + EVOFORMER_CHECK(p.dropout_prob <= 1.0f && p.dropout_prob >= 0.0f, + "Invalid value for `dropout_prob`"); + EVOFORMER_CHECK(kApplyDropout || p.dropout_prob == 0.0f, + "Set `kApplyDropout`=True to support `dropout_prob > 0`"); + EVOFORMER_CHECK(p.head_dim > 0, "Invalid value for `head_dim`"); + EVOFORMER_CHECK(p.head_dim_value > 0, "Invalid value for `head_dim_value`"); + EVOFORMER_CHECK(p.num_queries > 0, "Invalid value for `num_queries`"); + EVOFORMER_CHECK(p.num_keys > 0, "Invalid value for `num_keys`"); + EVOFORMER_CHECK(p.num_heads > 0, "Invalid value for `num_heads`"); + EVOFORMER_CHECK(p.num_batches > 0, "Invalid value for `num_batches`"); + EVOFORMER_CHECK(p.head_dim <= kMaxK, "kMaxK: Expected `head_dim < kMaxK`"); + EVOFORMER_CHECK(p.head_dim_value <= kMaxK, "kMaxK: Expected `head_dim_value < kMaxK`"); + return true; + } + + static CUTLASS_DEVICE void attention_kernel(Params p) + { + extern __shared__ char smem_buffer[]; + SharedStorage& shared_storage = *((SharedStorage*)smem_buffer); + + uint16_t thread_id = threadIdx.x; + uint8_t warp_id = warp_uniform(thread_id / 32); + uint8_t lane_id = thread_id % 32; + + if (kPrologueQK) { + prologueQkNextIteration(shared_storage, p, 0, 0, warp_id, lane_id); + } + + // Computes (dO*out).sum(-1) and writes it to `p.delta_ptr` + if (kKernelComputesDelta) { + constexpr int kOptimalElements = 128 / cutlass::sizeof_bits::value; + if (p.head_dim_value % kOptimalElements == 0) { + for (int query_start = 0; query_start < 
p.num_queries; query_start += kBlockSizeI) { + computeDelta(p, query_start, warp_id, lane_id); + } + } else { + for (int query_start = 0; query_start < p.num_queries; query_start += kBlockSizeI) { + computeDelta<1>(p, query_start, warp_id, lane_id); + } + } + __syncthreads(); + } + + OutputFragments output_frags; + + int32_t key_start = 0; + int32_t key_end = p.num_keys / kBlockSizeJ * kBlockSizeJ; + for (; key_start < key_end; key_start += kBlockSizeJ) { + output_frags.clear(); + int32_t query_start = getQueryStart(p, key_start); + int32_t query_end = + query_start + (p.num_queries - query_start) / kBlockSizeI * kBlockSizeI; + for (; query_start < query_end; query_start += kBlockSizeI) { + processBlockIJ( + shared_storage, output_frags, p, query_start, key_start, warp_id, lane_id); + } + // last (partial) query + if (query_start < p.num_queries) { + processBlockIJ( + shared_storage, output_frags, p, query_start, key_start, warp_id, lane_id); + } + if (kOutputInRF) { + writeFragsToGmem( + shared_storage, output_frags, p, key_start, warp_id, lane_id); + } else if (getQueryStart(p, key_start) >= p.num_queries) { + zfillGradKV(p, key_start, warp_id, lane_id); + } + __syncthreads(); + } + // Last (partial) key + if (key_start != p.num_keys) { + output_frags.clear(); + int32_t query_start = getQueryStart(p, key_start); + for (; query_start < p.num_queries; query_start += kBlockSizeI) { + warp_id = warp_uniform(warp_id); + processBlockIJ( + shared_storage, output_frags, p, query_start, key_start, warp_id, lane_id); + } + if (kOutputInRF) { + writeFragsToGmem( + shared_storage, output_frags, p, key_start, warp_id, lane_id); + } else if (getQueryStart(p, key_start) >= p.num_queries) { + zfillGradKV(p, key_start, warp_id, lane_id); + } + } + } + + static CUTLASS_DEVICE void loadDi(cutlass::Array& di, + Params const& p, + int32_t query_start) + { + int32_t thread_id = threadIdx.x + threadIdx.y * blockDim.x; + if (thread_id < kBlockSizeI) { + accum_t di_rf = accum_t(0); + if (query_start + thread_id < p.num_queries) { + di_rf = p.delta_ptr[query_start + thread_id]; + } + di[thread_id] = di_rf; + } + } + + template + static CUTLASS_DEVICE void zfillGradKV(Params const& p, + int32_t key_start, + uint8_t warp_id, + uint8_t lane_id) + { + constexpr int kThreadsPerKey = 8; + constexpr int kParallelKeys = kNumThreads / kThreadsPerKey; + static_assert(kBlockSizeJ % kParallelKeys == 0, ""); + // This function is not really optimized, but should rarely be used + // It's only used when some keys are "useless" and don't attend to + // any query, due to causal masking + int thread_id = 32 * warp_id + lane_id; + int k_shift = lane_id % kThreadsPerKey; + + CUTLASS_PRAGMA_UNROLL + for (int j = 0; j < kBlockSizeJ; j += kParallelKeys) { + int key = key_start + j + (thread_id / kThreadsPerKey); + if (!skipBoundsChecks && key >= p.num_keys) { continue; } + auto gv_ptr = p.grad_value_ptr + key * p.gV_strideM(); + auto gk_ptr = p.grad_key_ptr + key * p.gK_strideM(); + + for (int k = k_shift; k < p.head_dim_value; k += kThreadsPerKey) { + gv_ptr[k] = scalar_t(0); + } + for (int k = k_shift; k < p.head_dim; k += kThreadsPerKey) { gk_ptr[k] = scalar_t(0); } + } + } + + template + static CUTLASS_DEVICE void processBlockIJ(SharedStorage& shared_storage, + OutputFragments& output_frags, + Params& p, + int32_t query_start, + int32_t key_start, + uint8_t warp_id, + uint8_t lane_id) + { + cutlass::MatrixCoord no_offset{0, 0}; + accum_t scale = p.scale; + int16_t thread_id = 32 * warp_id + lane_id; + auto rematerializeThreadIds = 
[&]() { + // Prevents `nvcc` from keeping values deduced from + // `thread_id`, `warp_id`, ... in RF - to reduce register pressure + warp_id = warp_uniform(thread_id / 32); + lane_id = thread_id % 32; + thread_id = 32 * warp_id + lane_id; + }; + + bool isFirstQuery = (query_start == getQueryStart(p, key_start)); + int32_t next_query, next_key; + incrIteration(p, query_start, key_start, next_query, next_key); + bool isLastQuery = next_key != key_start; + __syncthreads(); + loadDi(shared_storage.di(), p, query_start); + + int32_t num_queries_in_block = + skipBoundsChecks ? MatmulQK::Mma::Shape::kN + : warp_uniform(cutlass::fast_min((int32_t)MatmulQK::Mma::Shape::kN, + p.num_queries - query_start)); + int32_t num_keys_in_block = + skipBoundsChecks ? MatmulQK::Mma::Shape::kM + : warp_uniform(cutlass::fast_min((int32_t)MatmulQK::Mma::Shape::kM, + p.num_keys - key_start)); + + auto prologueGradV = [&](int col) { + typename MatmulGradV::Mma::IteratorB iterator_dO( + {int32_t(p.gO_strideM)}, + p.grad_output_ptr + query_start * p.gO_strideM + col, + {num_queries_in_block, p.head_dim_value - col}, + thread_id, + no_offset); + MatmulGradV::Mma::prologue( + shared_storage.mm_gradV(), iterator_dO, thread_id, num_queries_in_block); + }; + auto prologueGradQ = [&](int col) { + typename MatmulGradQ::Mma::IteratorB iterator_K( + {int32_t(p.k_strideM)}, + p.key_ptr + key_start * p.k_strideM + col, + {num_keys_in_block, p.head_dim - col}, + thread_id, + no_offset); + MatmulGradQ::Mma::prologue( + shared_storage.mm_gradQ(), iterator_K, thread_id, num_keys_in_block); + }; + auto prologueGradK = [&](int col) { + typename MatmulGradK::Mma::IteratorB iterator_Q( + {int32_t(p.q_strideM)}, + p.query_ptr + query_start * p.q_strideM + col, + {num_queries_in_block, p.head_dim - col}, + thread_id, + no_offset); + MatmulGradK::Mma::prologue( + shared_storage.mm_gradK(), iterator_Q, thread_id, num_queries_in_block); + }; + auto prologueDOV = [&]() { + typename MatmulDOIVJ::Mma::IteratorA iterator_A( + {int32_t(p.gO_strideM)}, + p.grad_output_ptr + query_start * p.gO_strideM, + {num_queries_in_block, p.head_dim_value}, + thread_id, + no_offset); + typename MatmulDOIVJ::Mma::IteratorB iterator_B({int32_t(p.v_strideM)}, + p.value_ptr + key_start * p.v_strideM, + {p.head_dim_value, num_keys_in_block}, + thread_id, + no_offset); + MatmulDOIVJ::Mma::prologue( + shared_storage.mm_doivj(), iterator_A, iterator_B, thread_id, p.head_dim_value); + }; + + ///////////////////////////////////////////////////////////////////////////////////////////////// + // MatmulQK + ///////////////////////////////////////////////////////////////////////////////////////////////// + { + using Mma = typename MatmulQK::Mma; + + cutlass::gemm::GemmCoord problem_size(num_keys_in_block, + num_queries_in_block, + p.head_dim // k + ); + + // k_j + typename Mma::IteratorA iterator_A({int32_t(p.k_strideM)}, + p.key_ptr + key_start * p.k_strideM, + {problem_size.m(), problem_size.k()}, + thread_id, + no_offset); + + // q_i.transpose(-2, -1) + typename Mma::IteratorB iterator_B({int32_t(p.q_strideM)}, + p.query_ptr + query_start * p.q_strideM, + {problem_size.k(), problem_size.n()}, + thread_id, + no_offset); + + Mma mma( + shared_storage.mm_qk_k(), shared_storage.mm_qk_q(), thread_id, warp_id, lane_id); + + typename Mma::FragmentC accum; + + accum.clear(); + + auto gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add + mma.set_prologue_done(kPrologueQK); + 
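// When bounds checks are enabled, out-of-bounds elements are loaded as zeros so partial tiles cannot corrupt the accumulator. +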
mma.set_zero_outside_bounds(!skipBoundsChecks); + mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum); + + // Epilogue: add LSE + exp and store that to our shared memory buffer + // shmem <- (matmul_result - + // logsumexp[i_start:i_end].unsqueeze(1)).exp() + int warp_idx_mn_0 = warp_id % (Mma::Base::WarpCount::kM * Mma::Base::WarpCount::kN); + auto output_tile_coords = cutlass::MatrixCoord{ + warp_idx_mn_0 % Mma::Base::WarpCount::kM, warp_idx_mn_0 / Mma::Base::WarpCount::kM}; + + if (broadcast_1::kEnable || broadcast_2::kEnable) { + cutlass::TensorRef bias_tensor_ref( + shared_storage.bias().data(), + cutlass::layout::RowMajor(MatmulQK::ThreadblockShape::kM)); + using Shape = cutlass::MatrixShape; + AttentionBiasEpilogue + bias_epilogue; + bias_epilogue(bias_tensor_ref, + p.bias1_ptr + key_start, + p.bias2_ptr + query_start * p.num_keys + key_start, + thread_id, + {num_queries_in_block, num_keys_in_block}, + p.num_keys); + // Pij += Bij, Pij is in register fragment and Bij is in shared memory + auto lane_offset = MatmulQK::AccumLambdaIterator::get_lane_offset( + lane_id, warp_id, output_tile_coords); + MatmulQK::AccumLambdaIterator::iterateRows( + lane_offset, + [&](int accum_n) {}, + [&](int accum_m, int accum_n, int idx) { + // remember we are transposed + accum[idx] = accum[idx] * scale + bias_tensor_ref.at({accum_n, accum_m}); + }, + [&](int accum_n) {}); + } else { + accum = cutlass::multiplies()(scale, accum); + } + + __syncthreads(); + if (kPrologueGV) { prologueGradV(0); } + if (kPrologueDOV) { prologueDOV(); } + + MatmulQK::B2bGemm::accumApplyLSEToSmem(shared_storage.attn_shared_storage(), + accum, + p.logsumexp_ptr + query_start, + problem_size.n(), + thread_id, + warp_id, + lane_id, + output_tile_coords); + + __syncthreads(); + } + rematerializeThreadIds(); + + ///////////////////////////////////////////////////////////////////////////////////////////////// + // GradV matmul + // + // grad_v[j_start:j_end] += attn_T @ do_i + ///////////////////////////////////////////////////////////////////////////////////////////////// + constexpr bool kSingleIterationGradV = kMaxK <= MatmulGradV::ThreadblockShape::kN; + for (int col = 0; col < (kSingleIterationGradV ? 
1 : p.head_dim_value); + col += MatmulGradV::ThreadblockShape::kN) { + using Mma = typename MatmulGradV::Mma; + using AccumTileGmem = typename MatmulGradQ::AccumTileGmem; + + cutlass::gemm::GemmCoord problem_size( + num_keys_in_block, p.head_dim_value - col, num_queries_in_block); + auto createEpilogueIter = [&]() { + return typename MatmulGradV::OutputTileIterator( + typename MatmulGradV::OutputTileIterator::Params{p.gV_strideM()}, + p.grad_value_ptr + key_start * p.gV_strideM() + col, + {num_keys_in_block, p.head_dim_value - col}, + thread_id); + }; + typename Mma::IteratorB iterator_B({int32_t(p.gO_strideM)}, + p.grad_output_ptr + query_start * p.gO_strideM + col, + {num_queries_in_block, p.head_dim_value - col}, + thread_id, + no_offset); + + // if dropout: dVj += (Pij.T * Zij) @ dOi + // otherwise: dVj += Pij.T @ dOi + Mma mma(shared_storage.mm_gradV(), + // operand A: Pij + typename MatmulGradV::WarpIteratorA( + shared_storage.attn_shared_storage().accum_ref(), lane_id), + // if we're using dropout, operand A is Pij_dropped = Pij * Zij + // which is computed on the fly as fragments of Pij are loaded in + typename Mma::WarpIteratorAScale(shared_storage.zij().accum_ref(), lane_id), + thread_id, + warp_id, + lane_id); + + int storage_id = col / MatmulGradV::ThreadblockShape::kN; + AccumTileGmem gmem_tile{p.workspace_gv + storage_id * AccumTileGmem::kElementsStored}; + if (!kOutputInRF) { + if (isFirstQuery || !kNeedsAccumGradV) { + output_frags.gradV.clear(); + } else { + gmem_tile.load(output_frags.gradV, thread_id); + } + } + mma.set_prologue_done(kPrologueGV); + + auto gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add + __syncthreads(); + + mma(gemm_k_iterations, output_frags.gradV, iterator_B, output_frags.gradV); + __syncthreads(); + if (kPrologueGV && !kSingleIterationGradV && + col + MatmulGradV::ThreadblockShape::kN < p.head_dim_value) { + prologueGradV(col + MatmulGradV::ThreadblockShape::kN); + } + + if (!kOutputInRF) { + if (kNeedsAccumGradV && !isLastQuery) { + gmem_tile.store(output_frags.gradV, thread_id); + } else { + accumulateInGmem(shared_storage.gradV_epilogue(), + output_frags.gradV, + createEpilogueIter(), + isFirstQuery || kNeedsAccumGradV, + warp_id, + lane_id); + } + } + } + __syncthreads(); + ///////////////////////////////////////////////////////////////////////////////////////////////// + // MatmulDOIVJ + ///////////////////////////////////////////////////////////////////////////////////////////////// + { + using Mma = typename MatmulDOIVJ::Mma; + // do_i + typename Mma::IteratorA iterator_A({int32_t(p.gO_strideM)}, + p.grad_output_ptr + query_start * p.gO_strideM, + {num_queries_in_block, p.head_dim_value}, + thread_id, + no_offset); + + // v_j.transpose(-2, -1) + typename Mma::IteratorB iterator_B({int32_t(p.v_strideM)}, + p.value_ptr + key_start * p.v_strideM, + {p.head_dim_value, num_keys_in_block}, + thread_id, + no_offset); + + Mma mma(shared_storage.mm_doivj(), thread_id, warp_id, lane_id); + mma.set_prologue_done(kPrologueDOV); + mma.set_zero_outside_bounds(!skipBoundsChecks); + + typename Mma::FragmentC accum; + + accum.clear(); + + auto gemm_k_iterations = (p.head_dim_value + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add + mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum); + __syncthreads(); + if (kPrologueGQ) { prologueGradQ(0); } + if (kPrologueGK) { prologueGradK(0); } + + int warp_idx_mn_0 = warp_id % 
(Mma::Base::WarpCount::kM * Mma::Base::WarpCount::kN); + auto output_tile_coords = cutlass::MatrixCoord{ + warp_idx_mn_0 % Mma::Base::WarpCount::kM, warp_idx_mn_0 / Mma::Base::WarpCount::kM}; + // TODO: This must be terribly inefficient. There must be a better way + // tmp [RF] <- (accum [RF] - Di [smem] ) * attn_T.T [smem] + // attn_shared_storage [smem] <- tmp.T + // tmp_shared_storage [smem] <- tmp + { + using LambdaIterator = + typename DefaultMmaAccumLambdaIterator::Iterator; + auto lane_offset = + LambdaIterator::get_lane_offset(lane_id, warp_id, output_tile_coords); + + auto attn_T = shared_storage.attn_shared_storage().accum_ref(); + accum_t current_di; + // dSij = (dPij - Di) * Pij + LambdaIterator::iterateRows( + lane_offset, + [&](int accum_m) { current_di = shared_storage.di()[accum_m]; }, + [&](int accum_m, int accum_n, int idx) { + if (skipBoundsChecks || + (accum_m < num_queries_in_block && accum_n < num_keys_in_block)) { + accum_t attn = attn_T.at({accum_n, accum_m}); + accum[idx] = (accum[idx] - current_di) * attn; + } else { + accum[idx] = 0; + } + }, + [&](int accum_m) { + + }); + + using DefaultGemm = typename MatmulDOIVJ::DefaultGemm; + using OutputOp = typename MatmulDOIVJ::BiasGradEpilogueOutputOp; + if (broadcast_1::kEnable && p.grad_bias1_ptr) { + using Epilogue = + typename BiasGradEpilogueAffineRankN::Epilogue; + cutlass::layout::AffineRankN<2> layout({0, 1}); + auto dst_ptr = p.grad_bias1_ptr + key_start; + typename Epilogue::OutputTileIterator output_iter( + {layout}, + dst_ptr, + {num_queries_in_block, num_keys_in_block}, + (int)thread_id); + Epilogue epilogue(shared_storage.gradB_epilogue(), + (int)thread_id, + (int)warp_id, + (int)lane_id); + epilogue(OutputOp(1), output_iter, accum); + } + + if (broadcast_2::kEnable && p.grad_bias2_ptr) { + if (broadcast_1::kEnable) { __syncthreads(); } + using Epilogue = + typename BiasGradEpilogue::Epilogue; + typename Epilogue::OutputTileIterator::Params params{p.num_keys}; + auto dst_ptr = p.grad_bias2_ptr + query_start * p.num_keys + key_start; + typename Epilogue::OutputTileIterator output_iter( + params, dst_ptr, {num_queries_in_block, num_keys_in_block}, (int)thread_id); + Epilogue epilogue(shared_storage.gradB_epilogue(), + (int)thread_id, + (int)warp_id, + (int)lane_id); + epilogue(OutputOp(1), output_iter, accum); + } + + accum = accum * scale; + + __syncthreads(); + if (!MatmulGradK::DefaultMmaFromSmem::kIsTransposedA) { + auto tmpT = shared_storage.tmpT_shared_storage().accum_ref(); + // attn <- attn_T.T + LambdaIterator::iterateRows( + lane_offset, + [&](int accum_m) {}, + [&](int accum_m, int accum_n, int idx) { + tmpT.at({accum_n, accum_m}) = scalar_t(accum[idx]); + }, + [&](int accum_m) {}); + } + } + + MatmulDOIVJ::B2bGemm::accumToSmem( + shared_storage.tmp_shared_storage(), accum, lane_id, output_tile_coords); + __syncthreads(); + } + p.head_dim = warp_uniform(p.head_dim); + p.k_strideM = warp_uniform(p.k_strideM); + rematerializeThreadIds(); + ///////////////////////////////////////////////////////////////////////////////////////////////// + // GradQ matmul + // + // grad_q[i_start:i_end] += tmp @ k_j + ///////////////////////////////////////////////////////////////////////////////////////////////// + // Skip the loop & associated branches if we know at compile time the number + // of iterations + constexpr bool kSingleIterationGradQ = kMaxK <= MatmulGradQ::ThreadblockShape::kN; + for (int col = 0; col < (kSingleIterationGradQ ? 
1 : p.head_dim); + col += MatmulGradQ::ThreadblockShape::kN) { + using Mma = typename MatmulGradQ::Mma; + using AccumTileGmem = typename MatmulGradQ::AccumTileGmem; + + cutlass::gemm::GemmCoord problem_size( + num_queries_in_block, + false ? MatmulGradQ::ThreadblockShape::kN : p.head_dim - col, + num_keys_in_block); + + // k_j + typename Mma::IteratorB iterator_B({int32_t(p.k_strideM)}, + p.key_ptr + key_start * p.k_strideM + col, + {problem_size.k(), problem_size.n()}, + thread_id, + no_offset); + + auto a = shared_storage.tmp_shared_storage().accum_ref(); + Mma mma(shared_storage.mm_gradQ(), + shared_storage.tmp_shared_storage(), + thread_id, + warp_id, + lane_id, + problem_size.k()); + + typename Mma::FragmentC accum; + + bool isFirst = key_start == 0; + int col_id = col / MatmulGradQ::ThreadblockShape::kN; + int num_cols = + kSingleIterationGradQ ? 1 : ceil_div(p.head_dim, MatmulGradQ::ThreadblockShape::kN); + int storage_id = (col_id + query_start / kBlockSizeI * num_cols); + AccumTileGmem gmem_tile{p.workspace_gq + storage_id * AccumTileGmem::kElementsStored}; + if (isFirst || !kNeedsAccumGradQ) { + accum.clear(); + } else { + gmem_tile.load(accum, thread_id); + } + + auto gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add + __syncthreads(); + mma.set_prologue_done(kPrologueGQ); + mma(gemm_k_iterations, accum, iterator_B, accum); + __syncthreads(); + bool isLastColumn = kSingleIterationGradQ || + (col + MatmulGradQ::ThreadblockShape::kN >= p.head_dim); + if (kPrologueGQ && !isLastColumn) { + prologueGradQ(col + MatmulGradQ::ThreadblockShape::kN); + } + + // Output results + int32_t next_query, next_key; + incrIteration(p, p.num_queries, key_start, next_query, next_key); + bool isLast = next_query > query_start || next_key >= p.num_keys; + if (kNeedsAccumGradQ && !isLast) { + gmem_tile.store(accum, thread_id); + } else { + typename MatmulGradQ::OutputTileIterator output_it( + typename MatmulGradQ::OutputTileIterator::Params{p.gQ_strideM()}, + p.grad_query_ptr + query_start * p.gQ_strideM() + col, + {problem_size.m(), problem_size.n()}, + thread_id); + accumulateInGmem(isLastColumn + ? shared_storage.gradQ_epilogue_lastIter() + : shared_storage.gradQ_epilogue(), + accum, + output_it, + isFirst || kNeedsAccumGradQ, + warp_id, + lane_id); + } + } + ///////////////////////////////////////////////////////////////////////////////////////////////// + // GradK matmul + // + // grad_k[i_start:i_end] += tmp.transpose(-2, -1) @ q_i + ///////////////////////////////////////////////////////////////////////////////////////////////// + rematerializeThreadIds(); + + constexpr bool kSingleIterationGradK = kMaxK <= MatmulGradK::ThreadblockShape::kN; + for (int col = 0; col < (kSingleIterationGradK ? 1 : p.head_dim); + col += MatmulGradK::ThreadblockShape::kN) { + using Mma = typename MatmulGradK::Mma; + using AccumTileGmem = typename MatmulGradQ::AccumTileGmem; + + cutlass::gemm::GemmCoord problem_size( + num_keys_in_block, + false ? MatmulGradK::ThreadblockShape::kN : p.head_dim - col, + num_queries_in_block); + auto createEpilogueIter = [&]() { + return typename MatmulGradK::OutputTileIterator( + typename MatmulGradK::OutputTileIterator::Params{p.gK_strideM()}, + p.grad_key_ptr + key_start * p.gK_strideM() + col, + {num_keys_in_block, + false ? 
MatmulGradK::ThreadblockShape::kN : p.head_dim - col}, + thread_id); + }; + + // q_i + typename Mma::IteratorB iterator_B({int32_t(p.q_strideM)}, + p.query_ptr + query_start * p.q_strideM + col, + {problem_size.k(), problem_size.n()}, + thread_id, + no_offset); + + auto getTmp = [&](int) { return &shared_storage.tmp_shared_storage(); }; + auto getTmpT = [&](int) { return &shared_storage.tmpT_shared_storage(); }; + // this is basically: + // opA = kIsTransposedA ? getTmp() : getTmpT(); + bool constexpr kIsTransposedA = MatmulGradK::DefaultMmaFromSmem::kIsTransposedA; + auto& opA = + *call_conditional::apply( + getTmp, getTmpT, 0); + Mma mma(shared_storage.mm_gradK(), opA, thread_id, warp_id, lane_id, problem_size.k()); + + int storage_id = col / MatmulGradK::ThreadblockShape::kN; + AccumTileGmem gmem_tile{p.workspace_gk + storage_id * AccumTileGmem::kElementsStored}; + if (!kOutputInRF) { + if (isFirstQuery || !kNeedsAccumGradK) { + output_frags.gradK.clear(); + } else { + gmem_tile.load(output_frags.gradK, thread_id); + } + } + mma.set_prologue_done(kPrologueGK); + + auto gemm_k_iterations = (problem_size.k() + Mma::Shape::kK - 1) / Mma::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add + __syncthreads(); + + mma(gemm_k_iterations, output_frags.gradK, iterator_B, output_frags.gradK); + __syncthreads(); + bool isLastColumn = kSingleIterationGradK || + col + MatmulGradK::ThreadblockShape::kN >= p.head_dim; + if (kPrologueGK && !isLastColumn) { + prologueGradK(col + MatmulGradK::ThreadblockShape::kN); + } + + if (kPrologueQK && isLastColumn) { + int32_t next_query, next_key; + incrIteration(p, query_start, key_start, next_query, next_key); + DISPATCH_BOOL(next_key != key_start, kForceReloadK, ([&]() { + prologueQkNextIteration( + shared_storage, p, next_query, next_key, warp_id, lane_id); + })); + } + + // Output results + if (!kOutputInRF) { + if (kNeedsAccumGradK && !isLastQuery) { + gmem_tile.store(output_frags.gradK, thread_id); + } else { + accumulateInGmem(isLastColumn + ? 
shared_storage.gradK_epilogue_final() + : shared_storage.gradK_epilogue(), + output_frags.gradK, + createEpilogueIter(), + isFirstQuery || kNeedsAccumGradK, + warp_id, + lane_id); + __syncthreads(); + } + } + } + } + + static CUTLASS_DEVICE int32_t getQueryStart(Params const& p, int32_t key_start) { return 0; }; + + static CUTLASS_DEVICE void incrIteration(Params const& p, + int32_t query_start, + int32_t key_start, + int32_t& next_query, + int32_t& next_key) + { + next_query = query_start + kBlockSizeI; + next_key = key_start; + if (next_query >= p.num_queries) { + next_key = key_start + kBlockSizeJ; + next_query = getQueryStart(p, next_key); + } + } + + template + static CUTLASS_DEVICE void prologueQkNextIteration(SharedStorage& shared_storage, + Params const& p, + int32_t query_start, + int32_t key_start, + uint8_t warp_id, + uint8_t lane_id) + { + if (query_start >= p.num_queries || key_start >= p.num_keys) { return; } + + static constexpr bool kReloadK = kForceReloadK || !MatmulQK::Mma::kSmemContainsEntireMat; + int thread_id = 32 * warp_id + lane_id; + typename MatmulQK::Mma::IteratorA iterator_A({int32_t(p.k_strideM)}, + p.key_ptr + key_start * p.k_strideM, + {p.num_keys - key_start, p.head_dim}, + thread_id, + cutlass::MatrixCoord{0, 0}); + + typename MatmulQK::Mma::IteratorB iterator_B({int32_t(p.q_strideM)}, + p.query_ptr + query_start * p.q_strideM, + {p.head_dim, p.num_queries - query_start}, + thread_id, + cutlass::MatrixCoord{0, 0}); + + MatmulQK::Mma::prologue(shared_storage.mm_qk_k(), + shared_storage.mm_qk_q(), + iterator_A, + iterator_B, + thread_id, + p.head_dim); + } + + template + static CUTLASS_DEVICE void writeFragsToGmem(SharedStorage& shared_storage, + OutputFragments& output_frags, + Params const& p, + int32_t key_start, + uint8_t warp_id, + uint8_t lane_id) + { + uint16_t thread_id = 32 * warp_id + lane_id; + int32_t num_keys_in_block = + skipBoundsChecks + ? MatmulQK::Mma::Shape::kM + : cutlass::fast_min((int32_t)MatmulQK::Mma::Shape::kM, p.num_keys - key_start); + typename MatmulGradV::OutputTileIterator outputV_it( + typename MatmulGradV::OutputTileIterator::Params{p.gV_strideM()}, + p.grad_value_ptr + key_start * p.gV_strideM(), + {num_keys_in_block, p.head_dim_value}, + thread_id); + accumulateInGmem(shared_storage.gradV_epilogue_final(), + output_frags.gradV, + outputV_it, + true, + warp_id, + lane_id); + + typename MatmulGradK::OutputTileIterator outputK_it( + typename MatmulGradK::OutputTileIterator::Params{p.gK_strideM()}, + p.grad_key_ptr + key_start * p.gK_strideM(), + {num_keys_in_block, false ? MatmulGradK::ThreadblockShape::kN : p.head_dim}, + thread_id); + accumulateInGmem(shared_storage.gradK_epilogue_final(), + output_frags.gradK, + outputK_it, + true, + warp_id, + lane_id); + } + + template + static CUTLASS_DEVICE void accumulateInGmem( + typename MatmulT::DefaultEpilogue::SharedStorage& epilogue_smem, + typename MatmulT::Mma::FragmentC const& accum, + typename MatmulT::OutputTileIterator output_it, + bool first, + uint8_t warp_id, + uint8_t lane_id) + { + using DefaultEpilogue = typename MatmulT::DefaultEpilogue; + using DefaultOutputOp = typename MatmulT::DefaultOutputOp; + using Mma = typename MatmulT::Mma; + int thread_id = 32 * warp_id + lane_id; + DISPATCH_BOOL( + first, kIsFirst, ([&]() { + static constexpr auto ScaleType = + kIsFirst ? 
cutlass::epilogue::thread::ScaleType::Nothing + : cutlass::epilogue::thread::ScaleType::NoBetaScaling; + using EpilogueOutputOp = typename cutlass::epilogue::thread::LinearCombination< + typename DefaultOutputOp::ElementOutput, + DefaultOutputOp::kCount, + typename DefaultOutputOp::ElementAccumulator, + typename DefaultOutputOp::ElementCompute, + ScaleType>; + using Epilogue = typename cutlass::epilogue::threadblock::EpiloguePipelined< + typename DefaultEpilogue::Shape, + typename Mma::Operator, + DefaultEpilogue::kPartitionsK, + typename MatmulT::OutputTileIterator, + typename DefaultEpilogue::AccumulatorFragmentIterator, + typename DefaultEpilogue::WarpTileIterator, + typename DefaultEpilogue::SharedLoadIterator, + EpilogueOutputOp, + typename DefaultEpilogue::Padding, + DefaultEpilogue::kFragmentsPerIteration, + true // IterationsUnroll + >; + EpilogueOutputOp rescale({1, 1}); + Epilogue epilogue(epilogue_smem, thread_id, warp_id, lane_id); + epilogue(rescale, output_it, accum, output_it); + })); + } + + template + static CUTLASS_DEVICE void computeDelta(Params const& p, + int32_t query_start, + uint8_t warp_id, + uint8_t lane_id) + { + // Each thread computes one value for Delta + // Depending on warp configuration, we might have multiple + // threads of the same warp working on the same row + using AccessType = cutlass::Array; + static_assert(kNumThreads >= kBlockSizeI, ""); + static constexpr int kNumThreadsPerLine = kNumThreads / kBlockSizeI; + int16_t thread_id = 32 * warp_id + lane_id; + + int16_t laneFirstCol = kElementsPerAccess * (lane_id % kNumThreadsPerLine); + int16_t laneRow = thread_id / kNumThreadsPerLine; + bool rowPred = (query_start + laneRow) < p.num_queries; + bool pred = rowPred; + + // on windows, previous syntax __restrict__ AccessType* + // resulted in error: "restrict" is not allowed + const AccessType* __restrict__ grad_output_ptr = reinterpret_cast( + p.grad_output_ptr + (query_start + laneRow) * p.gO_strideM + laneFirstCol); + const AccessType* __restrict__ output_ptr = reinterpret_cast( + p.output_ptr + (query_start + laneRow) * p.o_strideM() + laneFirstCol); + + static constexpr int64_t kMaxIters = kMaxK / (kElementsPerAccess * kNumThreadsPerLine); + constexpr int kPipelineStages = 2; + accum_t delta_value = accum_t(0); + using GlobalLoad = cutlass::arch::global_load; + AccessType frag_grad_output[kPipelineStages]; + AccessType frag_output[kPipelineStages]; + + auto loadAndIncrement = [&](int ld_pos, bool is_valid) { + frag_grad_output[ld_pos].clear(); + frag_output[ld_pos].clear(); + GlobalLoad(frag_grad_output[ld_pos], grad_output_ptr, is_valid); + GlobalLoad(frag_output[ld_pos], output_ptr, is_valid); + grad_output_ptr += kNumThreadsPerLine; + output_ptr += kNumThreadsPerLine; + }; + + CUTLASS_PRAGMA_UNROLL + for (int iter = 0; iter < kPipelineStages - 1; ++iter) { + int ld_pos = iter % kPipelineStages; + pred = pred && (laneFirstCol + iter * kElementsPerAccess * kNumThreadsPerLine) < + p.head_dim_value; + loadAndIncrement(ld_pos, pred); + } + auto columnIteration = [&](int iter) { + // Load for next iter + int ld_pos = (iter + kPipelineStages - 1) % kPipelineStages; + pred = pred && (laneFirstCol + (iter + kPipelineStages - 1) * kElementsPerAccess * + kNumThreadsPerLine) < p.head_dim_value; + loadAndIncrement(ld_pos, pred); + CUTLASS_PRAGMA_UNROLL + for (int i = 0; i < AccessType::kElements; ++i) { + delta_value += accum_t(frag_output[iter % kPipelineStages][i]) * + accum_t(frag_grad_output[iter % kPipelineStages][i]); + } + }; + + // If we have a small 
lower-bound for K, we can unroll the loop + if (kMaxK <= 256) { + CUTLASS_PRAGMA_UNROLL + for (int iter = 0; iter < kMaxIters; ++iter) { columnIteration(iter); } + } else { + int num_iters = ceil_div(p.head_dim_value, kElementsPerAccess * kNumThreadsPerLine) * + (kElementsPerAccess * kNumThreadsPerLine); + for (int iter = 0; iter < num_iters; ++iter) { columnIteration(iter); } + } + + // Reduce between workers + static_assert(kNumThreadsPerLine == 1 || kNumThreadsPerLine == 2 || kNumThreadsPerLine == 4, + ""); + CUTLASS_PRAGMA_UNROLL + for (int i = 1; i < kNumThreadsPerLine; i *= 2) { + delta_value = delta_value + __shfl_xor_sync(0xffffffff, delta_value, i); + } + + // Store in gmem + if (rowPred) { p.delta_ptr[query_start + laneRow] = delta_value; } + } +}; + +template +__global__ void __launch_bounds__(AK::kNumThreads, AK::kMinBlocksPerSm) + attention_kernel_backward_batched_impl(typename AK::Params p) +{ + if (!p.advance_to_block()) { return; } + AK::attention_kernel(p); +} + +template +__global__ void __launch_bounds__(AK::kNumThreads, AK::kMinBlocksPerSm) + attention_kernel_backward_batched(typename AK::Params params); diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/kernel_forward.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/kernel_forward.h new file mode 100644 index 0000000000000000000000000000000000000000..e3b11ebcc6615156179c5f9047acb0d65688be7b --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/deepspeed4science/evoformer_attn/kernel_forward.h @@ -0,0 +1,986 @@ +/*************************************************************************************************** + * Copyright (c) 2017 - 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. + * SPDX-License-Identifier: BSD-3-Clause + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this + * list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * 3. Neither the name of the copyright holdvr nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + **************************************************************************************************/ + +// Copyright (c) Microsoft Corporation. 
+// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include +#include +#include + +#include "cutlass/bfloat16.h" +#include "cutlass/fast_math.h" +#include "cutlass/gemm/gemm.h" +#include "cutlass/layout/matrix.h" +#include "cutlass/layout/vector.h" +#include "cutlass/matrix.h" +#include "cutlass/numeric_types.h" +#include "cutlass/tensor_ref.h" + +#include "cutlass/epilogue/threadblock/default_epilogue_simt.h" +#include "cutlass/epilogue/threadblock/default_epilogue_tensor_op.h" +#include "cutlass/epilogue/threadblock/default_epilogue_volta_tensor_op.h" +#include "cutlass/gemm/device/default_gemm_configuration.h" +#include "cutlass/gemm/kernel/default_gemm.h" +#include "cutlass/gemm/threadblock/default_mma.h" +#include "cutlass/gemm/threadblock/default_mma_core_simt.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm70.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm75.h" +#include "cutlass/gemm/threadblock/default_mma_core_sm80.h" +#include "cutlass/gemm/threadblock/threadblock_swizzle.h" +#include "cutlass/matrix_shape.h" +#include "cutlass/platform/platform.h" +#include "cutlass/transform/threadblock/predicated_tile_iterator.h" +#include "epilogue/epilogue_pipelined.h" +#include "epilogue/epilogue_rescale_output.h" +#include "gemm/find_default_mma.h" +#include "gemm/mma_from_smem.h" +#include "gemm_kernel_utils.h" +#include "transform/bias_broadcast.h" +#include "transform/tile_smem_loader.h" + +#include + +using namespace gemm_kernel_utils; + +namespace { +template +constexpr int getWarpsPerSm() +{ + return (Arch::kMinComputeCapability >= 80 && !cutlass::platform::is_same::value + ? 16 + : 12); +} +static CUTLASS_DEVICE float atomicMaxFloat(float* addr, float value) +{ + // source: https://stackoverflow.com/a/51549250 + return (value >= 0) ? 
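// Editor's note (annotation, not part of the original patch): for non-negative
// floats the IEEE-754 bit pattern, read as a signed int, is ordered the same way
// as the float values, so atomicMax on the int bits yields the float maximum;
// for negative inputs that ordering reverses, which is why the second branch
// uses atomicMin on the unsigned bit pattern instead.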
__int_as_float(atomicMax((int*)addr, __float_as_int(value))) + : __uint_as_float(atomicMin((unsigned int*)addr, __float_as_uint(value))); +} +} // namespace + +template < + // The datatype of Q/K/V + typename scalar_t_, + // Architecture we are targeting (eg `cutlass::arch::Sm80`) + typename ArchTag, + // If Q/K/V are correctly aligned in memory and we can run a fast kernel + bool isAligned_, + int kQueriesPerBlock, + int kKeysPerBlock_, + bool kSingleValueIteration_, // = `value.shape[-1] <= kKeysPerBlock` + // This is quite slower on V100 for some reason + // Set to false if you know at compile-time you will never need dropout + bool kSupportsBias_ = false, + template class Broadcast1_ = BroadcastNoLoad, + template class Broadcast2_ = BroadcastNoLoad> +struct AttentionKernel { + using scalar_t = scalar_t_; + using accum_t = float; + using lse_scalar_t = float; + using output_t = scalar_t; + // Accumulator between 2 iterations + // Using `accum_t` improves perf on f16 at the cost of + // numerical errors + using output_accum_t = accum_t; + static constexpr bool kSupportsBias = kSupportsBias_; + static constexpr int kKeysPerBlock = kKeysPerBlock_; + static constexpr bool kIsAligned = isAligned_; + static constexpr bool kSingleValueIteration = kSingleValueIteration_; + static constexpr int32_t kAlignLSE = 32; // block size of backward + static constexpr bool kPreloadV = + ArchTag::kMinComputeCapability >= 80 && cutlass::sizeof_bits::value == 16; + static constexpr bool kKeepOutputInRF = kSingleValueIteration; + static constexpr bool kNeedsOutputAccumulatorBuffer = + !kKeepOutputInRF && !cutlass::platform::is_same::value; + + static_assert(kQueriesPerBlock % 32 == 0, ""); + static_assert(kKeysPerBlock % 32 == 0, ""); + static constexpr int kNumWarpsPerBlock = kQueriesPerBlock * kKeysPerBlock / (32 * 32); + static constexpr int kWarpSize = 32; + + // Launch bounds + static constexpr int kNumThreads = kWarpSize * kNumWarpsPerBlock; + static constexpr int kMinBlocksPerSm = getWarpsPerSm() / kNumWarpsPerBlock; + + struct Params { + // Input tensors + scalar_t* query_ptr; // [num_queries, num_heads, head_dim] + scalar_t* key_ptr; // [num_keys, num_heads, head_dim] + scalar_t* value_ptr; // [num_keys, num_heads, head_dim_value] + + // Output tensors + output_t* output_ptr; // [num_queries, num_heads, head_dim_value] + output_accum_t* output_accum_ptr; // [num_queries, num_heads, head_dim_value] + lse_scalar_t* logsumexp_ptr; // [num_heads, num_queries] - can be null + + // Scale + accum_t scale; + + // Dimensions/strides + int32_t head_dim; + int32_t head_dim_value; + int32_t num_queries; + int32_t num_keys; + + int32_t q_strideM; + int32_t k_strideM; + int32_t v_strideM; + // int32_t bias_strideM = 0; + + int32_t o_strideM = 0; + + // Everything below is only used in `advance_to_block` + // and shouldn't use registers + int32_t q_strideH; + int32_t k_strideH; + int32_t v_strideH; + // int32_t bias_strideH = 0; + + int64_t q_strideB; + int64_t k_strideB; + int64_t v_strideB; + // int32_t bias_strideB = 0; + + int32_t num_batches; + int32_t num_heads; + + // Parameters for biases + scalar_t* bias1_ptr = nullptr; + scalar_t* bias2_ptr = nullptr; + int32_t B = 0; + int32_t N = 0; + + // Moves pointers to what we should process + // Returns "false" if there is no work to do + CUTLASS_DEVICE bool advance_to_block() + { + auto batch_id = blockIdx.z; + auto head_id = blockIdx.y; + auto query_start = blockIdx.x * kQueriesPerBlock; + + auto lse_dim = ceil_div((int32_t)num_queries, kAlignLSE) * kAlignLSE; + 
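// Editor's note (annotation, not part of the original patch): lse_dim rounds the
// number of query rows up to the next multiple of kAlignLSE (32) so the backward
// pass can index logsumexp without bound checks; the padded rows are written as
// +inf at the end of attention_kernel(). Worked example:
//   num_queries = 100, kAlignLSE = 32  ->  lse_dim = ceil_div(100, 32) * 32 = 128.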
+ query_ptr += batch_id * q_strideB; + key_ptr += batch_id * k_strideB; + value_ptr += batch_id * v_strideB; + output_ptr += int64_t(batch_id * num_queries) * o_strideM; + if (output_accum_ptr != nullptr) { + output_accum_ptr += int64_t(batch_id * num_queries) * (head_dim_value * num_heads); + } + + int64_t q_start = 0, k_start = 0; + // Advance to the current batch / head / query_start + query_ptr += (q_start + query_start) * q_strideM + head_id * q_strideH; + key_ptr += k_start * k_strideM + head_id * k_strideH; + + value_ptr += k_start * v_strideM + head_id * v_strideH; + output_ptr += int64_t(q_start + query_start) * o_strideM + head_id * head_dim_value; + + if (output_accum_ptr != nullptr) { + output_accum_ptr += int64_t(q_start + query_start) * (head_dim_value * num_heads) + + head_id * head_dim_value; + } else { + // Accumulate directly in the destination buffer (eg for f32) + output_accum_ptr = (accum_t*)output_ptr; + } + + if (logsumexp_ptr != nullptr) { + // lse[batch_id, head_id, query_start] + logsumexp_ptr += batch_id * lse_dim * num_heads + head_id * lse_dim + query_start; + } + + using broadcast_1 = Broadcast1_; + if (kSupportsBias && broadcast_1::kEnable && bias1_ptr) { + bias1_ptr = broadcast_1::advance(bias1_ptr, + batch_id / N, + batch_id % N, + head_id, + num_queries * N, + num_queries, + 0); + } + using broadcast_2 = Broadcast2_; + if (kSupportsBias && broadcast_2::kEnable && bias2_ptr) { + auto strideB = num_heads * num_queries * num_keys; + auto strideH = num_queries * num_keys; + bias2_ptr = broadcast_2::advance( + bias2_ptr, batch_id / N, batch_id % N, head_id, strideB, 0, strideH); + } + + num_queries -= query_start; + num_batches = 0; // no longer used after + + // If num_queries == 1, and there is only one key head we're wasting + // 15/16th of tensor core compute In that case : + // - we only launch kernels for head_id % kQueriesPerBlock == 0 + // - we iterate over heads instead of queries (strideM = strideH) + if (num_queries == 1 && k_strideH == 0 && v_strideH == 0) { + if (head_id % kQueriesPerBlock != 0) return false; + q_strideM = q_strideH; + num_queries = num_heads; + num_heads = 1; // unused but here for intent + o_strideM = head_dim_value; + } + + // Make sure the compiler knows these variables are the same on all + // the threads of the warp. + query_ptr = warp_uniform(query_ptr); + key_ptr = warp_uniform(key_ptr); + value_ptr = warp_uniform(value_ptr); + output_ptr = warp_uniform(output_ptr); + output_accum_ptr = warp_uniform(output_accum_ptr); + logsumexp_ptr = warp_uniform(logsumexp_ptr); + num_queries = warp_uniform(num_queries); + num_keys = warp_uniform(num_keys); + num_heads = warp_uniform(num_heads); + head_dim = warp_uniform(head_dim); + head_dim_value = warp_uniform(head_dim_value); + o_strideM = warp_uniform(o_strideM); + if (kSupportsBias && broadcast_1::kEnable) { bias1_ptr = warp_uniform(bias1_ptr); } + if (kSupportsBias && broadcast_2::kEnable) { bias2_ptr = warp_uniform(bias2_ptr); } + return true; + } + + __host__ dim3 getBlocksGrid() const + { + return dim3(ceil_div(num_queries, (int32_t)kQueriesPerBlock), num_heads, num_batches); + } + + __host__ dim3 getThreadsGrid() const { return dim3(kWarpSize, kNumWarpsPerBlock, 1); } + }; + + struct MM0 { + /* + In this first matmul, we compute a block of `Q @ K.T`. 
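        (Editor's note, added for clarity: the block computed here has shape
        kQueriesPerBlock x kKeysPerBlock per threadblock and key-iteration. Under a
        hypothetical instantiation with kQueriesPerBlock = kKeysPerBlock = 64, the
        formula kNumWarpsPerBlock = kQueriesPerBlock * kKeysPerBlock / (32 * 32)
        gives 4 warps, i.e. 128 threads per block.)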
+ While the calculation result is still hot in registers, we update + `mi`, `m_prime`, `s_prime` in shared-memory, and then store this value + into a shared-memory ("AccumulatorSharedStorage") that is used later as + operand A for the second matmul (see MM1) + */ + using GemmType = DefaultGemmType; + + using OpClass = typename GemmType::OpClass; + using DefaultConfig = + typename cutlass::gemm::device::DefaultGemmConfiguration; + static constexpr int kAlignmentA = kIsAligned ? DefaultConfig::kAlignmentA + : GemmType::kMinimumAlignment; + static constexpr int kAlignmentB = kIsAligned ? DefaultConfig::kAlignmentB + : GemmType::kMinimumAlignment; + using ThreadblockShape = + cutlass::gemm::GemmShape; + using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>; + using DefaultMma = typename cutlass::gemm::threadblock::FindDefaultMma< + scalar_t, // ElementA, + cutlass::layout::RowMajor, // LayoutA, + kAlignmentA, + scalar_t, // ElementB, + cutlass::layout::ColumnMajor, // LayoutB, + kAlignmentB, + accum_t, + cutlass::layout::RowMajor, // LayoutC, + OpClass, + ArchTag, // ArchTag + ThreadblockShape, // ThreadblockShape + WarpShape, // WarpShape + typename GemmType::InstructionShape, // InstructionShape + DefaultConfig::kStages, // Should use `DefaultConfig::kStages`, but that + // uses too much smem + typename GemmType::Operator // Operator + >::DefaultMma; + using MmaCore = typename DefaultMma::MmaCore; + using IteratorA = typename DefaultMma::IteratorA; + using IteratorB = typename DefaultMma::IteratorB; + using Mma = typename DefaultMma::ThreadblockMma; + using AccumLambdaIterator = + typename DefaultMmaAccumLambdaIterator::Iterator; + static_assert(MmaCore::WarpCount::kM * MmaCore::WarpCount::kN * MmaCore::WarpCount::kK == + kNumWarpsPerBlock, + ""); + + // used for efficient load of bias tile Bij from global to shared memory + using BiasLoader = + TileSmemLoader, + MmaCore::kThreads, + // input restriction: kv_len has to be a multiple of this value + 128 / cutlass::sizeof_bits::value>; + + // Epilogue to store to shared-memory in a format that we can use later for + // the second matmul + using B2bGemm = + typename cutlass::gemm::threadblock::B2bGemm; + using AccumulatorSharedStorage = typename B2bGemm::AccumulatorSharedStorage; + }; + + struct MM1 { + /** + Second matmul: perform `attn @ V` where `attn` is the attention (not + normalized) and stored in shared memory + */ + using GemmType = DefaultGemmType; + + using OpClass = typename GemmType::OpClass; + using DefaultConfig = + typename cutlass::gemm::device::DefaultGemmConfiguration; + static constexpr int kAlignmentA = DefaultConfig::kAlignmentA; // from smem + static constexpr int kAlignmentB = kIsAligned ? 
DefaultConfig::kAlignmentB + : GemmType::kMinimumAlignment; + using ThreadblockShape = + cutlass::gemm::GemmShape; + using WarpShape = cutlass::gemm::GemmShape<32, 32, GemmType::WarpK>; + using InstructionShape = typename GemmType::InstructionShape; + + using LayoutB = cutlass::layout::RowMajor; + using DefaultGemm = + cutlass::gemm::kernel::DefaultGemm; + + using DefaultMmaFromSmem = typename cutlass::gemm::threadblock::DefaultMmaFromSharedMemory< + typename DefaultGemm::Mma, + typename MM0::AccumulatorSharedStorage, + false>; // kScaleOperandA + using Mma = typename DefaultMmaFromSmem::Mma; + using IteratorB = typename Mma::IteratorB; + using WarpCount = typename Mma::WarpCount; + static_assert(WarpCount::kM * WarpCount::kN * WarpCount::kK == kNumWarpsPerBlock, ""); + + using DefaultEpilogue = typename DefaultGemm::Epilogue; + using OutputTileIterator = typename cutlass::epilogue::threadblock::PredicatedTileIterator< + typename DefaultEpilogue::OutputTileIterator::ThreadMap, + output_t>; + using OutputTileIteratorAccum = + typename cutlass::epilogue::threadblock::PredicatedTileIterator< + typename DefaultEpilogue::OutputTileIterator::ThreadMap, + output_accum_t>; + + struct SharedStorageMM1 { + typename Mma::SharedStorage mm; + }; + }; + + static constexpr int64_t kAlignmentQ = MM0::kAlignmentA; + static constexpr int64_t kAlignmentK = MM0::kAlignmentB; + static constexpr int64_t kAlignmentV = 1; + + // Shared storage - depends on kernel params + struct ScalingCoefs { + cutlass::Array m_prime; + cutlass::Array s_prime; + cutlass::Array mi; + }; + + struct SharedStorageEpilogueAtEnd : ScalingCoefs { + struct SharedStorageAfterMM0 { + // Everything here might be overwritten during MM0 + union { + // typename MM0::BiasLoader::SmemTile bias; + cutlass::AlignedBuffer bias; + typename MM0::AccumulatorSharedStorage si; + }; + typename MM1::SharedStorageMM1 mm1; + }; + + union { + typename MM0::Mma::SharedStorage mm0; + SharedStorageAfterMM0 after_mm0; + typename MM1::DefaultEpilogue::SharedStorage epilogue; + }; + + CUTLASS_DEVICE typename MM1::DefaultEpilogue::SharedStorage& epilogue_shared_storage() + { + return epilogue; + } + }; + + struct SharedStorageEpilogueInLoop : ScalingCoefs { + struct SharedStorageAfterMM0 { + // Everything here might be overwritten during MM0 + union { + // typename MM0::BiasLoader::SmemTile bias; + cutlass::AlignedBuffer bias; + typename MM0::AccumulatorSharedStorage si; + }; + typename MM1::SharedStorageMM1 mm1; + typename MM1::DefaultEpilogue::SharedStorage epilogue; + }; + + union { + typename MM0::Mma::SharedStorage mm0; + SharedStorageAfterMM0 after_mm0; + }; + + CUTLASS_DEVICE typename MM1::DefaultEpilogue::SharedStorage& epilogue_shared_storage() + { + return after_mm0.epilogue; + } + }; + + using SharedStorage = + typename cutlass::platform::conditional::type; + + static bool __host__ check_supported(Params const& p) + { + CHECK_ALIGNED_PTR(p.query_ptr, kAlignmentQ); + CHECK_ALIGNED_PTR(p.key_ptr, kAlignmentK); + CHECK_ALIGNED_PTR(p.value_ptr, kAlignmentV); + EVOFORMER_CHECK(p.q_strideM % kAlignmentQ == 0, "query is not correctly aligned (strideM)"); + EVOFORMER_CHECK(p.k_strideM % kAlignmentK == 0, "key is not correctly aligned (strideM)"); + EVOFORMER_CHECK(p.v_strideM % kAlignmentV == 0, "value is not correctly aligned (strideM)"); + EVOFORMER_CHECK(p.num_heads <= 1 || p.q_strideH % kAlignmentQ == 0, + "query is not correctly aligned (strideH)"); + EVOFORMER_CHECK(p.num_heads <= 1 || p.k_strideH % kAlignmentK == 0, + "key is not correctly aligned 
(strideH)"); + EVOFORMER_CHECK(p.num_heads <= 1 || p.v_strideH % kAlignmentV == 0, + "value is not correctly aligned (strideH)"); + return true; + } + + static void CUTLASS_DEVICE attention_kernel(Params& p) + { + // In this block, we will only ever: + // - read query[query_start:query_end, :] + // - write to output[query_start:query_end, :] + + extern __shared__ char smem_buffer[]; + SharedStorage& shared_storage = *((SharedStorage*)smem_buffer); + auto& m_prime = shared_storage.m_prime; + auto& s_prime = shared_storage.s_prime; + auto& mi = shared_storage.mi; + const uint32_t query_start = blockIdx.x * kQueriesPerBlock; + + static_assert(kQueriesPerBlock < kNumWarpsPerBlock * kWarpSize, ""); + if (thread_id() < kQueriesPerBlock) { + s_prime[thread_id()] = accum_t(0); + m_prime[thread_id()] = -cutlass::platform::numeric_limits::infinity(); + mi[thread_id()] = -cutlass::platform::numeric_limits::infinity(); + } + typename MM1::Mma::FragmentC accum_o; + accum_o.clear(); + + auto createOutputIter = [&](int col) -> typename MM1::OutputTileIterator { + using OutputTileIterator = typename MM1::OutputTileIterator; + return OutputTileIterator( + typename OutputTileIterator::Params{(int32_t)p.o_strideM}, + p.output_ptr, + typename OutputTileIterator::TensorCoord{p.num_queries, p.head_dim_value}, + thread_id(), + {0, col}); + }; + + auto createOutputAccumIter = [&](int col) -> typename MM1::OutputTileIteratorAccum { + using OutputTileIteratorAccum = typename MM1::OutputTileIteratorAccum; + return OutputTileIteratorAccum( + typename OutputTileIteratorAccum::Params{(int32_t)(p.head_dim_value * p.num_heads)}, + p.output_accum_ptr, + typename OutputTileIteratorAccum::TensorCoord{p.num_queries, p.head_dim_value}, + thread_id(), + {0, col}); + }; + + // Iterate through keys + for (int32_t iter_key_start = 0; iter_key_start < p.num_keys; + iter_key_start += kKeysPerBlock) { + int32_t problem_size_0_m = cutlass::fast_min((int32_t)kQueriesPerBlock, p.num_queries); + int32_t problem_size_0_n = + cutlass::fast_min(int32_t(kKeysPerBlock), p.num_keys - iter_key_start); + int32_t const& problem_size_0_k = p.head_dim; + int32_t const& problem_size_1_n = p.head_dim_value; + int32_t const& problem_size_1_k = problem_size_0_n; + + auto prologueV = [&](int blockN) { + typename MM1::Mma::IteratorB iterator_V( + typename MM1::IteratorB::Params{MM1::LayoutB(p.v_strideM)}, + p.value_ptr + iter_key_start * p.v_strideM, + {problem_size_1_k, problem_size_1_n}, + thread_id(), + cutlass::MatrixCoord{0, blockN * MM1::Mma::Shape::kN}); + MM1::Mma::prologue( + shared_storage.after_mm0.mm1.mm, iterator_V, thread_id(), problem_size_1_k); + }; + + __syncthreads(); // Need to have shared memory initialized, and `m_prime` + // updated from end of prev iter + // + // MATMUL: Q.K_t + // + // Computes the block-matrix product of: + // (a) query[query_start:query_end, :] + // with + // (b) key[iter_key_start:iter_key_start + kKeysPerBlock] + // and stores that into `shared_storage.si` + // + + // Compute threadblock location + cutlass::gemm::GemmCoord tb_tile_offset = {0, 0, 0}; + + cutlass::MatrixCoord tb_offset_A{tb_tile_offset.m() * MM0::Mma::Shape::kM, + tb_tile_offset.k()}; + + cutlass::MatrixCoord tb_offset_B{tb_tile_offset.k(), + tb_tile_offset.n() * MM0::Mma::Shape::kN}; + + // Construct iterators to A and B operands + typename MM0::IteratorA iterator_A( + typename MM0::IteratorA::Params(typename MM0::MmaCore::LayoutA(p.q_strideM)), + p.query_ptr, + {problem_size_0_m, problem_size_0_k}, + thread_id(), + tb_offset_A); + + typename 
MM0::IteratorB iterator_B( + typename MM0::IteratorB::Params(typename MM0::MmaCore::LayoutB(p.k_strideM)), + p.key_ptr + iter_key_start * p.k_strideM, + {problem_size_0_k, problem_size_0_n}, + thread_id(), + tb_offset_B); + + auto my_warp_id = warp_id(); + auto my_lane_id = lane_id(); + + // Construct thread-scoped matrix multiply + typename MM0::Mma mma(shared_storage.mm0, thread_id(), my_warp_id, my_lane_id); + + typename MM0::Mma::FragmentC accum; + + accum.clear(); + + auto gemm_k_iterations = + (problem_size_0_k + MM0::Mma::Shape::kK - 1) / MM0::Mma::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add + mma(gemm_k_iterations, accum, iterator_A, iterator_B, accum); + __syncthreads(); + + if (kPreloadV) { + prologueV(0); + } else { + MM1::Mma::drain_cp_asyncs(); + } + + typename MM0::Mma::Operator::IteratorC::TensorCoord iteratorC_tile_offset = { + (tb_tile_offset.m() * MM0::Mma::WarpCount::kM) + + (my_warp_id % MM0::Mma::WarpCount::kM), + (tb_tile_offset.n() * MM0::Mma::WarpCount::kN) + + (my_warp_id / MM0::Mma::WarpCount::kM)}; + + // multiply by scaling factor + // if (kSupportsBias) { + // accum = + // cutlass::multiplies()(p.scale, + // accum); + // } + + if (kSupportsBias) { + cutlass::TensorRef bias_tensor_ref( + shared_storage.after_mm0.bias.data(), + cutlass::layout::RowMajor(MM0::ThreadblockShape::kN)); + using Shape = + cutlass::MatrixShape; + AttentionBiasEpilogue + bias_epilogue; + bias_epilogue(bias_tensor_ref, + p.bias1_ptr + iter_key_start, + p.bias2_ptr + query_start * p.num_keys + iter_key_start, + thread_id(), + {problem_size_0_m, problem_size_0_n}, + p.num_keys); + // Pij += Bij, Pij is in register fragment and Bij is in shared memory + auto lane_offset = MM0::AccumLambdaIterator::get_lane_offset( + lane_id(), warp_id(), iteratorC_tile_offset); + MM0::AccumLambdaIterator::iterateRows( + lane_offset, + [&](int accum_m) {}, + [&](int accum_m, int accum_n, int idx) { + if (accum_m < problem_size_0_m && accum_n < problem_size_0_n) { + accum[idx] = + accum[idx] * p.scale + bias_tensor_ref.at({accum_m, accum_n}); + } + }, + [&](int accum_m) {}); + } + + DISPATCH_BOOL(iter_key_start == 0, kIsFirst, ([&] { + DISPATCH_BOOL( + p.num_keys - iter_key_start >= kKeysPerBlock, kFullColumns, ([&] { + // Update `mi` from accum stored in registers + // Also does accum[i] <- exp(accum[i] - mi) + iterative_softmax(accum_o, + accum, + mi, + m_prime, + s_prime, + lane_id(), + thread_id(), + warp_id(), + p.num_keys - iter_key_start, + iteratorC_tile_offset, + kSupportsBias ? 1.0f : p.scale); + })); + })); + + // Output results to shared-memory + int warp_idx_mn_0 = + my_warp_id % (MM0::Mma::Base::WarpCount::kM * MM0::Mma::Base::WarpCount::kN); + auto output_tile_coords = + cutlass::MatrixCoord{warp_idx_mn_0 % MM0::Mma::Base::WarpCount::kM, + warp_idx_mn_0 / MM0::Mma::Base::WarpCount::kM}; + + MM0::B2bGemm::accumToSmem( + shared_storage.after_mm0.si, accum, my_lane_id, output_tile_coords); + + __syncthreads(); + + // + // MATMUL: Attn . V + // Run the matmul `attn @ V` for a block of attn and V. + // `attn` is read from shared memory (in `shared_storage_si`) + // `V` is read from global memory (with iterator_B) + // + + const int64_t nBlockN = + kSingleValueIteration + ? 
1 + : ceil_div((int64_t)problem_size_1_n, int64_t(MM1::ThreadblockShape::kN)); + for (int blockN = 0; blockN < nBlockN; ++blockN) { + int gemm_k_iterations = + (problem_size_1_k + MM1::Mma::Shape::kK - 1) / MM1::Mma::Shape::kK; + + // Compute threadblock-scoped matrix multiply-add and store it in accum + // (in registers) + if (!kPreloadV) { + __syncthreads(); // we share shmem between mma and epilogue + } + + typename MM1::Mma::IteratorB iterator_V( + typename MM1::IteratorB::Params{MM1::LayoutB(p.v_strideM)}, + p.value_ptr + iter_key_start * p.v_strideM, + {problem_size_1_k, problem_size_1_n}, + thread_id(), + cutlass::MatrixCoord{0, blockN * MM1::Mma::Shape::kN}); + typename MM1::Mma mma_pv(shared_storage.after_mm0.mm1.mm, + shared_storage.after_mm0.si, + (int)thread_id(), + (int)warp_id(), + (int)lane_id(), + (int)problem_size_1_k); + mma_pv.set_prologue_done(kPreloadV); + if (!kKeepOutputInRF) { accum_o.clear(); } + mma_pv(gemm_k_iterations, accum_o, iterator_V, accum_o); + __syncthreads(); + + if (kPreloadV && !kSingleValueIteration && blockN + 1 < nBlockN) { + prologueV(blockN + 1); + } + + if (!kKeepOutputInRF) { + MM1::Mma::drain_cp_asyncs(); + DISPATCH_BOOL( + iter_key_start == 0, kIsFirst, ([&] { + DISPATCH_BOOL( + (iter_key_start + kKeysPerBlock) >= p.num_keys, kIsLast, ([&] { + using DefaultEpilogue = typename MM1::DefaultEpilogue; + using DefaultOp = typename MM1::DefaultConfig::EpilogueOutputOp; + using ElementCompute = typename DefaultOp::ElementCompute; + using EpilogueOutputOp = typename cutlass::epilogue::thread:: + MemoryEfficientAttentionNormalize< + typename cutlass::platform:: + conditional:: + type, + output_accum_t, + DefaultOp::kCount, + typename DefaultOp::ElementAccumulator, + ElementCompute, + kIsFirst, + kIsLast, + cutlass::Array>; + using Epilogue = + typename cutlass::epilogue::threadblock::EpiloguePipelined< + typename DefaultEpilogue::Shape, + typename MM1::Mma::Operator, + DefaultEpilogue::kPartitionsK, + typename cutlass::platform::conditional< + kIsLast, + typename MM1::OutputTileIterator, + typename MM1::OutputTileIteratorAccum>::type, + typename DefaultEpilogue::AccumulatorFragmentIterator, + typename DefaultEpilogue::WarpTileIterator, + typename DefaultEpilogue::SharedLoadIterator, + EpilogueOutputOp, + typename DefaultEpilogue::Padding, + DefaultEpilogue::kFragmentsPerIteration, + true, // IterationsUnroll + typename MM1::OutputTileIteratorAccum // Read + // iterator + >; + + int col = blockN * MM1::Mma::Shape::kN; + auto source_iter = createOutputAccumIter(col); + auto dest_iter = + call_conditional:: + apply(createOutputIter, createOutputAccumIter, col); + EpilogueOutputOp rescale(s_prime, m_prime); + Epilogue epilogue(shared_storage.epilogue_shared_storage(), + thread_id(), + warp_id(), + lane_id()); + epilogue(rescale, dest_iter, accum_o, source_iter); + })); + })); + if (!kSingleValueIteration) { __syncthreads(); } + } + } + __syncthreads(); // we modify `m_prime` after + } + + if (kKeepOutputInRF) { + constexpr bool kIsFirst = true; + constexpr bool kIsLast = true; + using DefaultEpilogue = typename MM1::DefaultEpilogue; + using DefaultOp = typename MM1::DefaultConfig::EpilogueOutputOp; + using ElementCompute = typename DefaultOp::ElementCompute; + using EpilogueOutputOp = + typename cutlass::epilogue::thread::MemoryEfficientAttentionNormalize< + output_t, // output + output_accum_t, // source + DefaultOp::kCount, + typename DefaultOp::ElementAccumulator, // accum + output_accum_t, // compute + kIsFirst, + kIsLast, + cutlass::Array>; + using 
Epilogue = typename cutlass::epilogue::threadblock::EpiloguePipelined< + typename DefaultEpilogue::Shape, + typename MM1::Mma::Operator, + DefaultEpilogue::kPartitionsK, + typename MM1::OutputTileIterator, // destination + typename DefaultEpilogue::AccumulatorFragmentIterator, + typename DefaultEpilogue::WarpTileIterator, + typename DefaultEpilogue::SharedLoadIterator, + EpilogueOutputOp, + typename DefaultEpilogue::Padding, + DefaultEpilogue::kFragmentsPerIteration, + true, // IterationsUnroll + typename MM1::OutputTileIteratorAccum // source tile + >; + auto dest_iter = createOutputIter(0); + EpilogueOutputOp rescale(s_prime, m_prime); + Epilogue epilogue( + shared_storage.epilogue_shared_storage(), thread_id(), warp_id(), lane_id()); + MM1::Mma::drain_cp_asyncs(); + epilogue(rescale, dest_iter, accum_o); + } + + // 7. Calculate logsumexp + // To make the backward easier, we pad logsumexp with `inf` + // this avoids a few bound checks, and is not more expensive during fwd + static_assert(kQueriesPerBlock < kNumWarpsPerBlock * kWarpSize, ""); + if (p.logsumexp_ptr && thread_id() < kQueriesPerBlock) { + auto lse_dim = ceil_div((int32_t)p.num_queries, kAlignLSE) * kAlignLSE; + if (thread_id() < p.num_queries) { + p.logsumexp_ptr[thread_id()] = + accum_t(mi[thread_id()]) + cutlass::fast_log(accum_t(s_prime[thread_id()])); + } else if (thread_id() < lse_dim) { + p.logsumexp_ptr[thread_id()] = + cutlass::platform::numeric_limits::infinity(); + } + } + } + + template + CUTLASS_DEVICE static void iterative_softmax( + typename WarpIteratorC::Fragment& frag_o, // output so far + typename WarpIteratorC::Fragment& frag, + cutlass::Array& mi, + cutlass::Array& m_prime, + cutlass::Array& s_prime, + int8_t lane_id, + int8_t thread_id, + int8_t warp_id, + int16_t max_col, + typename WarpIteratorC::TensorCoord const& tile_offset, + float scaling) + { + /* Iterates on the accumulator and corresponding position on result matrix + + (1) Update `mi[r]` to the max value of the row `r` + (2) In a second iteration do the following: + (a) accum <- exp(accum - mi) + (b) m_prime <- exp(m_prime - mi) + (c) s_prime <- s_prime * m_prime + sum(accum) + + All of this is done on registers, before we store all of this + on shared memory for the next matmul with Value. + */ + using Fragment = typename WarpIteratorC::Fragment; + using LambdaIterator = + typename DefaultMmaAccumLambdaIterator::Iterator; + // Convert to `accum_t` (rather than double) + constexpr float kLog2e = 1.4426950408889634074; // log_2(e) = M_LOG2E + if (!kIsFirst) { + if (thread_id < kQueriesPerBlock) { m_prime[thread_id] = mi[thread_id]; } + __syncthreads(); + } + + auto lane_offset = LambdaIterator::get_lane_offset(lane_id, warp_id, tile_offset); + + // First update `mi` to the max per-row + { + accum_t max; + LambdaIterator::iterateRows( + lane_offset, + [&](int accum_m) { max = -cutlass::platform::numeric_limits::infinity(); }, + [&](int accum_m, int accum_n, int idx) { + if (kFullColumns || accum_n < max_col) { + max = cutlass::fast_max(max, frag[idx]); + } + }, + [&](int accum_m) { + // Having 4x atomicMax seems faster than reduce within warp + // first... 
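                // Editor's note (annotation, not part of the original patch): an
                // accumulator row is typically owned by a few lanes (the comment above
                // mentions 4), and each lane folds its partial row maximum into the
                // shared-memory mi[row] via atomicMaxFloat; the softmax scale is folded
                // in here (max * scaling) so mi stays in the same units as the rescaled
                // scores below. Together with the rescaling that follows, this
                // implements the usual streaming-softmax recurrence per row r:
                //   m_new   = max(m_old, max_n(scaling * qk[r, n]))
                //   s_prime = s_prime * exp(m_old - m_new) + sum_n exp(scaling * qk[r, n] - m_new)
                // with the running output accumulator rescaled by the same
                // exp(m_old - m_new) factor (in registers or in the epilogue, depending
                // on kKeepOutputInRF), the final normalization by s_prime done in the
                // output epilogue, and logsumexp = m_new + log(s_prime) written out at
                // the end of attention_kernel().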
+ atomicMaxFloat(&mi[accum_m], max * scaling); + }); + } + frag = cutlass::multiplies()(scaling * kLog2e, frag); + + // Make sure we all share the update values for `mi` + __syncthreads(); + + if (thread_id < kQueriesPerBlock) { + auto m_prime_exp = exp2f(kLog2e * (m_prime[thread_id] - mi[thread_id])); + m_prime[thread_id] = m_prime_exp; + s_prime[thread_id] *= m_prime_exp; + } + __syncthreads(); // Update output fragments + if (kKeepOutputInRF && !kIsFirst) { + accum_t mp; + LambdaIterator::iterateRows( + lane_offset, + [&](int accum_m) { mp = m_prime[accum_m]; }, + [&](int accum_m, int accum_n, int idx) { frag_o[idx] *= mp; }, + [&](int accum_m) {}); + __syncthreads(); + } + // Update accum_m, accum_n, ... + { + accum_t mi_row, total_row; + LambdaIterator::iterateRows( + lane_offset, + [&](int accum_m) { mi_row = kLog2e * mi[accum_m]; }, + [&](int accum_m, int accum_n, int idx) { + frag[idx] = (kFullColumns || accum_n < max_col) ? exp2f(frag[idx] - mi_row) + : accum_t(0.0); + }, + [&](int accum_m) {}); + LambdaIterator::iterateRows( + lane_offset, + [&](int accum_m) { total_row = 0.0; }, + [&](int accum_m, int accum_n, int idx) { total_row += frag[idx]; }, + [&](int accum_m) { + if (LambdaIterator::reduceSameRow( + lane_id, total_row, [](accum_t a, accum_t b) { return a + b; })) { + atomicAdd(&s_prime[accum_m], total_row); + } + }); + } + } + + static CUTLASS_DEVICE int8_t lane_id() { return threadIdx.x; } + static CUTLASS_DEVICE int8_t warp_id() { return threadIdx.y; } + static CUTLASS_DEVICE int16_t thread_id() { return threadIdx.x + threadIdx.y * blockDim.x; } +}; + +template +__global__ void __launch_bounds__(AK::kNumThreads, AK::kMinBlocksPerSm) + attention_kernel_batched_impl(typename AK::Params p) +{ + if (!p.advance_to_block()) { return; } + AK::attention_kernel(p); +} + +template +__global__ void __launch_bounds__(AK::kNumThreads, AK::kMinBlocksPerSm) + attention_kernel_batched(typename AK::Params params); diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/fp_quantizer/includes/context.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/fp_quantizer/includes/context.h new file mode 100644 index 0000000000000000000000000000000000000000..5bd9badbcb4fa06091a8c01b0b0d386153a24589 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/fp_quantizer/includes/context.h @@ -0,0 +1,66 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include +#include +#include +#include +#include +#include "cublas_v2.h" +#include "cuda.h" +#include "curand.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#define WARP_SIZE 32 + +class FPContext { +public: + FPContext() : _seed(42) + { + curandCreateGenerator(&_gen, CURAND_RNG_PSEUDO_DEFAULT); + curandSetPseudoRandomGeneratorSeed(_gen, 123); + } + + virtual ~FPContext() {} + + static FPContext& Instance() + { + static FPContext _ctx; + return _ctx; + } + + curandGenerator_t& GetRandGenerator() { return _gen; } + + cudaStream_t GetCurrentStream() + { + // get current pytorch stream. 
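        // Editor's note (annotation, not part of the original patch):
        // at::cuda::getCurrentCUDAStream() returns the stream PyTorch currently has
        // bound for the active device, so kernels launched by this extension are
        // ordered correctly with respect to the surrounding PyTorch ops. Illustrative
        // call-site sketch only (variable names are hypothetical):
        //   cudaStream_t s = FPContext::Instance().GetCurrentStream();
        //   launch_quantization<__half>(ptr, q_ptr, num_groups, group_size, s,
        //                               q_range, q_bits, q_mantisa_bits,
        //                               stochastic_rounding);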
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + return stream; + } + + std::pair IncrementOffset(uint64_t offset_inc) + { + uint64_t offset = _curr_offset; + _curr_offset += offset_inc; + return std::pair(_seed, offset); + } + + void SetSeed(uint64_t new_seed) { _seed = new_seed; } + +private: + curandGenerator_t _gen; + cublasHandle_t _cublasHandle; + uint64_t _seed; + uint64_t _curr_offset; +}; diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/fp_quantizer/includes/quantize.h b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/fp_quantizer/includes/quantize.h new file mode 100644 index 0000000000000000000000000000000000000000..507252d6e72291f705b19d9c54c9c0050b86e14c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/fp_quantizer/includes/quantize.h @@ -0,0 +1,126 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#pragma once + +#include +#include + +#include + +#include +#include +#include + +#define QUANT_SWITCH(Q_BITS, ...) \ + [&] { \ + if (12 == Q_BITS) { \ + constexpr int CONST_STOCHASTIC_ROUNDING = 0; \ + constexpr int CONST_Q_BITS = 8; \ + constexpr int CONST_Q_MANTISA_BITS = 3; \ + __VA_ARGS__(); \ + } else if (13 == Q_BITS) { \ + constexpr int CONST_STOCHASTIC_ROUNDING = 1; \ + constexpr int CONST_Q_BITS = 8; \ + constexpr int CONST_Q_MANTISA_BITS = 3; \ + __VA_ARGS__(); \ + } else if (10 == Q_BITS) { \ + constexpr int CONST_STOCHASTIC_ROUNDING = 0; \ + constexpr int CONST_Q_BITS = 8; \ + constexpr int CONST_Q_MANTISA_BITS = 2; \ + __VA_ARGS__(); \ + } else if (11 == Q_BITS) { \ + constexpr int CONST_STOCHASTIC_ROUNDING = 1; \ + constexpr int CONST_Q_BITS = 8; \ + constexpr int CONST_Q_MANTISA_BITS = 2; \ + __VA_ARGS__(); \ + } else if (28 == Q_BITS) { \ + constexpr int CONST_STOCHASTIC_ROUNDING = 0; \ + constexpr int CONST_Q_BITS = 12; \ + constexpr int CONST_Q_MANTISA_BITS = 7; \ + __VA_ARGS__(); \ + } else if (29 == Q_BITS) { \ + constexpr int CONST_STOCHASTIC_ROUNDING = 1; \ + constexpr int CONST_Q_BITS = 12; \ + constexpr int CONST_Q_MANTISA_BITS = 7; \ + __VA_ARGS__(); \ + } else if (6 == Q_BITS) { \ + constexpr int CONST_STOCHASTIC_ROUNDING = 0; \ + constexpr int CONST_Q_BITS = 6; \ + constexpr int CONST_Q_MANTISA_BITS = 2; \ + __VA_ARGS__(); \ + } else if (7 == Q_BITS) { \ + constexpr int CONST_STOCHASTIC_ROUNDING = 1; \ + constexpr int CONST_Q_BITS = 6; \ + constexpr int CONST_Q_MANTISA_BITS = 2; \ + __VA_ARGS__(); \ + } else if (2 == Q_BITS) { \ + constexpr int CONST_STOCHASTIC_ROUNDING = 0; \ + constexpr int CONST_Q_BITS = 4; \ + constexpr int CONST_Q_MANTISA_BITS = 1; \ + __VA_ARGS__(); \ + } else { \ + constexpr int CONST_STOCHASTIC_ROUNDING = 1; \ + constexpr int CONST_Q_BITS = 4; \ + constexpr int CONST_Q_MANTISA_BITS = 1; \ + __VA_ARGS__(); \ + } \ + }() + +#define DEQUANT_SWITCH(Q_MANTISA_EXPONENT_BITS, ...) 
\ + [&] { \ + if (12 == Q_MANTISA_EXPONENT_BITS) { \ + constexpr int CONST_Q_MANTISA_BITS = 3; \ + constexpr int CONST_Q_EXPONENT_BITS = 4; \ + __VA_ARGS__(); \ + } else if (10 == Q_MANTISA_EXPONENT_BITS) { \ + constexpr int CONST_Q_MANTISA_BITS = 2; \ + constexpr int CONST_Q_EXPONENT_BITS = 5; \ + __VA_ARGS__(); \ + } else if (28 == Q_MANTISA_EXPONENT_BITS) { \ + constexpr int CONST_Q_MANTISA_BITS = 7; \ + constexpr int CONST_Q_EXPONENT_BITS = 4; \ + __VA_ARGS__(); \ + } else if (6 == Q_MANTISA_EXPONENT_BITS) { \ + constexpr int CONST_Q_MANTISA_BITS = 2; \ + constexpr int CONST_Q_EXPONENT_BITS = 3; \ + __VA_ARGS__(); \ + } else { \ + constexpr int CONST_Q_MANTISA_BITS = 1; \ + constexpr int CONST_Q_EXPONENT_BITS = 2; \ + __VA_ARGS__(); \ + } \ + }() + +template +void launch_quantization(T* val, + uint8_t* q_val, + int num_groups, + int group_size, + cudaStream_t stream, + float q_range, + int q_bits, + int q_mantisa_bits, + int stochastic_rounding); + +template +void launch_dequantization(uint8_t* val, + T* q_val, + int num_groups, + int group_size, + int q_mantisa_bits, + int q_exponent_bits, + cudaStream_t stream); + +template +void launch_selective_dequantization(uint8_t* val, + T* q_val, + int32_t* indexes, + int num_groups, + int group_size, + int num_indexes, + int q_mantisa_bits, + int q_exponent_bits, + cudaStream_t stream); diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/fp_quantizer/quantize.cpp b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/fp_quantizer/quantize.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ec631c576e27352c14e1b263fc21b5beb5743dcc --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/fp_quantizer/quantize.cpp @@ -0,0 +1,116 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include "quantize.h" + +#include +#include +#include + +#define DISPATCH_QUANTIZE(T_TYPE, C_TYPE, mantisa, exponent) \ + if (val.options().dtype() == torch::T_TYPE) { \ + launch_quantization((C_TYPE*)val.data_ptr(), \ + (uint8_t*)out.data_ptr(), \ + num_groups, \ + group_size, \ + at::cuda::getCurrentCUDAStream(), \ + q_range, \ + q_bits, \ + q_mantisa_bits, \ + stochastic_rounding); \ + } + +at::Tensor quantize(torch::Tensor& val, + int group_size, + int stochastic_rounding, + int q_bits, + int q_mantisa_bits) +{ + int total_elems = at::numel(val); + auto options = at::TensorOptions() + .dtype(torch::kInt8) + .layout(val.layout()) + .device(val.device()) + .requires_grad(false); + float q_range = q_bits == 8 ? (q_mantisa_bits == 3 ? 480.0 : 114688.0) : // fp8 ranges + (q_bits == 12 ? 510.0 : // fp12 range + (q_bits == 6 ? 28.0 : // fp6 range + 6.0)); // fp4 range (using power 2); TODO (Reza): add the power-4 + // in case accuracy is not matching! 
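    // Editor's note (annotation, not part of the original patch): each q_range above
    // appears to be the largest magnitude of the target format with E exponent and
    // M mantissa bits as implemented by this quantizer, i.e. 2^(2^(E-1)) * (2 - 2^-M):
    //   fp8  E4M3 -> 2^8  * 1.875     = 480     fp8 E5M2 -> 2^16 * 1.75 = 114688
    //   fp12 E4M7 -> 2^8  * 1.9921875 = 510     fp6 E3M2 -> 2^4  * 1.75 = 28
    //   fp4  E2M1 -> 2^2  * 1.5       = 6
    // Downstream, launch_quantization() folds these settings into the single
    // QUANT_SWITCH key (q_bits - q_mantisa_bits - 1) * q_mantisa_bits +
    // stochastic_rounding, i.e. exponent bits x mantissa bits plus the rounding flag,
    // which is why the macro cases test 12/13 (E4M3), 10/11 (E5M2), 28/29 (E4M7),
    // 6/7 (E3M2) and 2/otherwise (E2M1).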
+ int num_groups = total_elems / group_size; + auto out = torch::empty({num_groups, group_size * q_bits / 8 + 4}, options); + + DISPATCH_QUANTIZE(kHalf, __half, 23, 8); +#ifdef BF16_AVAILABLE + DISPATCH_QUANTIZE(kBFloat16, __nv_bfloat16, 23, 8); +#endif + + return out; +} + +#define DISPATCH_DEQUANTIZE(T_TYPE, C_TYPE, mantisa) \ + if (val.options().dtype() == torch::T_TYPE) { \ + launch_dequantization((uint8_t*)val_q.data_ptr(), \ + (C_TYPE*)val.data_ptr(), \ + num_groups, \ + group_size, \ + q_mantisa_bits, \ + q_exponent_bits, \ + at::cuda::getCurrentCUDAStream()); \ + return; \ + } + +void dequantize(torch::Tensor& val, + torch::Tensor& val_q, + int group_size, + int q_mantisa_bits, + int q_exponent_bits) +{ + int total_elems = at::numel(val); + + int num_groups = total_elems / group_size; + + DISPATCH_DEQUANTIZE(kHalf, __half, 10); +#ifdef BF16_AVAILABLE + DISPATCH_DEQUANTIZE(kBFloat16, __nv_bfloat16, 7); +#endif +} + +#define DISPATCH_DEQUANTIZE_INDEX(T_TYPE, C_TYPE, mantisa) \ + if (val.options().dtype() == torch::T_TYPE) { \ + launch_selective_dequantization((uint8_t*)val_q.data_ptr(), \ + (C_TYPE*)val.data_ptr(), \ + (int32_t*)indexes.data_ptr(), \ + num_groups, \ + group_size, \ + num_indexes, \ + q_mantisa_bits, \ + q_exponent_bits, \ + at::cuda::getCurrentCUDAStream()); \ + return; \ + } +void selective_dequantize(torch::Tensor& val, + torch::Tensor& val_q, + torch::Tensor& indexes, + int group_size, + int q_mantisa_bits, + int q_exponent_bits) +{ + int total_elems = at::numel(val); + int num_indexes = indexes.size(0); + int num_groups = total_elems / group_size; + + DISPATCH_DEQUANTIZE_INDEX(kHalf, __half, 10); +#ifdef BF16_AVAILABLE + DISPATCH_DEQUANTIZE_INDEX(kBFloat16, __nv_bfloat16, 7); +#endif +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) +{ + m.def("quantize", &quantize, "quantize function"); + m.def("dequantize", &dequantize, "dequantize function"); + m.def("selective_dequantize", &selective_dequantize, "selective dequantize function"); +} diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/fp_quantizer/quantize.cu b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/fp_quantizer/quantize.cu new file mode 100644 index 0000000000000000000000000000000000000000..5ada6894747f65d698ed0aa95fb3da41bff24aa8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/csrc/fp_quantizer/quantize.cu @@ -0,0 +1,530 @@ +// Copyright (c) Microsoft Corporation. +// SPDX-License-Identifier: Apache-2.0 + +// DeepSpeed Team + +#include +#include "context.h" +#include "memory_access_utils.h" +#include "quantize.h" +#include "reduction_utils.h" + +#include +#include + +#include +#include + +#include +#include + +using ROp = reduce::ROpType; + +namespace quantization { + +constexpr int access_granularity = 16; +constexpr int quanitzed_access_granularity = 4; +constexpr int quanitzed_access_granularity_6bits = 2; +constexpr int threads = 256; +constexpr int warps = threads / 32; + +} // namespace quantization + +template +__device__ void round(uint32_t& mantisa, uint32_t& dst_exponent, curandStatePhilox4_32_10_t* state) +{ + constexpr uint32_t mantisa_mask = (1 << (_mantisa_bits - q_mantisa_bits)) - 1; + uint32_t offset = stochastic_rounding ? (curand_poisson(state, 10) & mantisa_mask) + : 1 << (_mantisa_bits - q_mantisa_bits - 1); + mantisa += offset; + dst_exponent += (((mantisa & ~mantisa_mask) == (1 << _mantisa_bits)) ? 
1 : 0); +} + +template +__device__ void clip(uint32_t& exponent, uint32_t& mantisa) +{ + constexpr uint32_t max_exponent = (1 << (q_exponent_bits - 1)) + (1 << (_exponent_bits - 1)); + constexpr uint32_t min_exponent = + (1 << (_exponent_bits - 1)) - ((1 << (q_exponent_bits - 1)) - 1); + if (exponent > max_exponent) { + exponent = max_exponent; + mantisa = (((uint32_t)-1) >> (32 - q_mantisa_bits)) << 1; //.11 .. 10 + } + if (exponent < min_exponent) { + exponent = min_exponent; + mantisa = 0; + } +} + +template +__global__ void apply_quantization(T* val, + uint8_t* q_val, + int group_size, + std::pair seed, + float q_range) +{ + int tidx = threadIdx.x; + int wid = tidx >> 5; + int lane = tidx & 0x1f; + int gid = blockIdx.x * quantization::warps + wid; + + constexpr int q_exponent_bits = total_q_bits - q_mantisa_bits - 1; + constexpr uint32_t _mantisa_mask = (1 << _mantisa_bits) - 1; + constexpr uint32_t _exponent_mask = ((1 << _exponent_bits) - 1) << _mantisa_bits; + constexpr uint32_t _sign_mask = 1 << (_mantisa_bits + _exponent_bits); + // CG helpers + cg::thread_block tb = cg::this_thread_block(); + cg::thread_block_tile warp = cg::tiled_partition(tb); + + constexpr uint32_t vector_size = quantization::access_granularity / sizeof(T); + constexpr uint32_t load_stride = vector_size * hw_warp_size; + constexpr uint32_t store_stride = (total_q_bits * vector_size / 8) * hw_warp_size; + const uint32_t thread_offset = lane * vector_size; + const uint32_t store_thread_offset = lane * (total_q_bits * vector_size / 8); + const uint32_t base_load_offset = gid * group_size + thread_offset; + const uint32_t base_store_offset = + gid * ((group_size * total_q_bits / 8) + 4) + + store_thread_offset; // 4-byte for saving the scale per group + const T* load_base_ptr = val + base_load_offset; + T tmp_buf[unroll * vector_size]; + T cur_max; + reduce::init(&cur_max); + + int idx = blockIdx.x * blockDim.x + threadIdx.x; + curandStatePhilox4_32_10_t state; + curand_init(seed.first, idx, seed.second, &state); + +#pragma unroll + for (int i = 0; i < unroll; i++) { + if (i * load_stride + thread_offset < group_size) { + mem_access::load_global( + &tmp_buf[vector_size * i], load_base_ptr + i * load_stride); + for (int j = 0; j < vector_size; j++) + cur_max = reduce::element(cur_max, __habs(tmp_buf[i * vector_size + j])); + } + } + reduce::_block(tb, warp, &cur_max); + + int mantisa_mask = ((1 << q_mantisa_bits) - 1); + mantisa_mask <<= (_mantisa_bits - q_mantisa_bits); + + uint8_t* store_base_ptr = q_val + base_store_offset; + float scale = (float)q_range / conversion::to(cur_max); +#pragma unroll + for (int i = 0; i < unroll; i++) { + if (i * load_stride + thread_offset < group_size) { + uint64_t q_buf = 0; + uint64_t q_buf1 = 0; +#pragma unroll + for (int j = 0; j < vector_size; j++) { + float val_f = conversion::to(tmp_buf[i * vector_size + j]) * scale; + uint32_t* data = reinterpret_cast(&val_f); + uint32_t sign = (data[0] & _sign_mask) >> (_mantisa_bits + _exponent_bits); + uint32_t cur_exponent = (data[0] & _exponent_mask) >> _mantisa_bits; + uint32_t dst_mantisa = (data[0] & _mantisa_mask); + + uint32_t dst_exponent = cur_exponent; + + round<_mantisa_bits, q_mantisa_bits, stochastic_rounding>( + dst_mantisa, dst_exponent, &state); + if (cur_exponent != 0) + clip<_mantisa_bits, _exponent_bits, q_mantisa_bits, q_exponent_bits>( + dst_exponent, dst_mantisa); + + dst_mantisa = (dst_mantisa & mantisa_mask) >> (_mantisa_bits - q_mantisa_bits); + + if (dst_exponent != (1 << q_exponent_bits) - 1) + dst_exponent = 
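                    // Editor's note (annotation, not part of the original patch): this
                    // re-biases the exponent from the fp32 domain (the value was scaled
                    // and converted to float above, so _exponent_bits == 8 and the bias
                    // is 127) to the target format's bias of 2^(q_exponent_bits-1) - 1.
                    // Worked example for q_exponent_bits = 4 (E4M3): a float with stored
                    // exponent 130 (a value in [8, 16)) maps to (130 - 127) + (2^3 - 1) = 10.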
(dst_exponent - ((1 << (_exponent_bits - 1)) - 1)) + + (1 << (q_exponent_bits - 1)) - 1; + if (total_q_bits == 8 || total_q_bits == 4 || total_q_bits == 6) + q_buf = q_buf | + ((uint64_t)((uint8_t)(sign << (q_exponent_bits + q_mantisa_bits) | + (dst_exponent << q_mantisa_bits) | dst_mantisa)) + << j * total_q_bits); + else if (total_q_bits == 12) { + if (j < 5) + q_buf = + q_buf | + ((uint64_t)((uint16_t)(sign << (q_exponent_bits + q_mantisa_bits) | + (dst_exponent << q_mantisa_bits) | dst_mantisa)) + << j * total_q_bits); + else + q_buf1 = + q_buf1 | + ((uint64_t)((uint16_t)(sign << (q_exponent_bits + q_mantisa_bits) | + (dst_exponent << q_mantisa_bits) | dst_mantisa)) + << (j - 5) * total_q_bits); + } + } + if (total_q_bits == 12) { + uint64_t last_nibble_mask = 0xf; + last_nibble_mask = q_buf1 & last_nibble_mask; + q_buf = (last_nibble_mask << 60) | q_buf; + q_buf1 >>= 4; + } + uint8_t* int8_data = reinterpret_cast(&q_buf); + uint8_t* int8_data1 = reinterpret_cast(&q_buf1); + if (total_q_bits == 6) { + mem_access::store_global( + store_base_ptr + i * store_stride, int8_data); + mem_access::store_global( + store_base_ptr + i * store_stride + + quantization::quanitzed_access_granularity_6bits, + int8_data + quantization::quanitzed_access_granularity_6bits); + mem_access::store_global( + store_base_ptr + i * store_stride + + quantization::quanitzed_access_granularity_6bits * 2, + int8_data + 2 * quantization::quanitzed_access_granularity_6bits); + } else { + mem_access::store_global( + store_base_ptr + i * store_stride, int8_data); + + if (total_q_bits > 4) { + mem_access::store_global( + store_base_ptr + i * store_stride + + quantization::quanitzed_access_granularity, + int8_data + quantization::quanitzed_access_granularity); + if (total_q_bits == 12) { + mem_access::store_global( + store_base_ptr + i * store_stride + + quantization::quanitzed_access_granularity * 2, + int8_data1); + } + } + } + } + } + if (lane == 0) { + float q_scale = conversion::to(cur_max) / (float)q_range; + uint8_t* scale_as_int8 = reinterpret_cast(&q_scale); + uint32_t scale_offset = + gid * ((group_size * total_q_bits / 8) + 4) + (group_size * total_q_bits / 8); + if (total_q_bits != 6) + mem_access::store_global( + q_val + scale_offset, scale_as_int8); + else { + mem_access::store_global( + q_val + scale_offset, scale_as_int8); + mem_access::store_global( + q_val + scale_offset + quantization::quanitzed_access_granularity_6bits, + scale_as_int8 + quantization::quanitzed_access_granularity_6bits); + } + } +} + +template +__global__ void apply_dequantization(uint8_t* val, T* q_val, int group_size, int total_num_elements) +{ + constexpr uint32_t vector_size = quantization::access_granularity / sizeof(T); + int tidx = (blockIdx.x * blockDim.x + threadIdx.x) * vector_size; + + constexpr int quantized_bits = _mantisa_bits + _exponent_bits + 1; + constexpr int q_exponent_bits = total_q_bits - q_mantisa_bits - 1; + constexpr uint16_t _mantisa_mask = (1 << _mantisa_bits) - 1; + constexpr uint16_t _exponent_mask = ((1 << _exponent_bits) - 1) << _mantisa_bits; + constexpr uint16_t _sign_mask = 1 << (_mantisa_bits + _exponent_bits); + const uint32_t g_index = (tidx / group_size); + const uint32_t group_size_bytes = (group_size * quantized_bits / 8); + const uint8_t* load_base_ptr = + val + g_index * (group_size_bytes + 4) + (tidx % group_size) * quantized_bits / 8; + + int mantisa_mask = ((1 << q_mantisa_bits) - 1); + mantisa_mask <<= (_mantisa_bits - q_mantisa_bits); + + T* store_base_ptr = q_val + tidx; + float scale; + + 
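    // Editor's note (annotation, not part of the original patch): each quantized
    // group is laid out as group_size * quantized_bits / 8 payload bytes followed by
    // a 4-byte float scale (cur_max / q_range, written by apply_quantization), which
    // is why every group stride here is group_size_bytes + 4 and dequantization is a
    // single multiply by the loaded scale. Worked example: group_size = 128 at
    // 8 bits -> 128 + 4 = 132 bytes per group.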
uint8_t* scale_as_int8 = reinterpret_cast(&scale); + if (quantized_bits == 6) { + mem_access::load_global( + scale_as_int8, val + g_index * (group_size_bytes + 4) + group_size_bytes); + mem_access::load_global( + scale_as_int8 + quantization::quanitzed_access_granularity_6bits, + val + g_index * (group_size_bytes + 4) + group_size_bytes + + quantization::quanitzed_access_granularity_6bits); + } else + mem_access::load_global( + scale_as_int8, val + g_index * (group_size_bytes + 4) + group_size_bytes); + + if (tidx < total_num_elements) { + uint64_t q_buf_in; + uint64_t q_buf_in1; + uint8_t* int8_data = reinterpret_cast(&q_buf_in); + uint8_t* int8_data1 = reinterpret_cast(&q_buf_in1); + if (quantized_bits == 6) { + mem_access::load_global( + int8_data, load_base_ptr); + mem_access::load_global( + int8_data + quantization::quanitzed_access_granularity_6bits, + load_base_ptr + quantization::quanitzed_access_granularity_6bits); + mem_access::load_global( + int8_data + quantization::quanitzed_access_granularity_6bits * 2, + load_base_ptr + quantization::quanitzed_access_granularity_6bits * 2); + + } else { + mem_access::load_global(int8_data, + load_base_ptr); + if (quantized_bits > 4) { + mem_access::load_global( + int8_data + quantization::quanitzed_access_granularity, + load_base_ptr + quantization::quanitzed_access_granularity); + if (quantized_bits == 12) { + mem_access::load_global( + int8_data1, load_base_ptr + quantization::quanitzed_access_granularity * 2); + } + } + } + T store_buf[vector_size]; + uint16_t* q_buf = reinterpret_cast(store_buf); +#pragma unroll + for (int j = 0; j < vector_size; j++) { + uint16_t new_data; + if (j < 5 || quantized_bits != 12) { + new_data = (uint16_t)(q_buf_in >> (j * quantized_bits)); + } else { + if (j == 5) { + new_data = (uint16_t)(q_buf_in1); + new_data = (uint16_t)((new_data << 4) | (q_buf_in >> 60)); + } else + new_data = (uint16_t)(q_buf_in1 >> ((j - 6) * quantized_bits + 8)); + } + + uint16_t sign = (new_data & _sign_mask) >> (_mantisa_bits + _exponent_bits); + uint16_t dst_exponent = (new_data & _exponent_mask) >> _mantisa_bits; + uint16_t dst_mantisa = (new_data & _mantisa_mask); + + if (dst_exponent != (1 << q_exponent_bits) - 1) + dst_exponent = (dst_exponent - ((1 << (_exponent_bits - 1)) - 1)) + + (1 << (q_exponent_bits - 1)) - 1; + + q_buf[j] = + ((sign << (q_exponent_bits + q_mantisa_bits)) | (dst_exponent << q_mantisa_bits) | + (dst_mantisa << (q_mantisa_bits - _mantisa_bits))); + float up_cast = conversion::to(store_buf[j]); + store_buf[j] = conversion::to(up_cast * scale); + } + mem_access::store_global(store_base_ptr, store_buf); + } +} + +#define LAUNCH_FOR_QUANTIZATION_UNROLL(COUNT) \ + case COUNT: \ + apply_quantization \ + <<>>(val, q_val, group_size, seed, q_range); \ + break; + +template +void launch_quantization(T* val, + uint8_t* q_val, + int num_groups, + int group_size, + cudaStream_t stream, + float q_range, + int q_bits, + int q_mantisa_bits, + int stochastic_rounding) +{ + const dim3 grid((num_groups + quantization::warps - 1) / quantization::warps); + const dim3 block(quantization::threads); + + std::pair seed = FPContext::Instance().IncrementOffset(16); + + constexpr int vals_per_unroll = hw_warp_size * quantization::access_granularity / sizeof(T); + + const int copy_unroll = (group_size + vals_per_unroll - 1) / vals_per_unroll; + QUANT_SWITCH((q_bits - q_mantisa_bits - 1) * q_mantisa_bits + stochastic_rounding, [&] { + switch (copy_unroll) { + LAUNCH_FOR_QUANTIZATION_UNROLL(1) + LAUNCH_FOR_QUANTIZATION_UNROLL(2) + 
LAUNCH_FOR_QUANTIZATION_UNROLL(3) + LAUNCH_FOR_QUANTIZATION_UNROLL(4) + LAUNCH_FOR_QUANTIZATION_UNROLL(5) + LAUNCH_FOR_QUANTIZATION_UNROLL(6) + } + }); +} +#define INSTANTIATE_LAUNCH_QUANTIZATION(T, mantisa, exponent) \ + template void launch_quantization( \ + T*, uint8_t*, int, int, cudaStream_t, float q_range, int, int, int); +// fp8(E4M3), nearest-rounding +#ifdef BF16_AVAILABLE +INSTANTIATE_LAUNCH_QUANTIZATION(__nv_bfloat16, 23, 8); +#endif +INSTANTIATE_LAUNCH_QUANTIZATION(__half, 23, 8); + +template +void launch_dequantization(uint8_t* val, + T* q_val, + int num_groups, + int group_size, + int q_mantisa_bits, + int q_exponent_bits, + cudaStream_t stream) +{ + int blocks = ((num_groups * group_size) - 1) / + (quantization::threads * (quantization::access_granularity / sizeof(T))) + + 1; + const dim3 grid(blocks); + const dim3 block(quantization::threads); + DEQUANT_SWITCH(q_mantisa_bits * q_exponent_bits, [&] { + apply_dequantization + <<>>(val, q_val, group_size, (num_groups * group_size)); + }); +} +#define INSTANTIATE_LAUNCH_DEQUANTIZATION(T, mantisa) \ + template void launch_dequantization(uint8_t*, T*, int, int, int, int, cudaStream_t); +// fp8(E4M3) +#ifdef BF16_AVAILABLE +INSTANTIATE_LAUNCH_DEQUANTIZATION(__nv_bfloat16, 7); +#endif +INSTANTIATE_LAUNCH_DEQUANTIZATION(__half, 10); + +template +__global__ void apply_selective_dequantization(uint8_t* val, + T* q_val, + int32_t* indexes, + int group_size, + int total_num_elements) +{ + int index = indexes[blockIdx.x]; + constexpr uint32_t vector_size = quantization::access_granularity / sizeof(T); + int tidx = (blockIdx.y * blockDim.x + threadIdx.x) * vector_size; + int input_index = index * total_num_elements + tidx; + constexpr int quantized_bits = _mantisa_bits + _exponent_bits + 1; + constexpr int q_exponent_bits = total_q_bits - q_mantisa_bits - 1; + constexpr uint16_t _mantisa_mask = (1 << _mantisa_bits) - 1; + constexpr uint16_t _exponent_mask = ((1 << _exponent_bits) - 1) << _mantisa_bits; + constexpr uint16_t _sign_mask = 1 << (_mantisa_bits + _exponent_bits); + const uint32_t g_index = (input_index / group_size); + const uint32_t group_size_bytes = (group_size * quantized_bits / 8); + const uint8_t* load_base_ptr = + val + g_index * (group_size_bytes + 4) + (input_index % group_size) * quantized_bits / 8; + + int mantisa_mask = ((1 << q_mantisa_bits) - 1); + mantisa_mask <<= (_mantisa_bits - q_mantisa_bits); + + T* store_base_ptr = q_val + tidx + blockIdx.x * total_num_elements; + float scale; + + uint8_t* scale_as_int8 = reinterpret_cast(&scale); + if (quantized_bits == 6) { + mem_access::load_global( + scale_as_int8, val + g_index * (group_size_bytes + 4) + group_size_bytes); + mem_access::load_global( + scale_as_int8 + quantization::quanitzed_access_granularity_6bits, + val + g_index * (group_size_bytes + 4) + group_size_bytes + + quantization::quanitzed_access_granularity_6bits); + } else + mem_access::load_global( + scale_as_int8, val + g_index * (group_size_bytes + 4) + group_size_bytes); + + if (tidx < total_num_elements) { + uint64_t q_buf_in; + uint64_t q_buf_in1; + uint8_t* int8_data = reinterpret_cast(&q_buf_in); + uint8_t* int8_data1 = reinterpret_cast(&q_buf_in1); + if (quantized_bits == 6) { + mem_access::load_global( + int8_data, load_base_ptr); + mem_access::load_global( + int8_data + quantization::quanitzed_access_granularity_6bits, + load_base_ptr + quantization::quanitzed_access_granularity_6bits); + mem_access::load_global( + int8_data + quantization::quanitzed_access_granularity_6bits * 2, + 
load_base_ptr + quantization::quanitzed_access_granularity_6bits * 2); + } else { + mem_access::load_global(int8_data, + load_base_ptr); + if (quantized_bits > 4) { + mem_access::load_global( + int8_data + quantization::quanitzed_access_granularity, + load_base_ptr + quantization::quanitzed_access_granularity); + if (quantized_bits == 12) { + mem_access::load_global( + int8_data1, load_base_ptr + quantization::quanitzed_access_granularity * 2); + } + } + } + T store_buf[vector_size]; + uint16_t* q_buf = reinterpret_cast(store_buf); +#pragma unroll + for (int j = 0; j < vector_size; j++) { + uint16_t new_data; + if (j < 5 || quantized_bits != 12) { + new_data = (uint16_t)(q_buf_in >> (j * quantized_bits)); + } else { + if (j == 5) { + new_data = (uint16_t)(q_buf_in1); + new_data = (uint16_t)((new_data << 4) | (q_buf_in >> 60)); + } else + new_data = (uint16_t)(q_buf_in1 >> ((j - 6) * quantized_bits + 8)); + } + + uint16_t sign = (new_data & _sign_mask) >> (_mantisa_bits + _exponent_bits); + uint16_t dst_exponent = (new_data & _exponent_mask) >> _mantisa_bits; + uint16_t dst_mantisa = (new_data & _mantisa_mask); + + if (dst_exponent != (1 << q_exponent_bits) - 1) + dst_exponent = (dst_exponent - ((1 << (_exponent_bits - 1)) - 1)) + + (1 << (q_exponent_bits - 1)) - 1; + + q_buf[j] = + ((sign << (q_exponent_bits + q_mantisa_bits)) | (dst_exponent << q_mantisa_bits) | + (dst_mantisa << (q_mantisa_bits - _mantisa_bits))); + float up_cast = conversion::to(store_buf[j]); + store_buf[j] = conversion::to(up_cast * scale); + } + mem_access::store_global(store_base_ptr, store_buf); + } +} + +template +void launch_selective_dequantization(uint8_t* val, + T* q_val, + int32_t* indexes, + int num_groups, + int group_size, + int num_indexes, + int q_mantisa_bits, + int q_exponent_bits, + cudaStream_t stream) +{ + int total_elements_per_index = (num_groups / num_indexes) * group_size; + int blocks = (total_elements_per_index - 1) / + (quantization::threads * (quantization::access_granularity / sizeof(T))) + + 1; + const dim3 grid(num_indexes, blocks); + const dim3 block(quantization::threads); + DEQUANT_SWITCH(q_mantisa_bits * q_exponent_bits, [&] { + apply_selective_dequantization + <<>>(val, q_val, indexes, group_size, total_elements_per_index); + }); +} +#define INSTANTIATE_LAUNCH_SELECTIVE_DEQUANTIZATION(T, mantisa) \ + template void launch_selective_dequantization( \ + uint8_t*, T*, int32_t*, int, int, int, int, int, cudaStream_t); +// fp8(E4M3) +#ifdef BF16_AVAILABLE +INSTANTIATE_LAUNCH_SELECTIVE_DEQUANTIZATION(__nv_bfloat16, 7); +#endif +INSTANTIATE_LAUNCH_SELECTIVE_DEQUANTIZATION(__half, 10); diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f8886edb6ba54580c85a43a51ed870d9dbab76d Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/__pycache__/transformer.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/__pycache__/transformer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4331925c671fb9eda521a5c2fcc18e65a09868a5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/__pycache__/transformer.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/bias_add.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/bias_add.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7bbd856f783721fb4936e27a356d3843abea99e Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/bias_add.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/diffusers_attention.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/diffusers_attention.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d8b49a20a5d5396bcf4ca7ba6a55b0dfe72dffc0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/diffusers_attention.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/ds_attention.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/ds_attention.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ecc05cc1938107bccf35a157b72c86d5e0b6a212 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/ds_attention.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/moe_inference.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/moe_inference.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8af8f77a5af28ce339146b867f5bf8ad166bef67 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/__pycache__/moe_inference.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__init__.py b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b7d1968df62a99849992c8b1e93698d9e51cec30 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__init__.py @@ -0,0 +1,11 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +from .residual_add import residual_add_bias +from .layer_norm import layer_norm, layer_norm_residual +from .gelu import gelu +from .softmax import softmax +from .ops import * +from .matmul_ext import fp16_matmul, matmul_4d, score_4d_matmul, context_4d_matmul diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d3938835281d9f1c716e240521ea86394acf0e0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/attention.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/attention.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae3122c0788f8a8ba6c4470ab4d5733a1d636ddd Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/attention.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/layer_norm.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/layer_norm.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..991bb8aaa630c443a6ead4d5eabe87b192af5d62 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/layer_norm.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/matmul_ext.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/matmul_ext.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a78200c3db5987fbd9fb073cda716c20726a17c Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/matmul_ext.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/ops.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1000acea99e0ecd9505344ea5abe5cd4f5f7ceb4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/ops.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/softmax.cpython-310.pyc b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/softmax.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2ac0daeb1052f6cdc38489c6e807322f7a5d2ec Binary files /dev/null and b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/__pycache__/softmax.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/layer_norm.py b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/layer_norm.py new file mode 100644 
index 0000000000000000000000000000000000000000..d3f313d2ac3d8205702dc2ceb82856154e9ddb2c --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/layer_norm.py @@ -0,0 +1,249 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import triton +import triton.language as tl +''' +layer-normalization +modified the triton kernel in +https://github.com/openai/triton/blob/34817ecc954a6f4ca7b4dfb352fdde1f8bd49ca5/python/tutorials/05-layer-norm.py +''' + + +@triton.jit +def layer_norm_kernel( + Out, + A, + Weight, + Bias, + stride, + N, + eps, + BLOCK_SIZE: tl.constexpr, +): + # position of elements processed by this program + row = tl.program_id(0) + Out += row * stride + A += row * stride + # compute mean + mean = 0 + _mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32) + for off in range(0, N, BLOCK_SIZE): + cols = off + tl.arange(0, BLOCK_SIZE) + a = tl.load(A + cols, mask=cols < N, other=0.0).to(tl.float32) + _mean += a + mean = tl.sum(_mean, axis=0) / N + # compute variance + _var = tl.zeros([BLOCK_SIZE], dtype=tl.float32) + for off in range(0, N, BLOCK_SIZE): + cols = off + tl.arange(0, BLOCK_SIZE) + a = tl.load(A + cols, mask=cols < N, other=0.0).to(tl.float32) + a = tl.where(cols < N, a - mean, 0.0) + _var += a * a + var = tl.sum(_var, axis=0) / N + rstd = 1 / tl.sqrt(var + eps) + # multiply by weight and add bias + for off in range(0, N, BLOCK_SIZE): + cols = off + tl.arange(0, BLOCK_SIZE) + mask = cols < N + weight = tl.load(Weight + cols, mask=mask) + bias = tl.load(Bias + cols, mask=mask) + a = tl.load(A + cols, mask=mask, other=0.0).to(tl.float32) + a_hat = (a - mean) * rstd + out = a_hat * weight + bias + # # write-back + tl.store(Out + cols, out, mask=mask) + + +@triton.jit +def layer_norm_residual_kernel( + Out, + A, + Residual, + ln_input, + Weight, + Bias, + stride, + N, + eps, + BLOCK_SIZE: tl.constexpr, +): + # position of elements processed by this program + row = tl.program_id(0) + Out += row * stride + A += row * stride + Residual += row * stride + ln_input += row * stride + # compute mean + mean = 0 + _mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32) + for off in range(0, N, BLOCK_SIZE): + cols = off + tl.arange(0, BLOCK_SIZE) + a = tl.load(A + cols, mask=cols < N, other=0.0).to(tl.float32) + res = tl.load(Residual + cols, mask=cols < N, other=0.0).to(tl.float32) + a = a + res + tl.store(ln_input + cols, a, mask=cols < N) + _mean += a + mean = tl.sum(_mean, axis=0) / N + # compute variance + _var = tl.zeros([BLOCK_SIZE], dtype=tl.float32) + for off in range(0, N, BLOCK_SIZE): + cols = off + tl.arange(0, BLOCK_SIZE) + a = tl.load(ln_input + cols, mask=cols < N, other=0.0).to(tl.float32) + a = tl.where(cols < N, a - mean, 0.0) + _var += a * a + var = tl.sum(_var, axis=0) / N + rstd = 1 / tl.sqrt(var + eps) + # multiply by weight and add bias + for off in range(0, N, BLOCK_SIZE): + cols = off + tl.arange(0, BLOCK_SIZE) + mask = cols < N + weight = tl.load(Weight + cols, mask=mask) + bias = tl.load(Bias + cols, mask=mask) + a = tl.load(ln_input + cols, mask=mask, other=0.0).to(tl.float32) + a_hat = (a - mean) * rstd + out = a_hat * weight + bias + # write-back + tl.store(Out + cols, out, mask=mask) + + +@triton.jit +def layer_norm_residual_bias_kernel( + Out, + A, + Residual, + InputBias, + ln_input, + Weight, + Bias, + stride, + N, + eps, + BLOCK_SIZE: tl.constexpr, +): + # position of elements processed by this program + row = tl.program_id(0) + Out += row * stride + A += row 
* stride + Residual += row * stride + ln_input += row * stride + # compute mean + mean = 0 + _mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32) + for off in range(0, N, BLOCK_SIZE): + cols = off + tl.arange(0, BLOCK_SIZE) + a = tl.load(A + cols, mask=cols < N, other=0.0).to(tl.float32) + res = tl.load(Residual + cols, mask=cols < N, other=0.0).to(tl.float32) + b = tl.load(InputBias + cols, mask=cols < N, other=0.0).to(tl.float32) + a = a + b + res + tl.store(ln_input + cols, a, mask=cols < N) + _mean += a + mean = tl.sum(_mean, axis=0) / N + # compute variance + _var = tl.zeros([BLOCK_SIZE], dtype=tl.float32) + for off in range(0, N, BLOCK_SIZE): + cols = off + tl.arange(0, BLOCK_SIZE) + a = tl.load(ln_input + cols, mask=cols < N, other=0.0).to(tl.float32) + a = tl.where(cols < N, a - mean, 0.0) + _var += a * a + var = tl.sum(_var, axis=0) / N + rstd = 1 / tl.sqrt(var + eps) + # multiply by weight and add bias + for off in range(0, N, BLOCK_SIZE): + cols = off + tl.arange(0, BLOCK_SIZE) + mask = cols < N + weight = tl.load(Weight + cols, mask=mask) + bias = tl.load(Bias + cols, mask=mask) + a = tl.load(ln_input + cols, mask=mask, other=0.0).to(tl.float32) + a_hat = (a - mean) * rstd + out = a_hat * weight + bias + # write-back + tl.store(Out + cols, out, mask=mask) + + +def layer_norm(a, weight, bias, eps): + assert a.is_contiguous() + assert weight.is_contiguous() + assert bias.is_contiguous() + + # allocate output + out = torch.empty_like(a) + # reshape input data into 2D tensor + a_arg = a.view(-1, a.shape[-1]) + M, N = a_arg.shape + # Less than 64KB per feature: enqueue fused kernel + MAX_FUSED_SIZE = 65536 // a.element_size() + BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N)) + BLOCK_SIZE = max(BLOCK_SIZE, 128) + BLOCK_SIZE = min(BLOCK_SIZE, 4096) + BLOCK_SIZE = BLOCK_SIZE if N <= 4096 else 8192 + # heuristics for number of warps + num_warps = min(max(BLOCK_SIZE // 256, 1), 8) + layer_norm_kernel[(M, )]( + out, + a_arg, + weight, + bias, + a_arg.stride(0), + N, + eps, + BLOCK_SIZE=BLOCK_SIZE, + num_warps=num_warps, + ) + return out + + +def layer_norm_residual(a, input_bias, residual, weight, bias, eps): + assert a.is_contiguous() + assert weight.is_contiguous() + assert bias.is_contiguous() + assert residual.is_contiguous() + + # allocate output and scratch-pad for residual addition + out = torch.empty_like(a) + ln_input = torch.empty_like(a) + # reshape input data into 2D tensor + a_arg = a.view(-1, a.shape[-1]) + residual = residual.view(-1, residual.shape[-1]) + M, N = a_arg.shape + # Less than 64KB per feature: enqueue fused kernel + MAX_FUSED_SIZE = 65536 // a.element_size() + BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N)) + BLOCK_SIZE = max(BLOCK_SIZE, 128) + BLOCK_SIZE = min(BLOCK_SIZE, 4096) + BLOCK_SIZE = BLOCK_SIZE if N <= 4096 else 8192 + # heuristics for number of warps + num_warps = min(max(BLOCK_SIZE // 256, 1), 8) + if input_bias is None: + layer_norm_residual_kernel[(M, )]( + out, + a_arg, + residual, + ln_input, + weight, + bias, + a_arg.stride(0), + N, + eps, + BLOCK_SIZE=BLOCK_SIZE, + num_warps=num_warps, + ) + else: + layer_norm_residual_bias_kernel[(M, )]( + out, + a_arg, + residual, + input_bias, + ln_input, + weight, + bias, + a_arg.stride(0), + N, + eps, + BLOCK_SIZE=BLOCK_SIZE, + num_warps=num_warps, + ) + return out diff --git a/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/mlp.py b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/mlp.py new file mode 100644 index 
0000000000000000000000000000000000000000..1708080b27efb0671d361b7cdeaff4b262cf0ce8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/deepspeed/ops/transformer/inference/triton/mlp.py @@ -0,0 +1,81 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch +import math +import torch.nn as nn +from deepspeed.accelerator import get_accelerator +from deepspeed import comm as dist +from ..op_binding import MLPGemmOp, VectorMatMulOp, GELUGemmOp, ResidualAddOp + + +class TritonMLP(nn.Module): + + def __init__(self, config, mp_group=None, q_scales=None, q_groups=1, merge_count=1, mlp_extra_grouping=False): + super(TritonMLP, self).__init__() + + self.config = config + data_type = self.config.dtype + data_type_fp = torch.half if self.config.dtype == torch.int8 else self.config.dtype + device = get_accelerator().current_device_name() + self.attn_nw = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device), + requires_grad=False) + self.attn_nb = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device), + requires_grad=False) + intm_size_per_partition = self.config.intermediate_size // self.config.mp_size + self.inter_w = nn.Parameter(torch.empty(self.config.hidden_size, + intm_size_per_partition, + dtype=data_type, + device=device), + requires_grad=False) + self.inter_b = nn.Parameter(torch.empty(intm_size_per_partition, dtype=data_type_fp, device=device), + requires_grad=False) + self.output_w = nn.Parameter(torch.empty(intm_size_per_partition, + self.config.hidden_size, + dtype=data_type, + device=device), + requires_grad=False) + self.output_b = nn.Parameter(torch.empty(self.config.hidden_size, dtype=data_type_fp, device=device), + requires_grad=False) + + # used for quantization + self.q_scales = q_scales + self.q_groups = q_groups * 2 if mlp_extra_grouping else q_groups + self.merge_count = int(math.log2(merge_count)) + self.mp_group = mp_group + + self.mlp_gemm_func = MLPGemmOp(config) + self.vector_matmul_func = VectorMatMulOp(config) + self.fused_gemm_gelu = GELUGemmOp(config) + self.residual_add_func = ResidualAddOp(config) + + def forward(self, input, residual, residual_norm, bias): + residual_add = None + if self.attn_nw is None: + output = self.fused_gemm_gelu(input=residual_norm, + weight=self.inter_w, + bias=self.inter_b, + weight_out=self.output_w) + else: + output, residual_add = self.mlp_gemm_func(input=input, + residual=residual, + input_bias=bias, + weight_interm=self.inter_w, + weight_out=self.output_w, + bias=self.inter_b, + gamma=self.attn_nw, + beta=self.attn_nb) + residual = self.residual_add_func(hidden_state=output, + residual=residual, + attention_output=input, + attention_bias=bias if bias is not None else self.output_b, + final_bias=self.output_b, + add_bias=bias is not None, + residual_add=residual_add) + + if self.mp_group is not None and dist.get_world_size(group=self.mp_group) > 1: + dist.all_reduce(residual, group=self.mp_group) + + return residual diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/data-v1-dl-1595261.arff.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/data-v1-dl-1595261.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..6619336b53b5d71c6bd5c7f2de8e11b88c33ed31 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_1590/data-v1-dl-1595261.arff.gz @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:ee1dcdf58f2f1072f7dd1b43388969c51bc6cfe776e3e9465ae6a756e5ddb10a +size 1152 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-40981.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-40981.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..b3c915315eff5a266c715e3f99584b16ec06ea8f --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_292/api-v1-jdf-40981.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:255c16f33ed2967fe100cd8011a7e69f789603724b1ec2ecf91dfeb72067c190 +size 306 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jd-40966.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jd-40966.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..2b93281d0ded598bd03b1160a1b8a86df61b485c --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jd-40966.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36c63c3ac8c9db59910acbf4c772cd53040ccd0eac0b0452611dd7ad8da50474 +size 1660 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdf-40966.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdf-40966.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..138ffc718b067282922ebeb107b22b8c3af08477 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdf-40966.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8adac8e2f8cbcbfa9677acdd4927a961430465d2c99401832160be455cfaced8 +size 3690 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdl-dn-miceprotein-l-2-dv-4.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdl-dn-miceprotein-l-2-dv-4.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..7e6491106a294f38733d8dfd6475c1afe42b8848 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdl-dn-miceprotein-l-2-dv-4.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f0c203b4627175cebbf527d81917a499911af915f6f2f46ee7248428a948d603 +size 325 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdl-dn-miceprotein-l-2-s-act-.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdl-dn-miceprotein-l-2-s-act-.json.gz new file mode 100644 index 0000000000000000000000000000000000000000..ecd8d1b12a547833c2d00ed29be640a12167d082 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdl-dn-miceprotein-l-2-s-act-.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:301396b4a42c814b1a15038ddfcbcf5c8590501231747d0dc2a500b84b2fd0df +size 328 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdq-40966.json.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdq-40966.json.gz new file mode 100644 index 
0000000000000000000000000000000000000000..1d119ce6ec907e4689015911b16bcbfe8552b4e8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/api-v1-jdq-40966.json.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3dee83987fffa8ec20e23b3cabc00d42beb7a469af6bd803909998c1687fa634 +size 934 diff --git a/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/data-v1-dl-17928620.arff.gz b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/data-v1-dl-17928620.arff.gz new file mode 100644 index 0000000000000000000000000000000000000000..c82d051bccb1b232214b31c73114f4f78749d810 --- /dev/null +++ b/venv/lib/python3.10/site-packages/sklearn/datasets/tests/data/openml/id_40966/data-v1-dl-17928620.arff.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c5fd93ffec7deb63a940fd698534dd7ebb7db349fc183930041cbf17e60e2cc +size 6471
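
The Triton layer-norm wrapper added above in deepspeed/ops/transformer/inference/triton/layer_norm.py exposes layer_norm(a, weight, bias, eps) and layer_norm_residual(a, input_bias, residual, weight, bias, eps). What follows is a minimal usage sketch, not part of the diff: it assumes a CUDA device with triton installed, and the batch size, hidden size, dtype, and tolerance are illustrative assumptions. It simply runs the fused kernel and compares it against PyTorch's reference layer norm.

import torch
from deepspeed.ops.transformer.inference.triton.layer_norm import layer_norm

# Illustrative sizes and dtype (assumed, not taken from the diff above).
batch, hidden = 8, 4096
eps = 1e-5

x = torch.randn(batch, hidden, dtype=torch.float16, device="cuda")
weight = torch.randn(hidden, dtype=torch.float16, device="cuda")
bias = torch.randn(hidden, dtype=torch.float16, device="cuda")

# Fused Triton kernel from the layer_norm.py added in this diff.
out_triton = layer_norm(x, weight, bias, eps)

# PyTorch reference, computed in fp32 for a stable comparison.
out_ref = torch.nn.functional.layer_norm(x.float(), (hidden,), weight.float(), bias.float(), eps)

# Max absolute error; expected to be small (fp16 rounding only).
print(torch.max(torch.abs(out_triton.float() - out_ref)))

The same pattern applies to layer_norm_residual, which additionally takes the residual tensor and an optional input bias and writes the pre-normalization sum into a scratch buffer before normalizing.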