
feat: draft for cuda kernels

AlpinDale 1 year ago
parent
commit
a409431c40

BIN
aphrodite/__pycache__/__init__.cpython-310.pyc


BIN
aphrodite/common/__pycache__/block.cpython-310.pyc


BIN
aphrodite/common/__pycache__/config.cpython-310.pyc


BIN
aphrodite/common/__pycache__/logger.cpython-310.pyc


BIN
aphrodite/common/__pycache__/outputs.cpython-310.pyc


BIN
aphrodite/common/__pycache__/sampling_params.cpython-310.pyc


BIN
aphrodite/common/__pycache__/sequence.cpython-310.pyc


BIN
aphrodite/common/__pycache__/utils.cpython-310.pyc


BIN
aphrodite/engine/__pycache__/args_tools.cpython-310.pyc


+ 138 - 0
aphrodite/task_handler/cache_engine.py

@@ -0,0 +1,138 @@
+"""CacheEngine for managing the KV cache"""
+from typing import Dict, List, Tuple
+
+import torch
+
+from aphrodite import cache_ops
+from aphrodite.common.config import CacheConfig, ModelConfig, ParallelConfig
+
+KVCache = Tuple[torch.Tensor, torch.Tensor]
+
+class CacheEngine:
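+    """Manages the KV cache.
+
+    Allocates the GPU and CPU KV cache tensors and provides methods for
+    swapping blocks between them and copying blocks within the GPU cache.
+    """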
+
+    def __init__(
+        self,
+        cache_config: CacheConfig,
+        model_config: ModelConfig,
+        parallel_config: ParallelConfig
+    ) -> None:
+        self.cache_config = cache_config
+        self.model_config = model_config
+        self.parallel_config = parallel_config
+        
+        self.head_size = model_config.get_head_size()
+        self.num_layers = model_config.get_num_layers(parallel_config)
+        self.num_heads = model_config.get_num_heads(parallel_config)
+        self.dtype = model_config.dtype
+
+        self.block_size = cache_config.block_size
+        self.num_gpu_blocks = cache_config.num_gpu_blocks
+        self.num_cpu_blocks = cache_config.num_cpu_blocks
+
+        self.gpu_cache = self.allocate_gpu_cache()
+        self.cpu_cache = self.allocate_cpu_cache()
+
+        self.cache_stream = torch.cuda.Stream()
+        assert self.cache_stream != torch.cuda.current_stream()
+        self.events = [torch.cuda.Event() for _ in range(self.num_layers)]
+
+    def get_key_block_shape(self) -> Tuple[int, int, int, int]:
+        element_size = torch.tensor([], dtype=self.dtype).element_size()
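+        # Pack the head dimension of the key cache into 16-byte chunks of
+        # x elements each, matching the [num_heads, head_size/x, block_size, x]
+        # layout the CUDA cache kernels expect.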
+        x = 16 // element_size
+        return (
+            self.num_heads,
+            self.head_size // x,
+            self.block_size,
+            x,
+        )
+
+    def get_value_block_shape(self) -> Tuple[int, int, int]:
+        return (
+            self.num_heads,
+            self.head_size,
+            self.block_size,
+        )
+    
+    def allocate_gpu_cache(self) -> List[KVCache]:
+        gpu_cache: List[KVCache] = []
+        key_block_shape = self.get_key_block_shape()
+        value_block_shape = self.get_value_block_shape()
+        for _ in range(self.num_layers):
+            key_blocks = torch.empty(
+                size=(self.num_gpu_blocks, *key_block_shape),
+                dtype=self.dtype,
+                device="cuda",
+            )
+            value_blocks = torch.empty(
+                size=(self.num_gpu_blocks, *value_block_shape),
+                dtype=self.dtype,
+                device="cuda",
+            )
+            gpu_cache.append((key_blocks, value_blocks))
+        return gpu_cache
+    
+    def allocate_cpu_cache(self) -> List[KVCache]:
+        cpu_cache: List[KVCache] = []
+        key_block_shape = self.get_key_block_shape()
+        value_block_shape = self.get_value_block_shape()
+        for _ in range(self.num_layers):
+            key_blocks = torch.empty(
+                size=(self.num_cpu_blocks, *key_block_shape),
+                dtype=self.dtype,
+                pin_memory=True,
+            )
+            value_blocks = torch.empty(
+                size=(self.num_cpu_blocks, *value_block_shape),
+                dtype=self.dtype,
+                pin_memory=True,
+            )
+            cpu_cache.append((key_blocks, value_blocks))
+        return cpu_cache
+
+    def _swap(
+        self,
+        src: List[KVCache],
+        dst: List[KVCache],
+        src_to_dst: Dict[int, int],
+    ) -> None:
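+        """Asynchronously copy blocks between src and dst on the dedicated
+        cache stream, recording one CUDA event per layer."""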
+        with torch.cuda.stream(self.cache_stream):
+            for i in range(self.num_layers):
+                src_key_cache, src_value_cache = src[i]
+                dst_key_cache, dst_value_cache = dst[i]
+                cache_ops.swap_blocks(
+                    src_key_cache, dst_key_cache, src_to_dst)
+                cache_ops.swap_blocks(
+                    src_value_cache, dst_value_cache, src_to_dst)
+                event = self.events[i]
+                event.record(stream=self.cache_stream)
+
+    def swap_in(self, src_to_dst: Dict[int, int]) -> None:
+        self._swap(self.cpu_cache, self.gpu_cache, src_to_dst)
+
+    def swap_out(self, src_to_dst: Dict[int, int]) -> None:
+        self._swap(self.gpu_cache, self.cpu_cache, src_to_dst)
+
+    def copy(self, src_to_dsts: Dict[int, List[int]]) -> None:
+        key_caches = [key_cache for key_cache, _ in self.gpu_cache]
+        value_caches = [value_cache for _, value_cache in self.gpu_cache]
+        cache_ops.copy_blocks(key_caches, value_caches, src_to_dsts)
+
+    @staticmethod
+    def get_cache_block_size(
+        block_size: int,
+        model_config: ModelConfig,
+        parallel_config: ParallelConfig,
+    ) -> int:
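+        """Return the size in bytes of one cache block (key and value)
+        across all layers."""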
+        head_size = model_config.get_head_size()
+        num_heads = model_config.get_num_heads(parallel_config)
+        num_layers = model_config.get_num_layers(parallel_config)
+
+        key_cache_block = block_size * num_heads * head_size
+        value_cache_block = key_cache_block
+        total = num_layers * (key_cache_block + value_cache_block)
+        dtype_size = _get_dtype_size(model_config.dtype)
+        return dtype_size * total
+
+
+def _get_dtype_size(dtype: torch.dtype) -> int:
+    return torch.tensor([], dtype=dtype).element_size()

+ 47 - 0
kernels/cache.cpp

@@ -0,0 +1,47 @@
+#include <cstdint>
+#include <torch/extension.h>
+#include <map>
+#include <vector>
+
+void swap_blocks(
+    torch::Tensor& src,
+    torch::Tensor& dst,
+    const std::map<int64_t, int64_t>& block_mapping);
+
+void copy_blocks(
+    std::vector<torch::Tensor>& key_caches,
+    std::vector<torch::Tensor>& value_caches,
+    const std::map<int64_t, std::vector<int64_t>>& block_mapping);
+
+void reshape_and_cache(
+    torch::Tensor& key,
+    torch::Tensor& value,
+    torch::Tensor& key_cache,
+    torch::Tensor& value_cache,
+    torch::Tensor& slot_mapping);
+
+void gather_cached_kv(
+    torch::Tensor& key,
+    torch::Tensor& value,
+    torch::Tensor& key_cache,
+    torch::Tensor& value_cache,
+    torch::Tensor& slot_mapping);
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+    m.def(
+        "swap_blocks",
+        &swap_blocks,
+        "Swap in (out) the cache blocks from src to dst");
+    m.def(
+        "copy_blocks",
+        &copy_blocks,
+        "Copy the cache blocks from src to dst");
+    m.def(
+        "reshape_and_cache",
+        &reshape_and_cache,
+        "Reshape the key and value tensors and cache them");
+    m.def(
+        "gather_cached_kv",
+        &gather_cached_kv,
+        "Gather key and value from the cache into contiguous KV tensors");
+}

+ 379 - 0
kernels/cache_kernels.cu

@@ -0,0 +1,379 @@
+#include <cstdint>
+#include <torch/extension.h>
+#include <ATen/cuda/CUDAContext.h>
+
+#include <algorithm>
+#include <cassert>
+#include <map>
+#include <vector>
+
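+// Copy whole cache blocks between src and dst (device<->device, device->host,
+// or host->device) according to block_mapping, using asynchronous memcpys on
+// the current CUDA stream.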
+void swap_blocks(
+  torch::Tensor& src,
+  torch::Tensor& dst,
+  const std::map<int64_t, int64_t>& block_mapping) {
+  torch::Device src_device = src.device();
+  torch::Device dst_device = dst.device();
+  cudaMemcpyKind memcpy_type;
+  if (src_device.is_cuda() && dst_device.is_cuda()) {
+    TORCH_CHECK(
+      src_device.index() == dst_device.index(),
+      "src and dst must be on the same GPU");
+    memcpy_type = cudaMemcpyDeviceToDevice;
+  } else if (src_device.is_cuda() && dst_device.is_cpu()) {
+    memcpy_type = cudaMemcpyDeviceToHost;
+  } else if (src_device.is_cpu() && dst_device.is_cuda()) {
+    memcpy_type = cudaMemcpyHostToDevice;
+  } else {
+    TORCH_CHECK(false, "Invalid device combination");
+  }
+
+  char *src_ptr = static_cast<char*>(src.data_ptr());
+  char *dst_ptr = static_cast<char*>(dst.data_ptr());
+
+  const int64_t block_size_in_bytes = src.element_size() * src[0].numel();
+  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  for (const auto& pair : block_mapping) {
+    int64_t src_block_number = pair.first;
+    int64_t dst_block_number = pair.second;
+    int64_t src_offset = src_block_number * block_size_in_bytes;
+    int64_t dst_offset = dst_block_number * block_size_in_bytes;
+    cudaMemcpyAsync(
+        dst_ptr + dst_offset,
+        src_ptr + src_offset,
+        block_size_in_bytes,
+        memcpy_type,
+        stream);
+  }
+}
+
+namespace aphrodite {
+
+// Grid: (num_layers, num_pairs)
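+// block_mapping is a flattened array of (src_block, dst_block) index pairs;
+// each thread block copies one pair within one layer's cache.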
+
+template<typename scalar_t>
+__global__ void copy_blocks_kernel(
+    int64_t* key_cache_ptrs,
+    int64_t* value_cache_ptrs,
+    const int* __restrict__ block_mapping,
+    const int numel_per_block) {
+    const int layer_idx = blockIdx.x;
+    const int pair_idx = blockIdx.y;
+
+    scalar_t* key_cache = reinterpret_cast<scalar_t*>(key_cache_ptrs[layer_idx]);
+    scalar_t* value_cache = reinterpret_cast<scalar_t*>(value_cache_ptrs[layer_idx]);
+    int src_block_number = block_mapping[2 * pair_idx];
+    int dst_block_number = block_mapping[2 * pair_idx + 1];
+
+    const int src_block_offset = src_block_number * numel_per_block;
+    const int dst_block_offset = dst_block_number * numel_per_block;
+    for (int i = threadIdx.x; i < numel_per_block; i += blockDim.x) {
+        int src_offset = src_block_offset + i;
+        int dst_offset = dst_block_offset + i;
+        key_cache[dst_offset] = key_cache[src_offset];
+        value_cache[dst_offset] = value_cache[src_offset];
+    }
+}
+
+} // namespace aphrodite
+
+void copy_blocks(
+    std::vector<torch::Tensor>& key_caches,
+    std::vector<torch::Tensor>& value_caches,
+    const std::map<int64_t, std::vector<int64_t>>& block_mapping) {
+    int num_layers = key_caches.size();
+    TORCH_CHECK(num_layers == value_caches.size());
+    if (num_layers == 0) {
+        return;
+    }
+    torch::Device cache_device = key_caches[0].device();
+    TORCH_CHECK(cache_device.is_cuda());
+
+    // Create data structures for the kernel.
+    // Create an array of pointers to the key/value caches
+    int64_t key_cache_ptrs[num_layers];
+    int64_t value_cache_ptrs[num_layers];
+    for (int layer_idx = 0; layer_idx < num_layers; ++layer_idx) {
+        key_cache_ptrs[layer_idx] = reinterpret_cast<int64_t>(key_caches[layer_idx].data_ptr());
+        value_cache_ptrs[layer_idx] = reinterpret_cast<int64_t>(value_caches[layer_idx].data_ptr());
+    }
+    // Create a block mapping array
+    std::vector<int> block_mapping_vec;
+    for (const auto& pair : block_mapping) {
+      int src_block_number = pair.first;
+      for (int dst_block_number : pair.second) {
+        block_mapping_vec.push_back(src_block_number);
+        block_mapping_vec.push_back(dst_block_number);
+      }
+    }
+    int* block_mapping_array = block_mapping_vec.data();
+    int num_pairs = block_mapping_vec.size() / 2;
+
+    // Move the data structures to the GPU.
+    // NOTE: This synchronizes the CPU and GPU.
+    torch::Tensor key_cache_ptrs_tensor = torch::from_blob(
+      key_cache_ptrs, {num_layers}, torch::kInt64).to(cache_device);
+    torch::Tensor value_cache_ptrs_tensor = torch::from_blob(
+      value_cache_ptrs, {num_layers}, torch::kInt64).to(cache_device);
+    torch::Tensor block_mapping_tensor = torch::from_blob(
+      block_mapping_array, {2 * num_pairs}, torch::kInt).to(cache_device);
+
+    const int numel_per_block = key_caches[0][0].numel();
+    dim3 grid(num_layers, num_pairs);
+    dim3 block(std::min(1024, numel_per_block));
+    const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+    AT_DISPATCH_FLOATING_TYPES_AND2(
+      at::ScalarType::Half,
+      at::ScalarType::BFloat16,
+      key_caches[0].scalar_type(), "copy_blocks_kernel", ([&] {
+        aphrodite::copy_blocks_kernel<scalar_t><<<grid, block, 0, stream>>>(
+          key_cache_ptrs_tensor.data_ptr<int64_t>(),
+          value_cache_ptrs_tensor.data_ptr<int64_t>(),
+          block_mapping_tensor.data_ptr<int>(),
+          numel_per_block);
+      }));
+}
+
+namespace aphrodite {
+
+template<typename scalar_t>
+__global__ void reshape_and_cache_kernel(
+  const scalar_t* __restrict__ key,     // [num_tokens, num_heads, head_size]
+  const scalar_t* __restrict__ value,   // [num_tokens, num_heads, head_size]
+  scalar_t* __restrict__ key_cache,     // [num_blocks, num_heads, head_size/x, block_size, x]
+  scalar_t* __restrict__ value_cache,   // [num_blocks, num_heads, head_size, block_size]
+  const int* __restrict__ slot_mapping, // [num_tokens]
+  const int key_stride,
+  const int value_stride,
+  const int num_heads,
+  const int head_size,
+  const int block_size,
+  const int x) {
+  const int token_idx = blockIdx.x;
+  const int slot_idx = slot_mapping[token_idx];
+  const int block_idx = slot_idx / block_size;
+  const int block_offset = slot_idx % block_size;
+
+  const int n = num_heads * head_size;
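+  // Scatter this token's key and value elements into their slot in the paged caches.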
+  for (int i = threadIdx.x; i < n; i += blockDim.x) {
+    const int src_key_idx = token_idx * key_stride + i;
+    const int src_value_idx = token_idx * value_stride + i;
+
+    const int head_idx = i / head_size;
+    const int head_offset = i % head_size;
+    const int x_idx = head_offset / x;
+    const int x_offset = head_offset % x;
+
+    const int tgt_key_idx = block_idx * num_heads * (head_size / x ) * block_size * x
+                            + head_idx * (head_size / x) * block_size * x 
+                            + x_idx * block_size * x
+                            + block_offset * x
+                            + x_offset;
+    const int tgt_value_idx = block_idx * num_heads * head_size * block_size
+                            + head_idx * head_size * block_size
+                            + head_offset * block_size
+                            + block_offset;
+    key_cache[tgt_key_idx] = __ldg(&key[src_key_idx]);
+    value_cache[tgt_value_idx] = __ldg(&value[src_value_idx]);
+  }
+}
+
+} // namespace aphrodite
+
+void reshape_and_cache(
+  torch::Tensor& key,
+  torch::Tensor& value,
+  torch::Tensor& key_cache,
+  torch::Tensor& value_cache,
+  torch::Tensor& slot_mapping)
+{
+  int num_tokens = key.size(0);
+  int num_heads = key.size(1);
+  int head_size = key.size(2);
+  int block_size = key_cache.size(3);
+  int x = key_cache.size(4);
+
+  int key_stride = key.stride(0);
+  int value_stride = value.stride(0);
+
+  dim3 grid(num_tokens);
+  dim3 block(std::min(num_heads * head_size, 512));
+  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  AT_DISPATCH_FLOATING_TYPES_AND2(
+    at::ScalarType::Half,
+    at::ScalarType::BFloat16,
+    key.scalar_type(),
+    "reshape_and_cache_kernel",
+    [&] {
+      aphrodite::reshape_and_cache_kernel<scalar_t><<<grid, block, 0, stream>>>(
+        key.data_ptr<scalar_t>(),
+        value.data_ptr<scalar_t>(),
+        key_cache.data_ptr<scalar_t>(),
+        value_cache.data_ptr<scalar_t>(),
+        slot_mapping.data_ptr<int>(),
+        key_stride,
+        value_stride,
+        num_heads,
+        head_size,
+        block_size,
+        x);
+    });
+}
+
+namespace aphrodite {
+
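+// Inverse of reshape_and_cache: gathers each token's key/value from the paged
+// caches into contiguous key/value tensors.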
+template<typename scalar_t>
+__global__ void gather_cached_kv_kernel(
+  scalar_t* __restrict__ key,           // [num_tokens, [stride], num_heads, head_size]
+  scalar_t* __restrict__ value,         // [num_tokens, [stride], num_heads, head_size]
+  const scalar_t* __restrict__ key_cache,     // [num_blocks, num_heads, head_size/x, block_size, x]
+  const scalar_t* __restrict__ value_cache,   // [num_blocks, num_heads, head_size, block_size]
+  const int* __restrict__ slot_mapping,       // [num_tokens]
+  const int key_stride,
+  const int value_stride,
+  const int num_heads,
+  const int head_size,
+  const int block_size,
+  const int x) {
+  const int token_idx = blockIdx.x;
+  const int slot_idx = slot_mapping[token_idx];
+  const int block_idx = slot_idx / block_size;
+  const int block_offset = slot_idx % block_size;
+
+  const int n = num_heads * head_size;
+  for (int i = threadIdx.x; i < n; i += blockDim.x) {
+    const int tgt_key_idx = token_idx * key_stride + i;
+    const int tgt_value_idx = token_idx * value_stride + i;
+
+    const int head_idx = i / head_size;
+    const int head_offset = i % head_size;
+    const int x_idx = head_offset / x;    // the offset of the [head_size/x] dimension
+    const int x_offset = head_offset % x;
+
+    const int src_key_idx = block_idx * num_heads * (head_size / x ) * block_size * x
+                            + head_idx * (head_size / x) * block_size * x 
+                            + x_idx * block_size * x
+                            + block_offset * x
+                            + x_offset;
+    const int src_value_idx = block_idx * num_heads * head_size * block_size
+                            + head_idx * head_size * block_size
+                            + head_offset * block_size
+                            + block_offset;
+    key[tgt_key_idx] = __ldg(&key_cache[src_key_idx]);
+    value[tgt_value_idx] = __ldg(&value_cache[src_value_idx]);
+  }
+}
+
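+// Variant of gather_cached_kv_kernel that unrolls the copy by a factor of 4
+// to batch the index arithmetic and global memory loads.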
+template <typename scalar_t>
+__global__ void gather_cached_kv_kernel_optimized(
+    scalar_t* __restrict__ key,     // [num_tokens, [stride], num_heads, head_size]
+    scalar_t* __restrict__ value,   // [num_tokens, [stride], num_heads, head_size]
+    const scalar_t* __restrict__ key_cache,     // [num_blocks, num_heads, head_size/x, block_size, x]
+    const scalar_t* __restrict__ value_cache,   // [num_blocks, num_heads, head_size, block_size]
+    const int* __restrict__ slot_mapping, // [num_tokens]
+    const int key_stride,
+    const int value_stride,
+    const int num_heads,
+    const int head_size,
+    const int block_size,
+    const int x)
+{
+  const int token_idx = blockIdx.x;
+  const int slot_idx = slot_mapping[token_idx];
+  const int block_idx = slot_idx / block_size;
+  const int block_offset = slot_idx % block_size;
+
+  const int dim = num_heads * head_size;
+  assert(dim % 4 == 0);   // true for known use cases
+  const int unroll_factor = 4;
+  const int unroll_dim = dim / unroll_factor;
+
+  for (int i = threadIdx.x; i < unroll_dim; i += blockDim.x)
+  {
+    int tgt_key_indices[unroll_factor];
+    int tgt_value_indices[unroll_factor];
+    int src_key_indices[unroll_factor];
+    int src_value_indices[unroll_factor];
+    scalar_t keys_to_store[unroll_factor];
+    scalar_t values_to_store[unroll_factor];
+
+    #pragma unroll
+    for (int j = 0; j < unroll_factor; ++j)
+    {
+        int index = i + j * unroll_dim;
+        
+        const int tgt_key_idx = token_idx * key_stride + index;
+        const int tgt_value_idx = token_idx * value_stride + index;
+
+        const int head_idx = index / head_size;
+        const int head_offset = index % head_size;
+        const int x_idx = head_offset / x;
+        const int x_offset = head_offset % x;
+
+        const int src_key_idx = block_idx * num_heads * (head_size / x) * block_size * x
+                                + head_idx * (head_size / x) * block_size * x
+                                + x_idx * block_size * x
+                                + block_offset * x
+                                + x_offset;
+        const int src_value_idx = block_idx * num_heads * head_size * block_size
+                                + head_idx * head_size * block_size
+                                + head_offset * block_size
+                                + block_offset;
+        tgt_key_indices[j] = tgt_key_idx;
+        tgt_value_indices[j] = tgt_value_idx;
+        src_key_indices[j] = src_key_idx;
+        src_value_indices[j] = src_value_idx;
+
+        keys_to_store[j] = __ldg(&key_cache[src_key_idx]);
+        values_to_store[j] = __ldg(&value_cache[src_value_idx]);
+    }
+
+    #pragma unroll
+    for (int j = 0; j < unroll_factor; ++j)
+    {
+      key[tgt_key_indices[j]] = keys_to_store[j];
+      value[tgt_value_indices[j]] = values_to_store[j];
+    }
+  }
+}
+} // namespace aphrodite
+
+void gather_cached_kv(
+  torch::Tensor& key,
+  torch::Tensor& value,
+  torch::Tensor& key_cache,
+  torch::Tensor& value_cache,
+  torch::Tensor& slot_mapping)
+{
+  int num_tokens = key.size(0);
+  int num_heads = key.size(1);
+  int head_size = key.size(2);
+  int block_size = key_cache.size(3);
+  int x = key_cache.size(4);
+
+  int key_stride = key.stride(0);
+  int value_stride = value.stride(0);
+
+  dim3 grid(num_tokens);
+  dim3 block(std::min(num_heads * head_size, 512));
+  const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  AT_DISPATCH_FLOATING_TYPES_AND2(
+    at::ScalarType::Half,
+    at::ScalarType::BFloat16,
+    key.scalar_type(),
+    "gather_cached_kv_kernel_optimized",
+    [&] {
+      aphrodite::gather_cached_kv_kernel_optimized<scalar_t><<<grid, block, 0, stream>>>(
+        key.data_ptr<scalar_t>(),
+        value.data_ptr<scalar_t>(),
+        key_cache.data_ptr<scalar_t>(),
+        value_cache.data_ptr<scalar_t>(),
+        slot_mapping.data_ptr<int>(),
+        key_stride,
+        value_stride,
+        num_heads,
+        head_size,
+        block_size,
+        x);
+    });
+}