attention.cpp

#include <torch/extension.h>
#include <c10/util/Optional.h>

// Single-pass PagedAttention kernel: computes attention between the input
// query and the block-structured key/value caches.
void paged_attention_v1(
  torch::Tensor& out,            // output tensor, written in place
  torch::Tensor& query,
  torch::Tensor& key_cache,      // paged key cache
  torch::Tensor& value_cache,    // paged value cache
  torch::Tensor& head_mapping,   // maps query heads to KV heads (MQA/GQA)
  float scale,                   // softmax scale, typically 1/sqrt(head_size)
  torch::Tensor& block_tables,   // per-sequence physical block indices
  torch::Tensor& context_lens,   // per-sequence context lengths
  int block_size,                // number of tokens per cache block
  int max_context_len,
  const c10::optional<torch::Tensor>& alibi_slopes);  // optional ALiBi biases

// Two-pass PagedAttention kernel for long contexts: partial results are
// accumulated per partition into tmp_out (with exp_sums and max_logits as
// reduction workspace) and then combined into out.
void paged_attention_v2(
  torch::Tensor& out,
  torch::Tensor& exp_sums,       // per-partition softmax denominators
  torch::Tensor& max_logits,     // per-partition max logits (numerical stability)
  torch::Tensor& tmp_out,        // per-partition partial outputs
  torch::Tensor& query,
  torch::Tensor& key_cache,
  torch::Tensor& value_cache,
  torch::Tensor& head_mapping,
  float scale,
  torch::Tensor& block_tables,
  torch::Tensor& context_lens,
  int block_size,
  int max_context_len,
  const c10::optional<torch::Tensor>& alibi_slopes);

// Expose both kernels to Python as a Torch extension module.
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def(
    "paged_attention_v1",
    &paged_attention_v1,
    "Compute the attention between an input query and the cached key/value tensors");
  m.def(
    "paged_attention_v2",
    &paged_attention_v2,
    "Compute the attention with a partitioned reduction over long contexts (PagedAttention V2)");
}
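
A minimal usage sketch (an assumption, not part of this file): a binding like this is typically JIT-compiled and called from Python via torch.utils.cpp_extension. The kernel source file name, and the tensors passed in, are hypothetical here; their exact layout is dictated by the CUDA kernels that implement these declarations, not by this header.

import torch
from torch.utils.cpp_extension import load

# Compile the binding together with its (assumed) CUDA kernel sources.
attention_ops = load(
    name="attention_ops",
    sources=["attention.cpp", "attention_kernels.cu"],
)

# Preallocate the output and invoke the single-pass kernel. All inputs
# (query, key_cache, value_cache, head_mapping, block_tables, context_lens)
# must already be laid out the way the kernel expects.
out = torch.empty_like(query)
attention_ops.paged_attention_v1(
    out, query, key_cache, value_cache, head_mapping,
    scale, block_tables, context_lens, block_size,
    max_context_len, None)  # alibi_slopes=None disables ALiBi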