```cpp
#include <torch/extension.h>
#include <c10/util/Optional.h>

// PagedAttention V1: single-pass decode kernel for shorter contexts.
void paged_attention_v1(
  torch::Tensor& out,           // [num_seqs, num_heads, head_size]
  torch::Tensor& query,         // [num_seqs, num_heads, head_size]
  torch::Tensor& key_cache,     // paged key cache, indexed via block_tables
  torch::Tensor& value_cache,   // paged value cache, indexed via block_tables
  torch::Tensor& head_mapping,  // query head -> KV head (for MQA/GQA)
  float scale,                  // softmax scale, typically 1/sqrt(head_size)
  torch::Tensor& block_tables,  // [num_seqs, max_num_blocks_per_seq]
  torch::Tensor& context_lens,  // [num_seqs], cached tokens per sequence
  int block_size,
  int max_context_len,
  const c10::optional<torch::Tensor>& alibi_slopes);

// PagedAttention V2: splits long contexts into partitions, writing
// per-partition partial results (exp_sums, max_logits, tmp_out) that a
// second reduction pass combines into the final output.
void paged_attention_v2(
  torch::Tensor& out,
  torch::Tensor& exp_sums,
  torch::Tensor& max_logits,
  torch::Tensor& tmp_out,
  torch::Tensor& query,
  torch::Tensor& key_cache,
  torch::Tensor& value_cache,
  torch::Tensor& head_mapping,
  float scale,
  torch::Tensor& block_tables,
  torch::Tensor& context_lens,
  int block_size,
  int max_context_len,
  const c10::optional<torch::Tensor>& alibi_slopes);

PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
  m.def(
    "paged_attention_v1",
    &paged_attention_v1,
    "Compute the attention between an input query and the cached key/value tensors");
  m.def(
    "paged_attention_v2",
    &paged_attention_v2,
    "PagedAttention V2.");
}
```
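To make the semantics of these bindings concrete, here is a pure-PyTorch sketch of what `paged_attention_v1` computes once the extension is built and imported (e.g. via `torch.utils.cpp_extension.load`). This is not the CUDA kernel: the helper name `paged_attention_v1_ref` is hypothetical, both caches are assumed to use a simplified `[num_blocks, num_kv_heads, block_size, head_size]` layout (the real key cache is packed differently for coalesced access), and ALiBi is omitted.

```python
import torch

def paged_attention_v1_ref(query, key_cache, value_cache, head_mapping,
                           scale, block_tables, context_lens, block_size):
    """Hypothetical reference, not the CUDA kernel: same inputs/outputs as
    paged_attention_v1, but assuming a simplified
    [num_blocks, num_kv_heads, block_size, head_size] layout for both
    caches, and no ALiBi slopes."""
    num_seqs, num_heads, head_size = query.shape
    out = torch.empty_like(query)
    for i in range(num_seqs):
        ctx_len = int(context_lens[i])
        n_blocks = (ctx_len + block_size - 1) // block_size
        # Gather this sequence's physical blocks and flatten them back
        # into a contiguous [ctx_len, num_kv_heads, head_size] view.
        blocks = block_tables[i, :n_blocks].long()
        keys = key_cache[blocks].transpose(1, 2).reshape(
            n_blocks * block_size, -1, head_size)[:ctx_len]
        values = value_cache[blocks].transpose(1, 2).reshape(
            n_blocks * block_size, -1, head_size)[:ctx_len]
        for h in range(num_heads):
            kv_h = int(head_mapping[h])  # query head -> KV head (MQA/GQA)
            logits = scale * (keys[:, kv_h] @ query[i, h])  # [ctx_len]
            probs = torch.softmax(logits, dim=0)
            out[i, h] = probs @ values[:, kv_h]             # [head_size]
    return out
```

`paged_attention_v2` produces the same result but shards each context across multiple thread blocks, accumulating per-partition `exp_sums`, `max_logits`, and `tmp_out` that a second pass reduces; this keeps the GPU occupied when `num_seqs * num_heads` is small relative to the context length.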