#pragma once

#include <torch/extension.h>

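// PagedAttention v1: single-kernel attention over the paged KV cache. Each
// query attends to the cache blocks listed in its row of block_tables, up to
// context_lens tokens, with optional ALiBi slopes. (Summary inferred from the
// parameter names.)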
void paged_attention_v1(
  torch::Tensor& out,
  torch::Tensor& query,
  torch::Tensor& key_cache,
  torch::Tensor& value_cache,
  int num_kv_heads,
  float scale,
  torch::Tensor& block_tables,
  torch::Tensor& context_lens,
  int block_size,
  int max_context_len,
  const c10::optional<torch::Tensor>& alibi_slopes,
  const std::string& kv_cache_dtype,
  float kv_scale);

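// PagedAttention v2: two-stage variant for long contexts. The sequence is
// split into partitions; exp_sums, max_logits, and tmp_out hold per-partition
// softmax statistics and partial outputs that a second reduction pass combines
// into out. (Assumed semantics based on the extra reduction buffers.)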
void paged_attention_v2(
  torch::Tensor& out,
  torch::Tensor& exp_sums,
  torch::Tensor& max_logits,
  torch::Tensor& tmp_out,
  torch::Tensor& query,
  torch::Tensor& key_cache,
  torch::Tensor& value_cache,
  int num_kv_heads,
  float scale,
  torch::Tensor& block_tables,
  torch::Tensor& context_lens,
  int block_size,
  int max_context_len,
  const c10::optional<torch::Tensor>& alibi_slopes,
  const std::string& kv_cache_dtype,
  float kv_scale);

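// RMSNorm over the last dimension:
//   out = input / sqrt(mean(input^2) + epsilon) * weight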
void rms_norm(
  torch::Tensor& out,
  torch::Tensor& input,
  torch::Tensor& weight,
  float epsilon);

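// Fused residual-add + RMSNorm. Adds residual into input and normalizes the
// sum, updating both tensors in place (note the absence of an out parameter).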
void fused_add_rms_norm(
  torch::Tensor& input,
  torch::Tensor& residual,
  torch::Tensor& weight,
  float epsilon);

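// Rotary position embedding (RoPE): rotates query and key in place using the
// precomputed cos_sin_cache indexed by positions. is_neox selects the
// GPT-NeoX rotate-half layout versus the GPT-J interleaved layout.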
void rotary_embedding(
  torch::Tensor& positions,
  torch::Tensor& query,
  torch::Tensor& key,
  int head_size,
  torch::Tensor& cos_sin_cache,
  bool is_neox);

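// Batched RoPE: like rotary_embedding, but each token may index a different
// region of cos_sin_cache via cos_sin_cache_offsets (e.g. when requests use
// different rotary scaling). Assumed semantics based on the extra
// rot_dim / cos_sin_cache_offsets parameters.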
void batched_rotary_embedding(
  torch::Tensor& positions,
  torch::Tensor& query,
  torch::Tensor& key,
  int head_size,
  torch::Tensor& cos_sin_cache,
  bool is_neox,
  int rot_dim,
  torch::Tensor& cos_sin_cache_offsets);

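// SwiGLU activation. For input of shape [..., 2 * d]:
//   out[..., :d] = silu(input[..., :d]) * input[..., d:]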
void silu_and_mul(
  torch::Tensor& out,
  torch::Tensor& input);

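// GeGLU activations: same gated layout as silu_and_mul, using exact GELU
// (gelu_and_mul) or its tanh approximation (gelu_tanh_and_mul).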
void gelu_and_mul(
  torch::Tensor& out,
  torch::Tensor& input);

void gelu_tanh_and_mul(
  torch::Tensor& out,
  torch::Tensor& input);

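// Elementwise GELU approximations used by some model architectures
// (e.g. GPT-2-style "new" GELU and a faster variant).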
void gelu_new(
  torch::Tensor& out,
  torch::Tensor& input);

void gelu_fast(
  torch::Tensor& out,
  torch::Tensor& input);

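// Fused-MoE helper: sorts token indices by assigned expert and pads each
// expert's token count to a multiple of block_size, filling sorted_token_ids,
// expert_ids, and num_tokens_post_pad for the grouped MoE GEMM. (Assumed
// behavior matching the parameter names.)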
void moe_align_block_size(
  torch::Tensor topk_ids,
  int num_experts,
  int block_size,
  torch::Tensor sorted_token_ids,
  torch::Tensor expert_ids,
  torch::Tensor num_tokens_post_pad);

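// Custom all-reduce (CUDA only, not compiled for ROCm): a peer-to-peer
// all-reduce for tensor parallelism that can beat NCCL for small messages on
// fully NVLink-connected GPUs. fptr_t is an opaque handle returned by
// init_custom_ar; buffers are shared across ranks via CUDA IPC handles and
// offsets. (Summary inferred from the declarations.)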
#ifndef USE_ROCM
using fptr_t = uint64_t;
fptr_t init_custom_ar(torch::Tensor &meta, torch::Tensor &rank_data,
                      const std::vector<std::string> &handles,
                      const std::vector<int64_t> &offsets, int rank,
                      bool full_nvlink);
bool should_custom_ar(torch::Tensor &inp, int max_size, int world_size,
                      bool full_nvlink);
void all_reduce_reg(fptr_t _fa, torch::Tensor &inp, torch::Tensor &out);
void all_reduce_unreg(fptr_t _fa, torch::Tensor &inp, torch::Tensor &reg_buffer,
                      torch::Tensor &out);
void dispose(fptr_t _fa);
int meta_size();
void register_buffer(fptr_t _fa, torch::Tensor &t,
                     const std::vector<std::string> &handles,
                     const std::vector<int64_t> &offsets);
std::pair<std::vector<uint8_t>, std::vector<int64_t>> get_graph_buffer_ipc_meta(fptr_t _fa);
void register_graph_buffers(fptr_t _fa, const std::vector<std::string> &handles,
                            const std::vector<std::vector<int64_t>> &offsets);
#endif