#pragma once

#include <optional>
#include <torch/library.h>
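
// PagedAttention v1: single-pass decode attention over a paged KV cache.
// K/V blocks are gathered through `block_tables`, with optional ALiBi slopes
// and blocksparse masking; the result is written into `out` in place.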
void paged_attention_v1(
    torch::Tensor& out, torch::Tensor& query, torch::Tensor& key_cache,
    torch::Tensor& value_cache, int64_t num_kv_heads, double scale,
    torch::Tensor& block_tables, torch::Tensor& seq_lens, int64_t block_size,
    int64_t max_seq_len, const c10::optional<torch::Tensor>& alibi_slopes,
    const std::string& kv_cache_dtype, double kv_scale, const int64_t tp_rank,
    const int64_t blocksparse_local_blocks,
    const int64_t blocksparse_vert_stride, const int64_t blocksparse_block_size,
    const int64_t blocksparse_head_sliding_step);
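
// PagedAttention v2: partitioned variant for long sequences. Partial results
// per partition go to `tmp_out`, and `exp_sums`/`max_logits` drive the final
// numerically stable softmax reduction across partitions.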
void paged_attention_v2(
    torch::Tensor& out, torch::Tensor& exp_sums, torch::Tensor& max_logits,
    torch::Tensor& tmp_out, torch::Tensor& query, torch::Tensor& key_cache,
    torch::Tensor& value_cache, int64_t num_kv_heads, double scale,
    torch::Tensor& block_tables, torch::Tensor& seq_lens, int64_t block_size,
    int64_t max_seq_len, const c10::optional<torch::Tensor>& alibi_slopes,
    const std::string& kv_cache_dtype, double kv_scale, const int64_t tp_rank,
    const int64_t blocksparse_local_blocks,
    const int64_t blocksparse_vert_stride, const int64_t blocksparse_block_size,
    const int64_t blocksparse_head_sliding_step);
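
// RMSNorm kernels. `rms_norm` computes, row-wise over the last dimension,
//   out = input / sqrt(mean(input^2) + epsilon) * weight.
// `fused_add_rms_norm` adds `residual` into `input`, stores the sum back in
// `residual`, and writes the normalized sum into `input`, fusing the residual
// connection with the norm in a single pass.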
void rms_norm(torch::Tensor& out, torch::Tensor& input, torch::Tensor& weight,
              double epsilon);

void fused_add_rms_norm(torch::Tensor& input, torch::Tensor& residual,
                        torch::Tensor& weight, double epsilon);
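
// Rotary position embedding, applied to `query` and `key` in place using the
// precomputed `cos_sin_cache`. `is_neox` selects the GPT-NeoX rotate-half
// layout over the GPT-J interleaved layout. The batched variant also takes
// per-token `cos_sin_cache_offsets`, so tokens in one batch can index
// different cache regions (e.g. different RoPE scaling factors).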
void rotary_embedding(torch::Tensor& positions, torch::Tensor& query,
                      torch::Tensor& key, int64_t head_size,
                      torch::Tensor& cos_sin_cache, bool is_neox);

void batched_rotary_embedding(torch::Tensor& positions, torch::Tensor& query,
                              torch::Tensor& key, int64_t head_size,
                              torch::Tensor& cos_sin_cache, bool is_neox,
                              int64_t rot_dim,
                              torch::Tensor& cos_sin_cache_offsets);
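
// Fused activation kernels. The *_and_mul variants expect `input` with last
// dimension 2 * d and write act(input[..., :d]) * input[..., d:] into `out`
// (the gated-MLP pattern); gelu_new, gelu_fast, and gelu_quick are plain
// element-wise GELU approximations.
//
// Minimal usage sketch (shapes and variable names are illustrative only):
//   auto input = torch::randn({num_tokens, 2 * d}, opts);  // opts: CUDA opts
//   auto out = torch::empty({num_tokens, d}, opts);
//   silu_and_mul(out, input);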
void silu_and_mul(torch::Tensor& out, torch::Tensor& input);

void gelu_and_mul(torch::Tensor& out, torch::Tensor& input);

void gelu_tanh_and_mul(torch::Tensor& out, torch::Tensor& input);

void gelu_new(torch::Tensor& out, torch::Tensor& input);

void gelu_fast(torch::Tensor& out, torch::Tensor& input);

void gelu_quick(torch::Tensor& out, torch::Tensor& input);
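
// MoE scheduling helper: given the top-k expert assignment in `topk_ids`,
// pads each expert's token count up to a multiple of `block_size` and emits
// the sorted, padded token order (`sorted_token_ids`), the expert owning each
// block (`expert_ids`), and the padded total (`num_tokens_post_pad`), so the
// fused MoE GEMM can launch uniform fixed-size tiles per expert.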
void moe_align_block_size(torch::Tensor topk_ids, int64_t num_experts,
                          int64_t block_size, torch::Tensor sorted_token_ids,
                          torch::Tensor expert_ids,
                          torch::Tensor num_tokens_post_pad);
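
// Selective-scan forward pass (the Mamba SSM recurrence): scans input `u`
// with per-step discretization `delta` and state matrices A/B/C, plus an
// optional skip term `D_`, gate `z_`, and `delta_bias_` (with `delta`
// optionally passed through softplus). Returns the scan output tensors.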
std::vector<torch::Tensor> selective_scan_fwd(
    const torch::Tensor& u, const torch::Tensor& delta, const torch::Tensor& A,
    const torch::Tensor& B, const torch::Tensor& C,
    const c10::optional<torch::Tensor>& D_,
    const c10::optional<torch::Tensor>& z_,
    const c10::optional<torch::Tensor>& delta_bias_, bool delta_softplus,
    const c10::optional<torch::Tensor>& index_,
    const c10::optional<torch::Tensor>& x);
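
// Custom all-reduce (CUDA only): a latency-oriented all-reduce for small
// tensors that communicates through CUDA IPC buffers rather than NCCL.
// `fptr_t` is an opaque handle pointer round-tripped through Python as an
// int64. `init_custom_ar` builds the handle, `should_custom_ar` gates use by
// tensor size and interconnect topology, and the reg/unreg variants differ in
// whether `inp` already lives in a registered IPC buffer.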
#ifndef USE_ROCM
using fptr_t = int64_t;

fptr_t init_custom_ar(torch::Tensor& meta, torch::Tensor& rank_data,
                      const std::vector<std::string>& handles,
                      const std::vector<int64_t>& offsets, int64_t rank,
                      bool full_nvlink);

bool should_custom_ar(torch::Tensor& inp, int64_t max_size, int64_t world_size,
                      bool full_nvlink);

void all_reduce_reg(fptr_t _fa, torch::Tensor& inp, torch::Tensor& out);

void all_reduce_unreg(fptr_t _fa, torch::Tensor& inp, torch::Tensor& reg_buffer,
                      torch::Tensor& out);

void dispose(fptr_t _fa);

int64_t meta_size();

void register_buffer(fptr_t _fa, torch::Tensor& t,
                     const std::vector<std::string>& handles,
                     const std::vector<int64_t>& offsets);

std::tuple<torch::Tensor, std::vector<int64_t>> get_graph_buffer_ipc_meta(
    fptr_t _fa);

void register_graph_buffers(fptr_t _fa, const std::vector<std::string>& handles,
                            const std::vector<std::vector<int64_t>>& offsets);
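
// Minimal call-flow sketch for the custom all-reduce path (hypothetical
// variable names; distributed setup and error handling omitted):
//   fptr_t fa = init_custom_ar(meta, rank_data, ipc_handles, offsets, rank,
//                              /*full_nvlink=*/true);
//   if (should_custom_ar(inp, max_size, world_size, /*full_nvlink=*/true)) {
//     all_reduce_unreg(fa, inp, reg_buffer, out);  // staged via reg_buffer
//   }
//   dispose(fa);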
#endif