#pragma once

#include <optional>
#include <torch/library.h>

void paged_attention_v1(
    torch::Tensor& out, torch::Tensor& query, torch::Tensor& key_cache,
    torch::Tensor& value_cache, int64_t num_kv_heads, double scale,
    torch::Tensor& block_tables, torch::Tensor& seq_lens, int64_t block_size,
    int64_t max_seq_len, const c10::optional<torch::Tensor>& alibi_slopes,
    const std::string& kv_cache_dtype, double k_scale, double v_scale,
    const int64_t tp_rank, const int64_t blocksparse_local_blocks,
    const int64_t blocksparse_vert_stride, const int64_t blocksparse_block_size,
    const int64_t blocksparse_head_sliding_step);

void paged_attention_v2(
    torch::Tensor& out, torch::Tensor& exp_sums, torch::Tensor& max_logits,
    torch::Tensor& tmp_out, torch::Tensor& query, torch::Tensor& key_cache,
    torch::Tensor& value_cache, int64_t num_kv_heads, double scale,
    torch::Tensor& block_tables, torch::Tensor& seq_lens, int64_t block_size,
    int64_t max_seq_len, const c10::optional<torch::Tensor>& alibi_slopes,
    const std::string& kv_cache_dtype, double k_scale, double v_scale,
    const int64_t tp_rank, const int64_t blocksparse_local_blocks,
    const int64_t blocksparse_vert_stride, const int64_t blocksparse_block_size,
    const int64_t blocksparse_head_sliding_step);

void rms_norm(torch::Tensor& out, torch::Tensor& input, torch::Tensor& weight,
              double epsilon);

void fused_add_rms_norm(torch::Tensor& input, torch::Tensor& residual,
                        torch::Tensor& weight, double epsilon);

void rotary_embedding(torch::Tensor& positions, torch::Tensor& query,
                      torch::Tensor& key, int64_t head_size,
                      torch::Tensor& cos_sin_cache, bool is_neox);

void batched_rotary_embedding(torch::Tensor& positions, torch::Tensor& query,
                              torch::Tensor& key, int64_t head_size,
                              torch::Tensor& cos_sin_cache, bool is_neox,
                              int64_t rot_dim,
                              torch::Tensor& cos_sin_cache_offsets);

void silu_and_mul(torch::Tensor& out, torch::Tensor& input);

void gelu_and_mul(torch::Tensor& out, torch::Tensor& input);

void gelu_tanh_and_mul(torch::Tensor& out, torch::Tensor& input);

void gelu_new(torch::Tensor& out, torch::Tensor& input);

void gelu_fast(torch::Tensor& out, torch::Tensor& input);

void gelu_quick(torch::Tensor& out, torch::Tensor& input);

void advance_step_flashattn(int64_t num_seqs, int64_t num_queries,
                            int64_t block_size, torch::Tensor& input_tokens,
                            torch::Tensor& sampled_token_ids,
                            torch::Tensor& input_positions,
                            torch::Tensor& seq_lens,
                            torch::Tensor& slot_mapping,
                            torch::Tensor& block_tables);

void advance_step_flashinfer(
    int64_t num_seqs, int64_t num_queries, int64_t block_size,
    torch::Tensor& input_tokens, torch::Tensor& sampled_token_ids,
    torch::Tensor& input_positions, torch::Tensor& seq_lens,
    torch::Tensor& slot_mapping, torch::Tensor& block_tables,
    torch::Tensor& paged_kv_indices, torch::Tensor& paged_kv_indptr,
    torch::Tensor& paged_kv_last_page_len, torch::Tensor& block_table_bounds);

void moe_align_block_size(torch::Tensor topk_ids, int64_t num_experts,
                          int64_t block_size, torch::Tensor sorted_token_ids,
                          torch::Tensor expert_ids,
                          torch::Tensor num_tokens_post_pad);

#ifndef USE_ROCM
// Custom all-reduce kernels (excluded from ROCm builds).
using fptr_t = int64_t;
fptr_t init_custom_ar(torch::Tensor& meta, torch::Tensor& rank_data,
                      const std::vector<std::string>& handles,
                      const std::vector<int64_t>& offsets, int64_t rank,
                      bool full_nvlink);
void all_reduce_reg(fptr_t _fa, torch::Tensor& inp, torch::Tensor& out);
void all_reduce_unreg(fptr_t _fa, torch::Tensor& inp,
                      torch::Tensor& reg_buffer, torch::Tensor& out);
void dispose(fptr_t _fa);
int64_t meta_size();
void register_buffer(fptr_t _fa, torch::Tensor& t,
                     const std::vector<std::string>& handles,
                     const std::vector<int64_t>& offsets);
std::tuple<torch::Tensor, std::vector<int64_t>> get_graph_buffer_ipc_meta(
    fptr_t _fa);
void register_graph_buffers(fptr_t _fa,
                            const std::vector<std::string>& handles,
                            const std::vector<std::vector<int64_t>>& offsets);
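// A minimal usage sketch for the custom all-reduce block above. This is not
// part of the original header; the call order and the contents of `meta`,
// `rank_data`, `ipc_handles`, and `offsets` are assumptions inferred from the
// declarations alone, not a definitive protocol:
//
//   // One-time setup: size the metadata buffer, create a handle, and
//   // register the buffers that will participate in the reduction.
//   auto meta = torch::empty({meta_size()}, torch::kUInt8);
//   fptr_t fa = init_custom_ar(meta, rank_data, ipc_handles, offsets,
//                              rank, /*full_nvlink=*/true);
//   register_buffer(fa, inp, ipc_handles, offsets);
//
//   // Steady state: reduce `inp` into `out` across ranks, then release
//   // the handle at shutdown.
//   all_reduce_reg(fa, inp, out);
//   dispose(fa);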
std::vector<torch::Tensor> selective_scan_fwd(
    const torch::Tensor& u, const torch::Tensor& delta, const torch::Tensor& A,
    const torch::Tensor& B, const torch::Tensor& C,
    const c10::optional<torch::Tensor>& D_,
    const c10::optional<torch::Tensor>& z_,
    const c10::optional<torch::Tensor>& delta_bias_, bool delta_softplus,
    const c10::optional<torch::Tensor>& index_,
    const c10::optional<torch::Tensor>& x);

at::Tensor causal_conv1d_update(
    const at::Tensor& x, const at::Tensor& conv_state,
    const at::Tensor& weight, const c10::optional<at::Tensor>& bias,
    bool silu_activation, const c10::optional<at::Tensor>& conv_state_indices);

at::Tensor causal_conv1d_fwd(const at::Tensor& x, const at::Tensor& weight,
                             const c10::optional<at::Tensor>& bias_,
                             const c10::optional<at::Tensor>& seq_idx_,
                             const c10::optional<at::Tensor>& initial_states_,
                             const c10::optional<at::Tensor>& final_states_out_,
                             bool silu_activation);

torch::Tensor permute_cols(torch::Tensor const& A, torch::Tensor const& perm);

// Sampling kernels
torch::Tensor sampling_from_probs(torch::Tensor probs,
                                  torch::Tensor uniform_samples,
                                  bool deterministic);
std::vector<torch::Tensor> top_p_sampling_from_probs(
    torch::Tensor probs, torch::Tensor uniform_samples,
    std::optional<torch::Tensor> maybe_top_p_arr, double top_p_val,
    bool deterministic);
std::vector<torch::Tensor> top_k_sampling_from_probs(
    torch::Tensor probs, torch::Tensor uniform_samples,
    std::optional<torch::Tensor> maybe_top_k_arr, int64_t top_k_val,
    bool deterministic);
std::vector<torch::Tensor> min_p_sampling_from_probs(
    torch::Tensor probs, torch::Tensor uniform_samples,
    std::optional<torch::Tensor> maybe_min_p_arr, double min_p_val,
    bool deterministic);
std::vector<torch::Tensor> top_k_top_p_sampling_from_probs(
    torch::Tensor probs, torch::Tensor uniform_samples,
    std::optional<torch::Tensor> maybe_top_k_arr, double top_k_val,
    std::optional<torch::Tensor> maybe_top_p_arr, double top_p_val,
    bool deterministic);
torch::Tensor top_p_renorm_prob(torch::Tensor probs,
                                std::optional<torch::Tensor> maybe_top_p_arr,
                                double top_p_val);
torch::Tensor top_k_renorm_prob(torch::Tensor probs,
                                std::optional<torch::Tensor> maybe_top_k_arr,
                                int64_t top_k_val);
torch::Tensor top_k_mask_logits(torch::Tensor logits,
                                std::optional<torch::Tensor> maybe_top_k_arr,
                                int64_t top_k_val);
#endif
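// A hedged sketch (not part of the original header) of how declarations like
// these are typically exposed to Python from a separate bindings translation
// unit using PyTorch's op-registration API. The library name "_C" and the
// schema string are illustrative assumptions, not taken from this file:
//
//   #include <torch/library.h>
//   #include "ops.h"
//
//   TORCH_LIBRARY(_C, ops) {
//     // Declare the op schema, then bind the CUDA kernel to it.
//     ops.def("silu_and_mul(Tensor! out, Tensor input) -> ()");
//     ops.impl("silu_and_mul", torch::kCUDA, &silu_and_mul);
//   }
//
// after which the kernel would be callable from Python as
// torch.ops._C.silu_and_mul(out, input).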