// torch_bindings.cpp

#include "cache.h"
#include "cuda_utils.h"
#include "ops.h"
#include "registration.h"
#include "quantization/quant_ops.h"

#include <torch/library.h>

// Note on op signatures:
// The X_meta signatures are for the meta functions corresponding to op X.
// They must be kept in sync with the signature for X. Generally, only
// functions that return Tensors require a meta function.
//
// See the following links for detailed docs on op registration and function
// schemas.
// https://docs.google.com/document/d/1_W62p8WJOQQUzPsJYa7s701JXt0qf2OfLub2sbkHOaU/edit#heading=h.ptttacy8y1u9
// https://github.com/pytorch/pytorch/blob/main/aten/src/ATen/native/README.md#annotations
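//
// Schema annotation reminder: in the schemas below, `Tensor!` marks an
// argument the op mutates in place, and `Tensor?` marks an optional argument
// (c10::optional<torch::Tensor> on the C++ side). Registered ops are exposed
// to Python under `torch.ops.<TORCH_EXTENSION_NAME>`.
//
// A minimal sketch of invoking a registered op from C++ through the
// dispatcher (assuming the extension name resolves to `_C`; the actual
// namespace depends on how the extension is built):
//
//   static const auto op = c10::Dispatcher::singleton()
//       .findSchemaOrThrow("_C::silu_and_mul", "")
//       .typed<void(torch::Tensor&, torch::Tensor&)>();
//   op.call(out, input);  // out: [..., d], input: [..., 2 * d]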

TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) {
  // Aphrodite custom ops

  // Attention ops
  // Compute the attention between an input query and the cached
  // keys/values using PagedAttention.
  ops.def(
      "paged_attention_v1("
      " Tensor! out, Tensor query, Tensor key_cache,"
      " Tensor value_cache, int num_kv_heads, float scale,"
      " Tensor block_tables, Tensor seq_lens, int block_size,"
      " int max_seq_len, Tensor? alibi_slopes,"
      " str kv_cache_dtype, float k_scale, float v_scale,"
      " int tp_rank, int blocksparse_local_blocks,"
      " int blocksparse_vert_stride, int blocksparse_block_size,"
      " int blocksparse_head_sliding_step) -> ()");
  ops.impl("paged_attention_v1", torch::kCUDA, &paged_attention_v1);

  // PagedAttention V2.
  ops.def(
      "paged_attention_v2("
      " Tensor! out, Tensor exp_sums, Tensor max_logits,"
      " Tensor tmp_out, Tensor query, Tensor key_cache,"
      " Tensor value_cache, int num_kv_heads, float scale,"
      " Tensor block_tables, Tensor seq_lens, int block_size,"
      " int max_seq_len, Tensor? alibi_slopes,"
      " str kv_cache_dtype, float k_scale, float v_scale,"
      " int tp_rank, int blocksparse_local_blocks,"
      " int blocksparse_vert_stride, int blocksparse_block_size,"
      " int blocksparse_head_sliding_step) -> ()");
  ops.impl("paged_attention_v2", torch::kCUDA, &paged_attention_v2);

  // Activation ops
  // Activation function used in SwiGLU.
  ops.def("silu_and_mul(Tensor! out, Tensor input) -> ()");
  ops.impl("silu_and_mul", torch::kCUDA, &silu_and_mul);

  // Activation function used in GeGLU with `none` approximation.
  ops.def("gelu_and_mul(Tensor! out, Tensor input) -> ()");
  ops.impl("gelu_and_mul", torch::kCUDA, &gelu_and_mul);

  // Activation function used in GeGLU with `tanh` approximation.
  ops.def("gelu_tanh_and_mul(Tensor! out, Tensor input) -> ()");
  ops.impl("gelu_tanh_and_mul", torch::kCUDA, &gelu_tanh_and_mul);

  // GELU implementation used in GPT-2.
  ops.def("gelu_new(Tensor! out, Tensor input) -> ()");
  ops.impl("gelu_new", torch::kCUDA, &gelu_new);

  // Approximate GELU implementation.
  ops.def("gelu_fast(Tensor! out, Tensor input) -> ()");
  ops.impl("gelu_fast", torch::kCUDA, &gelu_fast);

  // Quick GELU implementation.
  ops.def("gelu_quick(Tensor! out, Tensor input) -> ()");
  ops.impl("gelu_quick", torch::kCUDA, &gelu_quick);

  // prepare_inputs: advance the generation state by one step in place
  // (updates input tokens, positions, sequence lengths, and slot mappings
  // ahead of the next decode iteration).
  ops.def("advance_step", &advance_step);
  ops.impl("advance_step", torch::kCUDA, &advance_step);

  // Layernorm
  // Apply Root Mean Square (RMS) Normalization to the input tensor.
  ops.def(
      "rms_norm(Tensor! out, Tensor input, Tensor weight, float epsilon) -> "
      "()");
  ops.impl("rms_norm", torch::kCUDA, &rms_norm);

  // In-place fused Add and RMS Normalization.
  ops.def(
      "fused_add_rms_norm(Tensor! input, Tensor! residual, Tensor weight, "
      "float epsilon) -> ()");
  ops.impl("fused_add_rms_norm", torch::kCUDA, &fused_add_rms_norm);

  // Rotary embedding
  // Apply GPT-NeoX or GPT-J style rotary embedding to query and key.
  ops.def(
      "rotary_embedding(Tensor positions, Tensor! query,"
      " Tensor! key, int head_size,"
      " Tensor cos_sin_cache, bool is_neox) -> ()");
  ops.impl("rotary_embedding", torch::kCUDA, &rotary_embedding);

  // Apply GPT-NeoX or GPT-J style rotary embedding to query and key
  // (supports multiple loras).
  ops.def(
      "batched_rotary_embedding(Tensor positions, Tensor! query,"
      " Tensor! key, int head_size,"
      " Tensor cos_sin_cache, bool is_neox,"
      " int rot_dim,"
      " Tensor cos_sin_cache_offsets) -> ()");
  ops.impl("batched_rotary_embedding", torch::kCUDA,
           &batched_rotary_embedding);

  // Quantization ops
#ifndef USE_ROCM
  // Quantized GEMM for AQLM.
  ops.def("aqlm_gemm", &aqlm_gemm);
  ops.impl("aqlm_gemm", torch::kCUDA, &aqlm_gemm);

  // Decompression method for AQLM.
  ops.def("aqlm_dequant", &aqlm_dequant);
  ops.impl("aqlm_dequant", torch::kCUDA, &aqlm_dequant);

  // Quantized GEMM for AWQ.
  ops.def("awq_gemm", &awq_gemm);
  ops.impl("awq_gemm", torch::kCUDA, &awq_gemm);

  // Dequantization for AWQ.
  ops.def("awq_dequantize", &awq_dequantize);
  ops.impl("awq_dequantize", torch::kCUDA, &awq_dequantize);

  // Marlin (Dense) Optimized Quantized GEMM for GPTQ.
  ops.def("marlin_gemm", &marlin_gemm);
  ops.impl("marlin_gemm", torch::kCUDA, &marlin_gemm);

  // Marlin_24 (Sparse) Optimized Quantized GEMM for GPTQ.
  ops.def("gptq_marlin_24_gemm", &gptq_marlin_24_gemm);
  ops.impl("gptq_marlin_24_gemm", torch::kCUDA, &gptq_marlin_24_gemm);

  // gptq_marlin Optimized Quantized GEMM for GPTQ.
  ops.def("gptq_marlin_gemm", &gptq_marlin_gemm);
  ops.impl("gptq_marlin_gemm", torch::kCUDA, &gptq_marlin_gemm);

  // gptq_marlin repack from GPTQ.
  ops.def("gptq_marlin_repack", &gptq_marlin_repack);
  ops.impl("gptq_marlin_repack", torch::kCUDA, &gptq_marlin_repack);

  // awq_marlin repack from AWQ.
  ops.def("awq_marlin_repack", &awq_marlin_repack);
  ops.impl("awq_marlin_repack", torch::kCUDA, &awq_marlin_repack);

  // fp8_marlin Optimized Quantized GEMM for FP8 weight-only.
  ops.def("fp8_marlin_gemm", &fp8_marlin_gemm);
  ops.impl("fp8_marlin_gemm", torch::kCUDA, &fp8_marlin_gemm);

  // CUTLASS w8a8 GEMM, supporting symmetric per-tensor or per-row/column
  // quantization.
  ops.def(
      "cutlass_scaled_mm(Tensor! out, Tensor a,"
      " Tensor b, Tensor a_scales,"
      " Tensor b_scales, Tensor? bias) -> ()");
  ops.impl("cutlass_scaled_mm", torch::kCUDA, &cutlass_scaled_mm);

  // Check if cutlass scaled_mm is supported for CUDA devices of the given
  // capability.
  ops.def("cutlass_scaled_mm_supports_fp8", &cutlass_scaled_mm_supports_fp8);
  ops.impl("cutlass_scaled_mm_supports_fp8", torch::kCUDA,
           &cutlass_scaled_mm_supports_fp8);

  // QuIP# GEMV
  ops.def("quip_gemv", &e8p_mm_origorder);
  ops.impl("quip_gemv", torch::kCUDA, &e8p_mm_origorder);

  // QuIP# Decompress
  ops.def("quip_decompress", &decompress_e8p_origorder);
  ops.impl("quip_decompress", torch::kCUDA, &decompress_e8p_origorder);

  // Sampling Kernels
  ops.def("sampling_from_probs", &sampling_from_probs);
  ops.impl("sampling_from_probs", torch::kCUDA, &sampling_from_probs);
  ops.def("top_k_sampling_from_probs", &top_k_sampling_from_probs);
  ops.impl("top_k_sampling_from_probs", torch::kCUDA,
           &top_k_sampling_from_probs);
  ops.def("min_p_sampling_from_probs", &min_p_sampling_from_probs);
  ops.impl("min_p_sampling_from_probs", torch::kCUDA,
           &min_p_sampling_from_probs);
  ops.def("top_p_sampling_from_probs", &top_p_sampling_from_probs);
  ops.impl("top_p_sampling_from_probs", torch::kCUDA,
           &top_p_sampling_from_probs);
  ops.def("top_k_top_p_sampling_from_probs", &top_k_top_p_sampling_from_probs);
  ops.impl("top_k_top_p_sampling_from_probs", torch::kCUDA,
           &top_k_top_p_sampling_from_probs);
  ops.def("top_k_renorm_prob", &top_k_renorm_prob);
  ops.impl("top_k_renorm_prob", torch::kCUDA, &top_k_renorm_prob);
  ops.def("top_p_renorm_prob", &top_p_renorm_prob);
  ops.impl("top_p_renorm_prob", torch::kCUDA, &top_p_renorm_prob);
  ops.def("top_k_mask_logits", &top_k_mask_logits);
  ops.impl("top_k_mask_logits", torch::kCUDA, &top_k_mask_logits);
#endif

  // Quantized GEMM for GPTQ.
  ops.def("gptq_gemm", &gptq_gemm);
  ops.impl("gptq_gemm", torch::kCUDA, &gptq_gemm);

  // Post processing for GPTQ.
  ops.def("gptq_shuffle(Tensor! q_weight, Tensor q_perm, int bit) -> ()");
  ops.impl("gptq_shuffle", torch::kCUDA, &gptq_shuffle);

  // Quantized GEMM for SqueezeLLM.
  ops.def(
      "squeezellm_gemm(Tensor vec, Tensor mat, Tensor! mul, Tensor "
      "lookup_table) -> ()");
  ops.impl("squeezellm_gemm", torch::kCUDA, &squeezellm_gemm);

  // Compute FP8 quantized tensor for a given scaling factor.
  ops.def(
      "static_scaled_fp8_quant(Tensor! out, Tensor input, Tensor scale) -> ()");
  ops.impl("static_scaled_fp8_quant", torch::kCUDA, &static_scaled_fp8_quant);

  // Compute dynamic-per-tensor FP8 quantized tensor and scaling factor.
  ops.def(
      "dynamic_scaled_fp8_quant(Tensor! out, Tensor input, Tensor! scale) -> "
      "()");
  ops.impl("dynamic_scaled_fp8_quant", torch::kCUDA, &dynamic_scaled_fp8_quant);

  // Compute dynamic-per-token FP8 quantized tensor and scaling factor.
  ops.def(
      "dynamic_per_token_scaled_fp8_quant(Tensor! out, Tensor input, Tensor! "
      "scale, Tensor? scale_ub) -> ()");
  ops.impl("dynamic_per_token_scaled_fp8_quant", torch::kCUDA,
           &dynamic_per_token_scaled_fp8_quant);

  // Align the number of tokens to be processed by each expert so that it is
  // divisible by the block size.
  ops.def(
      "moe_align_block_size(Tensor topk_ids, int num_experts,"
      " int block_size, Tensor! sorted_token_ids,"
      " Tensor! experts_ids,"
      " Tensor! num_tokens_post_pad) -> ()");
  ops.impl("moe_align_block_size", torch::kCUDA, &moe_align_block_size);

  // Compute int8 quantized tensor for a given scaling factor.
  /*
    Implementation:
    void static_scaled_int8_quant(torch::Tensor& out, torch::Tensor const&
    input, torch::Tensor const& scale);
  */
  ops.def(
      "static_scaled_int8_quant(Tensor! out, Tensor input, Tensor scale) -> "
      "()");
  ops.impl("static_scaled_int8_quant", torch::kCUDA, &static_scaled_int8_quant);

  // Compute int8 quantized tensor and scaling factor.
  /*
    Implementation:
    void dynamic_scaled_int8_quant(torch::Tensor& out, torch::Tensor const&
    input, torch::Tensor& scales);
  */
  ops.def(
      "dynamic_scaled_int8_quant(Tensor! out, Tensor input, Tensor! scale) -> "
      "()");
  ops.impl("dynamic_scaled_int8_quant", torch::kCUDA,
           &dynamic_scaled_int8_quant);

  // Mamba kernels
  ops.def(
      "selective_scan_fwd(Tensor! u, Tensor! delta,"
      " Tensor! A, Tensor! B, Tensor! C,"
      " Tensor? D_, Tensor? z_, Tensor? delta_bias_,"
      " bool delta_softplus,"
      " Tensor? index_, Tensor? x) -> Tensor[]");
  ops.impl("selective_scan_fwd", torch::kCUDA, &selective_scan_fwd);

  ops.def(
      "causal_conv1d_update(Tensor! x,"
      " Tensor! conv_state,"
      " Tensor! weight,"
      " Tensor? bias_,"
      " bool silu_activation) -> Tensor");
  ops.impl("causal_conv1d_update", torch::kCUDA, &causal_conv1d_update);

  ops.def(
      "causal_conv1d_fwd(Tensor! x, Tensor! weight,"
      " Tensor? bias_,"
      " Tensor? seq_idx_,"
      " Tensor? seq_pos_idx_,"
      " Tensor? initial_states_,"
      " Tensor? final_states_out_,"
      " bool silu_activation) -> Tensor");
  ops.impl("causal_conv1d_fwd", torch::kCUDA, &causal_conv1d_fwd);
}

TORCH_LIBRARY_EXPAND(CONCAT(TORCH_EXTENSION_NAME, _cache_ops), cache_ops) {
  // Cache ops
  // Swap in (out) the cache blocks from src to dst.
  cache_ops.def(
      "swap_blocks(Tensor src, Tensor! dst, Tensor block_mapping) -> ()");
  cache_ops.impl("swap_blocks", torch::kCUDA, &swap_blocks);

  // Copy the cache blocks from src to dst.
  cache_ops.def(
      "copy_blocks(Tensor[]! key_caches, Tensor[]! value_caches, Tensor "
      "block_mapping) -> ()");
  cache_ops.impl("copy_blocks", torch::kCUDA, &copy_blocks);

  // Reshape the key and value tensors and cache them.
  cache_ops.def(
      "reshape_and_cache(Tensor key, Tensor value,"
      " Tensor! key_cache, Tensor! value_cache,"
      " Tensor slot_mapping,"
      " str kv_cache_dtype,"
      " float k_scale, float v_scale) -> ()");
  cache_ops.impl("reshape_and_cache", torch::kCUDA, &reshape_and_cache);

  // Reshape the key and value tensors and cache them, using the
  // flash-attention cache layout.
  cache_ops.def(
      "reshape_and_cache_flash(Tensor key, Tensor value,"
      " Tensor! key_cache,"
      " Tensor! value_cache,"
      " Tensor slot_mapping,"
      " str kv_cache_dtype) -> ()");
  cache_ops.impl("reshape_and_cache_flash", torch::kCUDA,
                 &reshape_and_cache_flash);

  // Convert the key and value cache to fp8 data type.
  cache_ops.def(
      "convert_fp8(Tensor! dst_cache, Tensor src_cache, float scale, str "
      "kv_cache_dtype) -> ()");
  cache_ops.impl("convert_fp8", torch::kCUDA, &convert_fp8);
}

TORCH_LIBRARY_EXPAND(CONCAT(TORCH_EXTENSION_NAME, _cuda_utils), cuda_utils) {
  // CUDA utils
  // Gets the specified device attribute.
  cuda_utils.def("get_device_attribute", &get_device_attribute);
  cuda_utils.impl("get_device_attribute", torch::kCUDA, &get_device_attribute);

  // Gets the maximum shared memory per block device attribute.
  cuda_utils.def("get_max_shared_memory_per_block_device_attribute",
                 &get_max_shared_memory_per_block_device_attribute);
  cuda_utils.impl("get_max_shared_memory_per_block_device_attribute",
                  torch::kCUDA,
                  &get_max_shared_memory_per_block_device_attribute);
}

#ifndef USE_ROCM
TORCH_LIBRARY_EXPAND(CONCAT(TORCH_EXTENSION_NAME, _custom_ar), custom_ar) {
  // Custom all-reduce kernels
  custom_ar.def("init_custom_ar", &init_custom_ar);
  custom_ar.impl("init_custom_ar", torch::kCUDA, &init_custom_ar);
  custom_ar.def("should_custom_ar", &should_custom_ar);
  custom_ar.impl("should_custom_ar", torch::kCUDA, &should_custom_ar);
  custom_ar.def("all_reduce_reg(int fa, Tensor inp, Tensor! out) -> ()");
  custom_ar.impl("all_reduce_reg", torch::kCUDA, &all_reduce_reg);
  custom_ar.def(
      "all_reduce_unreg(int fa, Tensor inp, Tensor reg_buffer, Tensor! out) -> "
      "()");
  custom_ar.impl("all_reduce_unreg", torch::kCUDA, &all_reduce_unreg);
  306. custom_ar.def("dispose", &dispose);
  307. custom_ar.impl("dispose", torch::kCPU, &dispose);
  308. custom_ar.def("meta_size", &meta_size);
  309. custom_ar.impl("meta_size", torch::kCPU, &meta_size);
  310. custom_ar.def("register_buffer", &register_buffer);
  311. custom_ar.impl("register_buffer", torch::kCUDA, &register_buffer);
  312. custom_ar.def("get_graph_buffer_ipc_meta", &get_graph_buffer_ipc_meta);
  313. custom_ar.impl("get_graph_buffer_ipc_meta", torch::kCPU,
  314. &get_graph_buffer_ipc_meta);
  315. custom_ar.def("register_graph_buffers", &register_graph_buffers);
  316. custom_ar.impl("register_graph_buffers", torch::kCPU,
  317. &register_graph_buffers);
  318. }
#endif

REGISTER_EXTENSION(TORCH_EXTENSION_NAME)