torch_bindings.cpp

#include "cache.h"
#include "cuda_utils.h"
#include "ops.h"
#include "registration.h"
#include "quantization/quant_ops.h"

#include <torch/library.h>

// Note on op signatures:
// The X_meta signatures are for the meta functions corresponding to op X.
// They must be kept in sync with the signature for X. Generally, only
// functions that return Tensors require a meta function.
//
// See the following links for detailed docs on op registration and function
// schemas.
// https://docs.google.com/document/d/1_W62p8WJOQQUzPsJYa7s701JXt0qf2OfLub2sbkHOaU/edit#heading=h.ptttacy8y1u9
// https://github.com/pytorch/pytorch/blob/main/aten/src/ATen/native/README.md#annotations
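//
// As an illustrative sketch of this pattern (hypothetical names, not ops
// defined in this file), an op that returns a Tensor would pair its CUDA
// kernel with a meta function registered on the Meta dispatch key:
//
//   ops.def("my_op(Tensor a) -> Tensor");
//   ops.impl("my_op", torch::kCUDA, &my_op);
//   ops.impl("my_op", torch::kMeta, &my_op_meta);
//
// Most ops below return () and mutate their `Tensor!` arguments in place,
// which is why no meta functions are registered here.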

TORCH_LIBRARY_EXPAND(TORCH_EXTENSION_NAME, ops) {
  // Aphrodite custom ops

  // Attention ops
  // Compute the attention between an input query and the cached
  // keys/values using PagedAttention.
  ops.def(
      "paged_attention_v1("
      " Tensor! out, Tensor query, Tensor key_cache,"
      " Tensor value_cache, int num_kv_heads, float scale,"
      " Tensor block_tables, Tensor seq_lens, int block_size,"
      " int max_seq_len, Tensor? alibi_slopes,"
      " str kv_cache_dtype, float kv_scale, int tp_rank,"
      " int blocksparse_local_blocks,"
      " int blocksparse_vert_stride, int blocksparse_block_size,"
      " int blocksparse_head_sliding_step) -> ()");
  ops.impl("paged_attention_v1", torch::kCUDA, &paged_attention_v1);

  // PagedAttention V2.
  ops.def(
      "paged_attention_v2("
      " Tensor! out, Tensor exp_sums, Tensor max_logits,"
      " Tensor tmp_out, Tensor query, Tensor key_cache,"
      " Tensor value_cache, int num_kv_heads, float scale,"
      " Tensor block_tables, Tensor seq_lens, int block_size,"
      " int max_seq_len, Tensor? alibi_slopes,"
      " str kv_cache_dtype, float kv_scale, int tp_rank,"
      " int blocksparse_local_blocks,"
      " int blocksparse_vert_stride, int blocksparse_block_size,"
      " int blocksparse_head_sliding_step) -> ()");
  ops.impl("paged_attention_v2", torch::kCUDA, &paged_attention_v2);

  // Activation ops
  // Activation function used in SwiGLU.
  ops.def("silu_and_mul(Tensor! out, Tensor input) -> ()");
  ops.impl("silu_and_mul", torch::kCUDA, &silu_and_mul);

  // Activation function used in GeGLU with `none` approximation.
  ops.def("gelu_and_mul(Tensor! out, Tensor input) -> ()");
  ops.impl("gelu_and_mul", torch::kCUDA, &gelu_and_mul);

  // Activation function used in GeGLU with `tanh` approximation.
  ops.def("gelu_tanh_and_mul(Tensor! out, Tensor input) -> ()");
  ops.impl("gelu_tanh_and_mul", torch::kCUDA, &gelu_tanh_and_mul);

  // GELU implementation used in GPT-2.
  ops.def("gelu_new(Tensor! out, Tensor input) -> ()");
  ops.impl("gelu_new", torch::kCUDA, &gelu_new);

  // Approximate GELU implementation.
  ops.def("gelu_fast(Tensor! out, Tensor input) -> ()");
  ops.impl("gelu_fast", torch::kCUDA, &gelu_fast);

  // Quick GELU implementation.
  ops.def("gelu_quick(Tensor! out, Tensor input) -> ()");
  ops.impl("gelu_quick", torch::kCUDA, &gelu_quick);

  // Layernorm
  // Apply Root Mean Square (RMS) Normalization to the input tensor.
  ops.def(
      "rms_norm(Tensor! out, Tensor input, Tensor weight, float epsilon) -> "
      "()");
  ops.impl("rms_norm", torch::kCUDA, &rms_norm);

  // In-place fused Add and RMS Normalization.
  ops.def(
      "fused_add_rms_norm(Tensor! input, Tensor! residual, Tensor weight, "
      "float epsilon) -> ()");
  ops.impl("fused_add_rms_norm", torch::kCUDA, &fused_add_rms_norm);

  // Rotary embedding
  // Apply GPT-NeoX or GPT-J style rotary embedding to query and key.
  ops.def(
      "rotary_embedding(Tensor positions, Tensor! query,"
      " Tensor! key, int head_size,"
      " Tensor cos_sin_cache, bool is_neox) -> ()");
  ops.impl("rotary_embedding", torch::kCUDA, &rotary_embedding);

  // Apply GPT-NeoX or GPT-J style rotary embedding to query and key
  // (supports multiple loras).
  ops.def(
      "batched_rotary_embedding(Tensor positions, Tensor! query,"
      " Tensor! key, int head_size,"
      " Tensor cos_sin_cache, bool is_neox,"
      " int rot_dim,"
      " Tensor cos_sin_cache_offsets) -> ()");
  ops.impl("batched_rotary_embedding", torch::kCUDA,
           &batched_rotary_embedding);

  // Quantization ops
#ifndef USE_ROCM
  // Quantized GEMM for AQLM.
  ops.def("aqlm_gemm", &aqlm_gemm);
  ops.impl("aqlm_gemm", torch::kCUDA, &aqlm_gemm);

  // Decompression method for AQLM.
  ops.def("aqlm_dequant", &aqlm_dequant);
  ops.impl("aqlm_dequant", torch::kCUDA, &aqlm_dequant);

  // Quantized GEMM for AWQ.
  ops.def("awq_gemm", &awq_gemm);
  ops.impl("awq_gemm", torch::kCUDA, &awq_gemm);

  // Dequantization for AWQ.
  ops.def("awq_dequantize", &awq_dequantize);
  ops.impl("awq_dequantize", torch::kCUDA, &awq_dequantize);

  // Marlin (dense) optimized quantized GEMM for GPTQ.
  ops.def("marlin_gemm", &marlin_gemm);
  ops.impl("marlin_gemm", torch::kCUDA, &marlin_gemm);

  // Marlin_24 (sparse) optimized quantized GEMM for GPTQ.
  ops.def("gptq_marlin_24_gemm", &gptq_marlin_24_gemm);
  ops.impl("gptq_marlin_24_gemm", torch::kCUDA, &gptq_marlin_24_gemm);

  // gptq_marlin optimized quantized GEMM for GPTQ.
  ops.def("gptq_marlin_gemm", &gptq_marlin_gemm);
  ops.impl("gptq_marlin_gemm", torch::kCUDA, &gptq_marlin_gemm);

  // gptq_marlin repack from GPTQ.
  ops.def("gptq_marlin_repack", &gptq_marlin_repack);
  ops.impl("gptq_marlin_repack", torch::kCUDA, &gptq_marlin_repack);

  // fp8_marlin optimized quantized GEMM for FP8 weight-only quantization.
  ops.def("fp8_marlin_gemm", &fp8_marlin_gemm);
  ops.impl("fp8_marlin_gemm", torch::kCUDA, &fp8_marlin_gemm);

  // CUTLASS w8a8 GEMM, supporting symmetric per-tensor or per-row/column
  // quantization.
  ops.def(
      "cutlass_scaled_mm(Tensor! out, Tensor a,"
      " Tensor b, Tensor a_scales,"
      " Tensor b_scales, Tensor? bias) -> ()");
  ops.impl("cutlass_scaled_mm", torch::kCUDA, &cutlass_scaled_mm);

  // Check whether cutlass_scaled_mm supports FP8 on CUDA devices of the
  // given capability.
  ops.def("cutlass_scaled_mm_supports_fp8", &cutlass_scaled_mm_supports_fp8);
  ops.impl("cutlass_scaled_mm_supports_fp8", torch::kCUDA,
           &cutlass_scaled_mm_supports_fp8);
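  // (Calling-convention note, assumed rather than read from this file: the
  // capability is passed as an integer encoded major * 10 + minor, e.g. 90
  // for SM90/Hopper, and the op returns a bool.)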

  // QuIP# GEMV
  ops.def("quip_gemv", &e8p_mm_origorder);
  ops.impl("quip_gemv", torch::kCUDA, &e8p_mm_origorder);

  // QuIP# decompress
  ops.def("quip_decompress", &decompress_e8p_origorder);
  ops.impl("quip_decompress", torch::kCUDA, &decompress_e8p_origorder);
#endif

  // Quantized GEMM for GPTQ.
  ops.def("gptq_gemm", &gptq_gemm);
  ops.impl("gptq_gemm", torch::kCUDA, &gptq_gemm);

  // Post processing for GPTQ.
  ops.def("gptq_shuffle(Tensor! q_weight, Tensor q_perm, int bit) -> ()");
  ops.impl("gptq_shuffle", torch::kCUDA, &gptq_shuffle);

  // Quantized GEMM for SqueezeLLM.
  ops.def(
      "squeezellm_gemm(Tensor vec, Tensor mat, Tensor! mul, Tensor "
      "lookup_table) -> ()");
  ops.impl("squeezellm_gemm", torch::kCUDA, &squeezellm_gemm);

  // Compute FP8 quantized tensor for a given scaling factor.
  ops.def(
      "static_scaled_fp8_quant(Tensor! out, Tensor input, Tensor scale) -> "
      "()");
  ops.impl("static_scaled_fp8_quant", torch::kCUDA, &static_scaled_fp8_quant);

  // Compute FP8 quantized tensor and scaling factor.
  ops.def(
      "dynamic_scaled_fp8_quant(Tensor! out, Tensor input, Tensor! scale) -> "
      "()");
  ops.impl("dynamic_scaled_fp8_quant", torch::kCUDA,
           &dynamic_scaled_fp8_quant);

  // Align the number of tokens to be processed by each expert so that it is
  // divisible by the block size.
  ops.def(
      "moe_align_block_size(Tensor topk_ids, int num_experts,"
      " int block_size, Tensor! sorted_token_ids,"
      " Tensor! experts_ids,"
      " Tensor! num_tokens_post_pad) -> ()");
  ops.impl("moe_align_block_size", torch::kCUDA, &moe_align_block_size);
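  // For example (illustrative numbers): with block_size = 16, an expert
  // assigned 40 tokens gets padded to ceil(40 / 16) * 16 = 48 token slots,
  // and num_tokens_post_pad records the padded total across all experts.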

  // Compute int8 quantized tensor for a given scaling factor.
  /*
    Implementation:
    void static_scaled_int8_quant(torch::Tensor& out, torch::Tensor const&
                                  input, torch::Tensor const& scale);
  */
  ops.def(
      "static_scaled_int8_quant(Tensor! out, Tensor input, Tensor scale) -> "
      "()");
  ops.impl("static_scaled_int8_quant", torch::kCUDA,
           &static_scaled_int8_quant);

  // Compute int8 quantized tensor and scaling factor.
  /*
    Implementation:
    void dynamic_scaled_int8_quant(torch::Tensor& out, torch::Tensor const&
                                   input, torch::Tensor& scales);
  */
  ops.def(
      "dynamic_scaled_int8_quant(Tensor! out, Tensor input, Tensor! scale) -> "
      "()");
  ops.impl("dynamic_scaled_int8_quant", torch::kCUDA,
           &dynamic_scaled_int8_quant);
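  // (Assumed kernel behavior, for illustration only: under the usual
  // symmetric-int8 convention the dynamic variant derives
  // scale = max(|input|) / 127 from the input and then computes
  // out = round(input / scale).)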

  // Selective scan forward pass (used by Mamba-style SSM layers).
  ops.def(
      "selective_scan_fwd(Tensor! u, Tensor! delta,"
      " Tensor! A, Tensor! B, Tensor C,"
      " Tensor? D_, Tensor? z_, Tensor? delta_bias_,"
      " bool delta_softplus,"
      " Tensor? index_, Tensor? x) -> Tensor[]");
  ops.impl("selective_scan_fwd", torch::kCUDA, &selective_scan_fwd);
}

TORCH_LIBRARY_EXPAND(CONCAT(TORCH_EXTENSION_NAME, _cache_ops), cache_ops) {
  // Cache ops
  // Swap in (out) the cache blocks from src to dst.
  cache_ops.def(
      "swap_blocks(Tensor src, Tensor! dst, Tensor block_mapping) -> ()");
  cache_ops.impl("swap_blocks", torch::kCUDA, &swap_blocks);

  // Copy the cache blocks from src to dst.
  cache_ops.def(
      "copy_blocks(Tensor[]! key_caches, Tensor[]! value_caches, Tensor "
      "block_mapping) -> ()");
  cache_ops.impl("copy_blocks", torch::kCUDA, &copy_blocks);

  // Reshape the key and value tensors and cache them.
  cache_ops.def(
      "reshape_and_cache(Tensor key, Tensor value,"
      " Tensor! key_cache, Tensor! value_cache,"
      " Tensor slot_mapping,"
      " str kv_cache_dtype,"
      " float kv_scale) -> ()");
  cache_ops.impl("reshape_and_cache", torch::kCUDA, &reshape_and_cache);

  // Reshape the key and value tensors and cache them, using the cache
  // layout expected by the FlashAttention backend.
  cache_ops.def(
      "reshape_and_cache_flash(Tensor key, Tensor value,"
      " Tensor! key_cache,"
      " Tensor! value_cache,"
      " Tensor slot_mapping,"
      " str kv_cache_dtype) -> ()");
  cache_ops.impl("reshape_and_cache_flash", torch::kCUDA,
                 &reshape_and_cache_flash);

  // Convert the key and value cache to fp8 data type.
  cache_ops.def(
      "convert_fp8(Tensor! dst_cache, Tensor src_cache, float scale, str "
      "kv_cache_dtype) -> ()");
  cache_ops.impl("convert_fp8", torch::kCUDA, &convert_fp8);
}

TORCH_LIBRARY_EXPAND(CONCAT(TORCH_EXTENSION_NAME, _cuda_utils), cuda_utils) {
  // CUDA utils
  // Gets the specified device attribute.
  cuda_utils.def("get_device_attribute", &get_device_attribute);
  cuda_utils.impl("get_device_attribute", torch::kCUDA,
                  &get_device_attribute);

  // Gets the maximum shared memory per block device attribute.
  cuda_utils.def("get_max_shared_memory_per_block_device_attribute",
                 &get_max_shared_memory_per_block_device_attribute);
  cuda_utils.impl("get_max_shared_memory_per_block_device_attribute",
                  torch::kCUDA,
                  &get_max_shared_memory_per_block_device_attribute);
}

#ifndef USE_ROCM
TORCH_LIBRARY_EXPAND(CONCAT(TORCH_EXTENSION_NAME, _custom_ar), custom_ar) {
  // Custom all-reduce kernels
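  //
  // Typical lifecycle, as driven from the Python side (a sketch under
  // assumptions; argument lists are elided and may differ):
  //   fa = init_custom_ar(...)          # create an all-reduce context handle
  //   register_buffer(fa, ...)          # pre-register a communication buffer
  //   if should_custom_ar(...):         # does the fast path apply?
  //       all_reduce_reg(fa, inp, out)  # reduce via the registered buffer
  //   dispose(fa)                       # free the context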
  custom_ar.def("init_custom_ar", &init_custom_ar);
  custom_ar.impl("init_custom_ar", torch::kCUDA, &init_custom_ar);
  custom_ar.def("should_custom_ar", &should_custom_ar);
  custom_ar.impl("should_custom_ar", torch::kCUDA, &should_custom_ar);
  custom_ar.def("all_reduce_reg(int fa, Tensor inp, Tensor! out) -> ()");
  custom_ar.impl("all_reduce_reg", torch::kCUDA, &all_reduce_reg);
  custom_ar.def(
      "all_reduce_unreg(int fa, Tensor inp, Tensor reg_buffer, Tensor! out) -> "
      "()");
  custom_ar.impl("all_reduce_unreg", torch::kCUDA, &all_reduce_unreg);
  custom_ar.def("dispose", &dispose);
  custom_ar.impl("dispose", torch::kCPU, &dispose);
  custom_ar.def("meta_size", &meta_size);
  custom_ar.impl("meta_size", torch::kCPU, &meta_size);
  custom_ar.def("register_buffer", &register_buffer);
  custom_ar.impl("register_buffer", torch::kCUDA, &register_buffer);
  custom_ar.def("get_graph_buffer_ipc_meta", &get_graph_buffer_ipc_meta);
  custom_ar.impl("get_graph_buffer_ipc_meta", torch::kCPU,
                 &get_graph_buffer_ipc_meta);
  custom_ar.def("register_graph_buffers", &register_graph_buffers);
  custom_ar.impl("register_graph_buffers", torch::kCPU,
                 &register_graph_buffers);
}
#endif

REGISTER_EXTENSION(TORCH_EXTENSION_NAME)
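
// Once the extension is built and imported, the registrations above are
// reachable from Python through torch.ops, under namespaces derived from
// TORCH_EXTENSION_NAME, e.g. torch.ops.<name>.silu_and_mul(out, x) and
// torch.ops.<name>_cache_ops.swap_blocks(src, dst, block_mapping), where
// <name> stands for the concrete extension name chosen at build time.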