// quant_ops.h

#pragma once

#include <torch/extension.h>

#ifndef USE_ROCM
// AQLM
torch::Tensor aqlm_gemm(
  const torch::Tensor& input,
  const torch::Tensor& codes,
  const torch::Tensor& codebooks,
  const torch::Tensor& scales,
  const torch::Tensor& codebook_partition_sizes,
  const std::optional<torch::Tensor>& bias);

torch::Tensor aqlm_dequant(
  const torch::Tensor& codes,
  const torch::Tensor& codebooks,
  const torch::Tensor& codebook_partition_sizes);
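
// Hedged convenience sketch, not part of the original API: AQLM stores
// weights as codebook indices, so the common bias-free case can be wrapped
// as below. The wrapper name and the std::nullopt default are assumptions.
inline torch::Tensor aqlm_gemm_no_bias(
  const torch::Tensor& input,
  const torch::Tensor& codes,
  const torch::Tensor& codebooks,
  const torch::Tensor& scales,
  const torch::Tensor& codebook_partition_sizes) {
  return aqlm_gemm(input, codes, codebooks, scales,
                   codebook_partition_sizes, std::nullopt);
}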
// AWQ
torch::Tensor awq_gemm(
  torch::Tensor _in_feats,
  torch::Tensor _kernel,
  torch::Tensor _scaling_factors,
  torch::Tensor _zeros,
  int split_k_iters);

torch::Tensor awq_dequantize(
  torch::Tensor _kernel,
  torch::Tensor _scaling_factors,
  torch::Tensor _zeros,
  int split_k_iters,
  int thx,
  int thy);

torch::Tensor awq_group_gemm(
  torch::Tensor _in_feats,
  torch::Tensor _kernel,
  torch::Tensor _scaling_factors,
  torch::Tensor _zeros,
  torch::Tensor _topk_weights,
  torch::Tensor _sorted_token_ids_ptr,
  torch::Tensor _expert_ids_ptr,
  torch::Tensor _num_tokens_post_padded,
  bool mul_weights,
  int split_k_iters);
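
// Hedged cross-check sketch, not part of the original API: if awq_dequantize
// really returns the dense [in_features, out_features] weight (an assumption,
// as is thx/thy == 0 selecting kernel defaults), the fused awq_gemm path can
// be validated against a plain matmul like this.
inline torch::Tensor awq_gemm_reference(
  torch::Tensor _in_feats,
  torch::Tensor _kernel,
  torch::Tensor _scaling_factors,
  torch::Tensor _zeros) {
  torch::Tensor w = awq_dequantize(_kernel, _scaling_factors, _zeros,
                                   /*split_k_iters=*/0, /*thx=*/0, /*thy=*/0);
  return torch::matmul(_in_feats, w);
}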
#endif  // !USE_ROCM
// ExLlamav2
torch::Tensor exl2_gemm(
  torch::Tensor a,
  uintptr_t b);

uintptr_t make_q_matrix(
  torch::Tensor q_weight,
  torch::Tensor q_perm,
  torch::Tensor q_invperm,
  torch::Tensor q_scale,
  torch::Tensor q_scale_max,
  torch::Tensor q_groups,
  torch::Tensor q_group_map);
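
// Hedged usage sketch: judging by the signatures, make_q_matrix packs the
// quantized tensors into an opaque handle once at load time (a raw pointer
// cast to uintptr_t, so its lifetime is presumably managed by the extension,
// not the caller), and exl2_gemm then reuses that handle per forward pass:
//   uintptr_t q = make_q_matrix(q_weight, q_perm, q_invperm, q_scale,
//                               q_scale_max, q_groups, q_group_map);
//   torch::Tensor y = exl2_gemm(x, q);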
#ifndef USE_ROCM
// GGUF
torch::Tensor ggml_dequantize(
  torch::Tensor X,
  int8_t type,
  int64_t m,
  int64_t n);

torch::Tensor ggml_mul_mat_vec(
  torch::Tensor W,  // quant weight
  torch::Tensor X,  // input
  int8_t type,
  int64_t m);

torch::Tensor ggml_mul_mat_vec_a8(
  torch::Tensor W,  // quant weight
  torch::Tensor X,  // input
  int8_t type,
  int64_t row);

torch::Tensor ggml_mul_mat_a8(
  torch::Tensor W,  // quant weight
  torch::Tensor X,  // input
  int8_t type,
  int64_t row);
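
// Hedged usage sketch: `type` presumably carries ggml's quantization enum
// (e.g. GGML_TYPE_Q4_K == 12 upstream; the exact codes accepted here are an
// assumption), with m/n/row giving the weight dimensions:
//   torch::Tensor w16 = ggml_dequantize(W, /*type=*/12, m, n);
//   torch::Tensor y   = ggml_mul_mat_vec_a8(W, x, /*type=*/12, /*row=*/m);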
#endif  // !USE_ROCM
// GPTQ
torch::Tensor gptq_gemm(
  torch::Tensor a,
  torch::Tensor b_q_weight,
  torch::Tensor b_gptq_qzeros,
  torch::Tensor b_gptq_scales,
  torch::Tensor b_g_idx,
  bool use_exllama,
  int bit);

void gptq_shuffle(
  torch::Tensor q_weight,
  torch::Tensor q_perm,
  int bit);

torch::Tensor group_gptq_gemm(
  torch::Tensor a,
  torch::Tensor b_q_weight,
  torch::Tensor b_gptq_qzeros,
  torch::Tensor b_gptq_scales,
  torch::Tensor b_g_idx,
  torch::Tensor topk_weights,
  torch::Tensor sorted_token_ids_ptr,
  torch::Tensor expert_ids_ptr,
  torch::Tensor num_tokens_post_padded,
  bool mul_weights,
  bool use_exllama);

torch::Tensor dequant_gptq(
  torch::Tensor b_q_weight,
  torch::Tensor b_gptq_qzeros,
  torch::Tensor b_gptq_scales,
  torch::Tensor b_g_idx,
  int bits,
  bool use_exllama);
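
// Hedged usage sketch (call order inferred from the names): gptq_shuffle
// reads as a one-time weight reordering at load, after which gptq_gemm
// serves each forward pass, optionally through the exllama kernels:
//   gptq_shuffle(q_weight, q_perm, bit);        // once, at weight load
//   torch::Tensor y = gptq_gemm(a, q_weight, qzeros, scales, g_idx,
//                               /*use_exllama=*/true, bit);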
#ifndef USE_ROCM
// Marlin
torch::Tensor marlin_gemm(
  torch::Tensor& a,
  torch::Tensor& b_q_weight,
  torch::Tensor& b_scales,
  torch::Tensor& workspace,
  int64_t size_m,
  int64_t size_n,
  int64_t size_k);

torch::Tensor gptq_marlin_gemm(
  torch::Tensor& a,
  torch::Tensor& b_q_weight,
  torch::Tensor& b_scales,
  torch::Tensor& g_idx,
  torch::Tensor& perm,
  torch::Tensor& workspace,
  int64_t num_bits,
  int64_t size_m,
  int64_t size_n,
  int64_t size_k,
  bool is_k_full);

torch::Tensor gptq_marlin_repack(
  torch::Tensor& b_q_weight,
  torch::Tensor& perm,
  int64_t size_k,
  int64_t size_n,
  int64_t num_bits);
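
// Hedged migration sketch (the layout conversion is an assumption based on
// the name): gptq_marlin_repack appears to rewrite a GPTQ-format weight into
// Marlin's tile layout so gptq_marlin_gemm can consume it:
//   torch::Tensor b = gptq_marlin_repack(b_q_weight, perm, size_k, size_n,
//                                        num_bits);
//   torch::Tensor c = gptq_marlin_gemm(a, b, b_scales, g_idx, perm,
//                                      workspace, num_bits, size_m, size_n,
//                                      size_k, /*is_k_full=*/true);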
// QuIP#
at::Tensor e8p_mm_origorder(
  const at::Tensor& A,
  const at::Tensor& B,
  const at::Tensor& CB);

void decompress_e8p_origorder(
  torch::Tensor YIs,
  torch::Tensor CB,
  torch::Tensor& Y);
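
// Hedged reading of the signatures: decompress_e8p_origorder seems to write
// the dequantized weight into a preallocated Y (the non-const reference
// suggests an out-parameter), while e8p_mm_origorder fuses the E8P codebook
// lookup into the matmul itself:
//   decompress_e8p_origorder(YIs, CB, Y);        // materialize the weight
//   torch::Tensor c = e8p_mm_origorder(A, B, CB); // or fused lookup + matmul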
// SmoothQuant+
torch::Tensor autoquant_s4_f16_gemm(
  torch::Tensor _in_feats,
  torch::Tensor _kernel,
  torch::Tensor _scales_zeros);

void autoquant_convert_s4_k_m8(
  torch::Tensor _weight_dest,
  torch::Tensor _quant_scales_zeros_dest,
  torch::Tensor _workspace,
  torch::Tensor _quant_weight_src,
  torch::Tensor _quant_scales,
  torch::Tensor _quant_zeros,
  int m,
  int k,
  int group_size);
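
// Hedged usage sketch (call order inferred from the names): the convert step
// looks like a one-time repack of the int4 weight and its scales/zeros into
// the layout the gemm kernel expects:
//   autoquant_convert_s4_k_m8(w_dest, scales_zeros_dest, workspace,
//                             w_src, scales, zeros,
//                             m, k, group_size);                 // at load
//   torch::Tensor y = autoquant_s4_f16_gemm(x, w_dest, scales_zeros_dest);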
#endif  // !USE_ROCM
// SqueezeLLM
void squeezellm_gemm(
  torch::Tensor vec,
  torch::Tensor mat,
  torch::Tensor mul,
  torch::Tensor lookup_table);
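
// Hedged convenience sketch, not part of the original API: `mul` reads as a
// caller-allocated output buffer (an assumption, as are the output shape and
// dtype used below).
inline torch::Tensor squeezellm_matmul(
  torch::Tensor vec, torch::Tensor mat, torch::Tensor lookup_table,
  int64_t out_features) {
  torch::Tensor mul =
      torch::zeros({vec.size(0), out_features}, vec.options());
  squeezellm_gemm(vec, mat, mul, lookup_table);
  return mul;
}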
// FP8
void static_scaled_fp8_quant(
  torch::Tensor& out,
  torch::Tensor& input,
  torch::Tensor& scale);

void dynamic_scaled_fp8_quant(
  torch::Tensor& out,
  torch::Tensor& input,
  torch::Tensor& scale);
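
// Hedged convenience sketch, not part of the original API: assumes `out`
// must be preallocated with an fp8 dtype, that the dynamic variant writes
// the scale it computes back into `scale` (suggested by the non-const
// reference), and a PyTorch build that provides Float8_e4m3fn.
inline std::pair<torch::Tensor, torch::Tensor> dynamic_fp8_quant_example(
  torch::Tensor input) {
  torch::Tensor out = torch::empty_like(
      input, input.options().dtype(at::ScalarType::Float8_e4m3fn));
  torch::Tensor scale =
      torch::zeros({1}, input.options().dtype(at::kFloat));
  dynamic_scaled_fp8_quant(out, input, scale);
  return {out, scale};
}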