#pragma once

#include <torch/library.h>

#include "core/scalar_type.hpp"

#ifndef USE_ROCM
// AQLM
torch::Tensor aqlm_gemm(const torch::Tensor& input, const torch::Tensor& codes,
                        const torch::Tensor& codebooks,
                        const torch::Tensor& scales,
                        const std::vector<int64_t>& codebook_partition_sizes,
                        const std::optional<torch::Tensor>& bias);

torch::Tensor aqlm_dequant(
    const torch::Tensor& codes, const torch::Tensor& codebooks,
    const std::vector<int64_t>& codebook_partition_sizes);
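// Usage sketch (hypothetical caller-side tensors; the AQLM code/codebook
// layouts are assumptions, see the kernel sources for the exact contract):
//   auto out = aqlm_gemm(input, codes, codebooks, scales,
//                        /*codebook_partition_sizes=*/{out_features},
//                        /*bias=*/std::nullopt);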
// AWQ
torch::Tensor awq_gemm(torch::Tensor _in_feats, torch::Tensor _kernel,
                       torch::Tensor _scaling_factors, torch::Tensor _zeros,
                       int64_t split_k_iters);

torch::Tensor awq_dequantize(torch::Tensor _kernel,
                             torch::Tensor _scaling_factors,
                             torch::Tensor _zeros, int64_t split_k_iters,
                             int64_t thx, int64_t thy);

torch::Tensor awq_group_gemm(torch::Tensor _in_feats, torch::Tensor _kernel,
                             torch::Tensor _scaling_factors,
                             torch::Tensor _zeros, torch::Tensor _topk_weights,
                             torch::Tensor _sorted_token_ids_ptr,
                             torch::Tensor _expert_ids_ptr,
                             torch::Tensor _num_tokens_post_padded,
                             bool mul_weights, int64_t split_k_iters);
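// Usage sketch (hypothetical tensors; assumes the 4-bit AWQ packed layout):
//   // One-shot dequantization of the whole packed weight:
//   auto w = awq_dequantize(kernel, scaling_factors, zeros,
//                           /*split_k_iters=*/0, /*thx=*/0, /*thy=*/0);
//   // Fused quantized GEMM:
//   auto out = awq_gemm(in_feats, kernel, scaling_factors, zeros,
//                       /*split_k_iters=*/8);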
#endif
// GPTQ
torch::Tensor gptq_gemm(torch::Tensor a, torch::Tensor b_q_weight,
                        torch::Tensor b_gptq_qzeros,
                        torch::Tensor b_gptq_scales, torch::Tensor b_g_idx,
                        bool use_exllama, int64_t bit);

void gptq_shuffle(torch::Tensor q_weight, torch::Tensor q_perm, int64_t bit);

torch::Tensor group_gptq_gemm(torch::Tensor a, torch::Tensor b_q_weight,
                              torch::Tensor b_gptq_qzeros,
                              torch::Tensor b_gptq_scales,
                              torch::Tensor b_g_idx, torch::Tensor topk_weights,
                              torch::Tensor sorted_token_ids_ptr,
                              torch::Tensor expert_ids_ptr,
                              torch::Tensor num_tokens_post_padded,
                              bool mul_weights, bool use_exllama);

torch::Tensor dequant_gptq(torch::Tensor b_q_weight,
                           torch::Tensor b_gptq_qzeros,
                           torch::Tensor b_gptq_scales, torch::Tensor b_g_idx,
                           int64_t bits, bool use_exllama);
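// Usage sketch (hypothetical tensors in GPTQ's packed layout; the exllama
// path expects the weight to be shuffled once up front):
//   gptq_shuffle(q_weight, q_perm, /*bit=*/4);
//   auto out = gptq_gemm(a, q_weight, qzeros, scales, g_idx,
//                        /*use_exllama=*/true, /*bit=*/4);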
#ifndef USE_ROCM
// Marlin
torch::Tensor marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight,
                          torch::Tensor& b_scales, torch::Tensor& workspace,
                          int64_t size_m, int64_t size_n, int64_t size_k);

torch::Tensor gptq_marlin_24_gemm(torch::Tensor& a, torch::Tensor& b_q_weight,
                                  torch::Tensor& b_meta,
                                  torch::Tensor& b_scales,
                                  torch::Tensor& workspace,
                                  aphrodite::ScalarTypeTorchPtr const& b_q_type,
                                  int64_t size_m, int64_t size_n,
                                  int64_t size_k);

torch::Tensor gptq_marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight,
                               torch::Tensor& b_scales, torch::Tensor& b_zeros,
                               torch::Tensor& g_idx, torch::Tensor& perm,
                               torch::Tensor& workspace,
                               aphrodite::ScalarTypeTorchPtr const& b_q_type,
                               int64_t size_m, int64_t size_n, int64_t size_k,
                               bool is_k_full, bool has_zp,
                               bool use_fp32_reduce, bool is_zp_float);

torch::Tensor gptq_marlin_repack(torch::Tensor& b_q_weight, torch::Tensor& perm,
                                 int64_t size_k, int64_t size_n,
                                 int64_t num_bits);

torch::Tensor gptq_marlin_repack_meta(torch::Tensor& b_q_weight,
                                      torch::Tensor& perm, c10::SymInt size_k,
                                      c10::SymInt size_n, int64_t num_bits);

torch::Tensor awq_marlin_repack(torch::Tensor& b_q_weight, int64_t size_k,
                                int64_t size_n, int64_t num_bits);

torch::Tensor awq_marlin_repack_meta(torch::Tensor& b_q_weight,
                                     c10::SymInt size_k, c10::SymInt size_n,
                                     int64_t num_bits);

torch::Tensor fp8_marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight,
                              torch::Tensor& b_scales, torch::Tensor& workspace,
                              int64_t num_bits, int64_t size_m, int64_t size_n,
                              int64_t size_k);
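// The Marlin entry points above take a caller-allocated `workspace` tensor
// used for cross-block reduction; it must be zero-initialized before the
// first call. Sketch (the sizing heuristic here is an assumption):
//   auto workspace = torch::zeros(
//       {size_n / 64 * 16}, torch::dtype(torch::kInt).device(a.device()));
//   auto c = marlin_gemm(a, b_q_weight, b_scales, workspace,
//                        size_m, size_n, size_k);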
// GGUF
torch::Tensor ggml_dequantize(torch::Tensor W, int64_t type, int64_t m,
                              int64_t n);

torch::Tensor ggml_mul_mat_vec_a8(torch::Tensor W, torch::Tensor X,
                                  int64_t type, int64_t row);

torch::Tensor ggml_mul_mat_a8(torch::Tensor W, torch::Tensor X, int64_t type,
                              int64_t row);
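// Usage sketch (`W` is a GGUF-quantized weight blob and `type` a GGML
// quantization enum value, e.g. 12 for Q4_K; both are assumptions here):
//   auto w_fp16 = ggml_dequantize(W, /*type=*/12, /*m=*/rows, /*n=*/cols);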
// QuIP#
at::Tensor e8p_mm_origorder(const at::Tensor& A, const at::Tensor& B,
                            const at::Tensor& CB);

void decompress_e8p_origorder(torch::Tensor YIs, torch::Tensor CB,
                              torch::Tensor& Y);
#ifndef _WIN32
// Cutlass Kernels
bool cutlass_scaled_mm_supports_fp8(int64_t cuda_device_capability);

void cutlass_scaled_mm(torch::Tensor& out, torch::Tensor const& a,
                       torch::Tensor const& b, torch::Tensor const& a_scales,
                       torch::Tensor const& b_scales,
                       c10::optional<torch::Tensor> const& bias);

void cutlass_scaled_mm_azp(torch::Tensor& out, torch::Tensor const& a,
                           torch::Tensor const& b,
                           torch::Tensor const& a_scales,
                           torch::Tensor const& b_scales,
                           torch::Tensor const& azp_adj,
                           c10::optional<torch::Tensor> const& azp,
                           c10::optional<torch::Tensor> const& bias);
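// Usage sketch (assumes an int8 w8a8 path: row-major `a`, column-major `b`,
// fp32 scales; names and shapes are illustrative):
//   auto out = torch::empty(
//       {m, n}, torch::dtype(torch::kBFloat16).device(a.device()));
//   cutlass_scaled_mm(out, a, b, a_scales, b_scales, /*bias=*/c10::nullopt);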
// Machete Kernels
namespace machete {

std::vector<std::string> supported_schedules(
    aphrodite::ScalarTypeTorchPtr const& btype);

torch::Tensor gemm(torch::Tensor const& A, torch::Tensor const& B,
                   aphrodite::ScalarTypeTorchPtr const& btype,
                   c10::optional<torch::Tensor> const& scales,
                   c10::optional<torch::Tensor> const& zeros,
                   c10::optional<int64_t> group_size,
                   c10::optional<torch::Tensor> const& C,
                   c10::optional<double> alpha, c10::optional<double> beta,
                   c10::optional<std::string> schedule);

torch::Tensor prepack_B(torch::Tensor const& B,
                        aphrodite::ScalarTypeTorchPtr const& btype);

}  // namespace machete
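// Usage sketch (the prepacked-B contract and the grouping value are
// assumptions; consult the kernel sources for the exact contract):
//   auto B_packed = machete::prepack_B(B, btype);
//   auto D = machete::gemm(A, B_packed, btype, scales, /*zeros=*/c10::nullopt,
//                          /*group_size=*/128, /*C=*/c10::nullopt,
//                          /*alpha=*/c10::nullopt, /*beta=*/c10::nullopt,
//                          /*schedule=*/c10::nullopt);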
torch::Tensor marlin_qqq_gemm(torch::Tensor const& a,
                              torch::Tensor const& b_q_weight,
                              torch::Tensor const& s_tok,
                              torch::Tensor const& s_ch,
                              torch::Tensor const& s_group,
                              torch::Tensor& workspace, int64_t size_m,
                              int64_t size_n, int64_t size_k);
#endif
torch::Tensor fp_eXmY_linear_forward_cuda(int64_t EXPONENT, int64_t MANTISSA,
                                          torch::Tensor _in_feats,
                                          torch::Tensor _weights,
                                          torch::Tensor _scales,
                                          int64_t splitK = 1);
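// Usage sketch (EXPONENT/MANTISSA select the FPx weight format, e.g. 3 and 2
// for E3M2/FP6; operand layouts are assumptions):
//   auto out = fp_eXmY_linear_forward_cuda(/*EXPONENT=*/3, /*MANTISSA=*/2,
//                                          in_feats, weights, scales);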
#endif
void static_scaled_int8_quant(torch::Tensor& out, torch::Tensor const& input,
                              torch::Tensor const& scale,
                              c10::optional<torch::Tensor> const& azp);

void dynamic_scaled_int8_quant(torch::Tensor& out, torch::Tensor const& input,
                               torch::Tensor& scales,
                               c10::optional<torch::Tensor> const& azp);
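// Usage sketch (`out` is a preallocated int8 tensor, `scales` a per-token
// fp32 tensor the kernel fills in; the shapes are assumptions):
//   auto out = torch::empty_like(input, torch::kChar);
//   auto scales = torch::empty({input.size(0), 1},
//                              input.options().dtype(torch::kFloat));
//   dynamic_scaled_int8_quant(out, input, scales, /*azp=*/c10::nullopt);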
// SqueezeLLM
void squeezellm_gemm(torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
                     torch::Tensor lookup_table);
// FP8
void static_scaled_fp8_quant(torch::Tensor& out, torch::Tensor const& input,
                             torch::Tensor const& scale);

void dynamic_scaled_fp8_quant(torch::Tensor& out, torch::Tensor const& input,
                              torch::Tensor& scale);

void dynamic_per_token_scaled_fp8_quant(
    torch::Tensor& out, torch::Tensor const& input, torch::Tensor& scale,
    c10::optional<torch::Tensor> const& scale_ub);
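// Usage sketch (fp8 dtype availability depends on the PyTorch build; names
// are illustrative):
//   auto out = torch::empty_like(input, torch::kFloat8_e4m3fn);
//   static_scaled_fp8_quant(out, input, scale);  // scale: scalar fp32 tensor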