// quant_ops.h

#pragma once

#include <torch/extension.h>

#include <optional>  // std::optional, used in aqlm_gemm's bias parameter

#ifndef USE_ROCM
// AQLM
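// AQLM (Additive Quantization of Language Models) stores weights as integer
// `codes` indexing learned `codebooks`, with per-output-channel `scales`;
// `codebook_partition_sizes` describes how the output dimension is split
// across codebook partitions. `aqlm_gemm` multiplies `input` against the
// compressed weight directly; `aqlm_dequant` reconstructs the dense weight.
// (Summaries here and below are inferred from the signatures; the kernel
// sources are authoritative for shapes and dtypes.)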
torch::Tensor aqlm_gemm(const torch::Tensor& input, const torch::Tensor& codes,
                        const torch::Tensor& codebooks,
                        const torch::Tensor& scales,
                        const torch::Tensor& codebook_partition_sizes,
                        const std::optional<torch::Tensor>& bias);

torch::Tensor aqlm_dequant(const torch::Tensor& codes,
                           const torch::Tensor& codebooks,
                           const torch::Tensor& codebook_partition_sizes);

// AWQ
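// AWQ (Activation-aware Weight Quantization) packs 4-bit weights into the
// int32 `_kernel` tensor with per-group `_scaling_factors` and `_zeros`;
// `split_k_iters` splits the reduction dimension across thread blocks.
// `awq_gemm` fuses dequantization into the matmul, while `awq_dequantize`
// materializes the floating-point weight (`thx`/`thy` appear to override the
// default thread-block shape).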
torch::Tensor awq_gemm(torch::Tensor _in_feats, torch::Tensor _kernel,
                       torch::Tensor _scaling_factors, torch::Tensor _zeros,
                       int split_k_iters);

torch::Tensor awq_dequantize(torch::Tensor _kernel,
                             torch::Tensor _scaling_factors,
                             torch::Tensor _zeros, int split_k_iters, int thx,
                             int thy);
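
// Grouped (MoE) variant of the AWQ GEMM: `_topk_weights`,
// `_sorted_token_ids_ptr`, `_expert_ids_ptr`, and `_num_tokens_post_padded`
// route padded, expert-sorted tokens to per-expert weight slices, in the
// style of fused-MoE kernels; `mul_weights` presumably scales outputs by the
// top-k routing weights. (Interpretation inferred from the parameter names.)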
torch::Tensor awq_group_gemm(torch::Tensor _in_feats, torch::Tensor _kernel,
                             torch::Tensor _scaling_factors,
                             torch::Tensor _zeros, torch::Tensor _topk_weights,
                             torch::Tensor _sorted_token_ids_ptr,
                             torch::Tensor _expert_ids_ptr,
                             torch::Tensor _num_tokens_post_padded,
                             bool mul_weights, int split_k_iters);
#endif

// ExLlamav2
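// `make_q_matrix` assembles a device-side ExLlamaV2 quantized-matrix object
// and returns it as an opaque `uintptr_t` handle, which `exl2_gemm` then
// multiplies activations `a` against. The handle holds raw pointers, so the
// backing tensors must outlive it.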
torch::Tensor exl2_gemm(torch::Tensor a, uintptr_t b);

uintptr_t make_q_matrix(torch::Tensor q_weight, torch::Tensor q_perm,
                        torch::Tensor q_invperm, torch::Tensor q_scale,
                        torch::Tensor q_scale_max, torch::Tensor q_groups,
                        torch::Tensor q_group_map);

#ifndef USE_ROCM
// GGUF
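// `type` selects the GGML/GGUF quantization format (Q4_0, Q8_0, K-quants,
// ...). `ggml_dequantize` expands an m x n quantized tensor to floating
// point; the mul_mat_vec/mul_mat variants compute products directly on the
// quantized weight, with the `_a8` forms quantizing activations to 8 bit
// first, as in ggml's vec-dot kernels.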
torch::Tensor ggml_dequantize(torch::Tensor X, int8_t type, int64_t m,
                              int64_t n);

torch::Tensor ggml_mul_mat_vec(torch::Tensor W,  // quant weight
                               torch::Tensor X,  // input
                               int8_t type, int64_t m);

torch::Tensor ggml_mul_mat_vec_a8(torch::Tensor W,  // quant weight
                                  torch::Tensor X,  // input
                                  int8_t type, int64_t row);

torch::Tensor ggml_mul_mat_a8(torch::Tensor W,  // quant weight
                              torch::Tensor X,  // input
                              int8_t type, int64_t row);
#endif

// GPTQ
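// GPTQ keeps `bit`-width weights packed in `b_q_weight` with per-group
// `b_gptq_qzeros`/`b_gptq_scales` and an activation-order group index
// `b_g_idx`. `use_exllama` selects the ExLlama kernel path, which requires
// the weights to be reordered once beforehand via `gptq_shuffle`.
// `group_gptq_gemm` is the MoE variant (same token-routing arguments as
// `awq_group_gemm` above), and `dequant_gptq` materializes the dense weight.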
torch::Tensor gptq_gemm(torch::Tensor a, torch::Tensor b_q_weight,
                        torch::Tensor b_gptq_qzeros,
                        torch::Tensor b_gptq_scales, torch::Tensor b_g_idx,
                        bool use_exllama, int bit);

void gptq_shuffle(torch::Tensor q_weight, torch::Tensor q_perm, int bit);

torch::Tensor group_gptq_gemm(torch::Tensor a, torch::Tensor b_q_weight,
                              torch::Tensor b_gptq_qzeros,
                              torch::Tensor b_gptq_scales,
                              torch::Tensor b_g_idx, torch::Tensor topk_weights,
                              torch::Tensor sorted_token_ids_ptr,
                              torch::Tensor expert_ids_ptr,
                              torch::Tensor num_tokens_post_padded,
                              bool mul_weights, bool use_exllama);

torch::Tensor dequant_gptq(torch::Tensor b_q_weight,
                           torch::Tensor b_gptq_qzeros,
                           torch::Tensor b_gptq_scales, torch::Tensor b_g_idx,
                           int bits, bool use_exllama);

#ifndef USE_ROCM
// Marlin
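// Marlin is a fused fp16 x int4 GEMM that expects weights repacked into its
// own tile layout and a zero-initialized `workspace` tensor for inter-block
// synchronization. `gptq_marlin_repack` converts GPTQ-packed weights into
// that layout; `gptq_marlin_gemm` adds activation-order support
// (`g_idx`/`perm`, `is_k_full`); and `gptq_marlin_24_gemm` handles 2:4
// structured sparsity via the `b_meta` metadata tensor.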
torch::Tensor marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight,
                          torch::Tensor& b_scales, torch::Tensor& workspace,
                          int64_t size_m, int64_t size_n, int64_t size_k);

torch::Tensor gptq_marlin_24_gemm(torch::Tensor& a, torch::Tensor& b_q_weight,
                                  torch::Tensor& b_meta,
                                  torch::Tensor& b_scales,
                                  torch::Tensor& workspace, int64_t num_bits,
                                  int64_t size_m, int64_t size_n,
                                  int64_t size_k);

torch::Tensor gptq_marlin_gemm(torch::Tensor& a, torch::Tensor& b_q_weight,
                               torch::Tensor& b_scales, torch::Tensor& g_idx,
                               torch::Tensor& perm, torch::Tensor& workspace,
                               int64_t num_bits, int64_t size_m, int64_t size_n,
                               int64_t size_k, bool is_k_full);

torch::Tensor gptq_marlin_repack(torch::Tensor& b_q_weight, torch::Tensor& perm,
                                 int64_t size_k, int64_t size_n,
                                 int64_t num_bits);

// QuIP#
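// QuIP# quantizes weight groups to the E8 lattice ("E8P" codebook `CB`).
// `e8p_mm_origorder` multiplies `A` by the compressed `B` in original
// (unpermuted) column order; `decompress_e8p_origorder` expands codebook
// indices `YIs` into the dense output `Y`.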
at::Tensor e8p_mm_origorder(const at::Tensor& A, const at::Tensor& B,
                            const at::Tensor& CB);

void decompress_e8p_origorder(torch::Tensor YIs, torch::Tensor CB,
                              torch::Tensor& Y);

// SmoothQuant+
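// SmoothQuant+ runs 4-bit ("s4") weight quantization with fp16 activations.
// `autoquant_convert_s4_k_m8` repacks quantized weights plus their
// scales/zeros into the layout the GEMM expects, using `_workspace` as
// scratch; `autoquant_s4_f16_gemm` then multiplies `_in_feats` against the
// repacked `_kernel`.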
torch::Tensor autoquant_s4_f16_gemm(torch::Tensor _in_feats,
                                    torch::Tensor _kernel,
                                    torch::Tensor _scales_zeros);

void autoquant_convert_s4_k_m8(torch::Tensor _weight_dest,
                               torch::Tensor _quant_scales_zeros_dest,
                               torch::Tensor _workspace,
                               torch::Tensor _quant_weight_src,
                               torch::Tensor _quant_scales,
                               torch::Tensor _quant_zeros, int m, int k,
                               int group_size);
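
// CUTLASS-backed scaled matmul with dequantization: multiplies the quantized
// operands `a` and `b` and applies `a_scales`/`b_scales` in the epilogue,
// writing into `out`. (The int return value is presumably a status code.)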
int cutlass_scaled_mm_dq(torch::Tensor& out, torch::Tensor const& a,
                         torch::Tensor const& b, torch::Tensor const& a_scales,
                         torch::Tensor const& b_scales);
#endif
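
// Per-tensor int8 activation quantization with a precomputed static `scale`:
// writes round(input / scale), clamped to the int8 range, into `out`.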
void static_scaled_int8_quant(torch::Tensor& out, torch::Tensor& input,
                              float scale);

// SqueezeLLM
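// SqueezeLLM uses non-uniform (lookup-table) quantization: `lookup_table`
// holds per-channel centroid values that decode the packed weights in `mat`,
// and the product with `vec` is accumulated into `mul`.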
void squeezellm_gemm(torch::Tensor vec, torch::Tensor mat, torch::Tensor mul,
                     torch::Tensor lookup_table);

// FP8
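// FP8 activation quantization into `out`. The static variant reads a
// precomputed per-tensor `scale`; the dynamic variant computes the scale from
// the current input's absolute maximum and writes it back into `scale`.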
void static_scaled_fp8_quant(torch::Tensor& out, torch::Tensor& input,
                             torch::Tensor& scale);

void dynamic_scaled_fp8_quant(torch::Tensor& out, torch::Tensor& input,
                              torch::Tensor& scale);
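
// ----------------------------------------------------------------------------
// Usage sketch (illustrative, not part of this header): these declarations
// are typically exposed to Python through the extension's binding file. A
// minimal example, assuming a pybind11 torch-extension build; the selection
// of ops and the doc strings below are hypothetical:
//
//   #include <torch/extension.h>
//   #include "quant_ops.h"
//
//   PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
//     m.def("gptq_gemm", &gptq_gemm, "Quantized GEMM for GPTQ");
//     m.def("gptq_shuffle", &gptq_shuffle, "Reorder GPTQ weights for ExLlama");
//   #ifndef USE_ROCM
//     m.def("awq_gemm", &awq_gemm, "Quantized GEMM for AWQ");
//     m.def("marlin_gemm", &marlin_gemm, "Marlin fp16 x int4 GEMM");
//   #endif
//   }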