awq.py

from typing import Any, Dict, List, Optional
from contextlib import suppress

import torch
from torch.nn.parameter import Parameter

from aphrodite.modeling.layers.fused_moe import (moe_align_block_size,
                                                 fused_moe, fused_topk)
from aphrodite.modeling.layers.linear import (LinearMethodBase,
                                              set_weight_attrs)
from aphrodite.quantization.base_config import QuantizationConfig

HAS_QUANTS = False
with suppress(ImportError):
    from aphrodite._quant_C import quant_ops as ops
    HAS_QUANTS = True


class AWQConfig(QuantizationConfig):
    """Config class for AWQ.

    Reference: https://arxiv.org/abs/2306.00978
    """

    def __init__(
        self,
        weight_bits: int,
        group_size: int,
        zero_point: bool,
    ) -> None:
        if not HAS_QUANTS:
            raise ImportError("Could not find the quantization kernels.")
        self.weight_bits = weight_bits
        self.group_size = group_size
        self.zero_point = zero_point

        if self.weight_bits != 4:
            raise ValueError(
                "Currently, only 4-bit weight quantization is supported for "
                f"AWQ, but got {self.weight_bits} bits.")
        self.pack_factor = 32 // self.weight_bits

    def __repr__(self) -> str:
        return (f"AWQConfig(weight_bits={self.weight_bits}, "
                f"group_size={self.group_size}, "
                f"zero_point={self.zero_point})")

    def get_name(self) -> str:
        return "awq"

    def get_supported_act_dtypes(self) -> List[torch.dtype]:
        return [torch.half]

    def get_min_capability(self) -> int:
        # The AWQ kernel only supports Turing or newer GPUs.
        return 75

    @staticmethod
    def get_config_filenames() -> List[str]:
        return [
            "quant_config.json",
            "quantize_config.json",
        ]

    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "AWQConfig":
        weight_bits = cls.get_from_keys(config, ["w_bit", "bits"])
        group_size = cls.get_from_keys(config, ["q_group_size", "group_size"])
        zero_point = cls.get_from_keys(config, ["zero_point"])
        return cls(weight_bits, group_size, zero_point)

    def get_linear_method(self) -> "AWQLinearMethod":
        return AWQLinearMethod(self)

    def get_scaled_act_names(self) -> List[str]:
        return ["gelu", "gelu_fast", "gelu_new", "gelu_pytorch_tanh"]

    def merge_weight(self) -> bool:
        return True

    def rope_style(self) -> Optional[bool]:
        return None

    def quant_vocab(self) -> List[bool]:
        return [False, False]

    def support_fused_moe(self) -> bool:
        return True
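

# A minimal usage sketch for the config above; the concrete values are
# illustrative (not taken from any particular checkpoint):
#
#     config = {"w_bit": 4, "q_group_size": 128, "zero_point": True}
#     awq_config = AWQConfig.from_config(config)
#     awq_config.pack_factor       # -> 8: eight 4-bit weights per int32
#     linear_method = awq_config.get_linear_method()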


class AWQLinearMethod(LinearMethodBase):
    """Linear method for AWQ.

    Args:
        quant_config: The AWQ quantization config.
    """

    def __init__(self, quant_config: AWQConfig):
        self.quant_config = quant_config

    def create_weights(
        self,
        input_size_per_partition: int,
        output_partition_sizes: List[int],
        input_size: int,
        output_size: int,
        params_dtype: torch.dtype,
    ) -> Dict[str, Any]:
        if input_size_per_partition % self.quant_config.group_size != 0:
            raise ValueError(
                "The input size is not aligned with the quantized "
                "weight shape. This can be caused by too large "
                "tensor parallel size.")

        output_size_per_partition = sum(output_partition_sizes)
        if output_size_per_partition % self.quant_config.pack_factor != 0:
            raise ValueError(
                "The output size is not aligned with the quantized "
                "weight shape. This can be caused by too large "
                "tensor parallel size.")

        qweight = Parameter(
            torch.empty(
                input_size_per_partition,
                output_size_per_partition // self.quant_config.pack_factor,
                dtype=torch.int32,
            ),
            requires_grad=False,
        )
        set_weight_attrs(
            qweight, {
                "input_dim": 0,
                "output_dim": 1,
                "packed_dim": 1,
                "pack_factor": self.quant_config.pack_factor,
            })
        qzeros = Parameter(
            torch.empty(
                input_size_per_partition // self.quant_config.group_size,
                output_size_per_partition // self.quant_config.pack_factor,
                dtype=torch.int32,
            ),
            requires_grad=False,
        )
        set_weight_attrs(
            qzeros, {
                "input_dim": 0,
                "output_dim": 1,
                "packed_dim": 1,
                "pack_factor": self.quant_config.pack_factor,
            })
        scales = Parameter(
            torch.empty(
                input_size_per_partition // self.quant_config.group_size,
                output_size_per_partition,
                dtype=params_dtype,
            ),
            requires_grad=False,
        )
        set_weight_attrs(scales, {
            "input_dim": 0,
            "output_dim": 1,
        })
        return {
            "qweight": qweight,
            "qzeros": qzeros,
            "scales": scales,
        }
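
    # Worked example (assumed shapes, not from the original source): for a
    # layer with input_size_per_partition = 4096, output_size_per_partition =
    # 4096, group_size = 128 and pack_factor = 8, create_weights allocates
    #   qweight: [4096, 512]  int32   (eight 4-bit weights packed per element)
    #   qzeros:  [32, 512]    int32   (one packed zero point per group)
    #   scales:  [32, 4096]   params_dtype (one scale per group per output)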

    def apply_weights(self,
                      weights: Dict[str, Any],
                      x: torch.Tensor,
                      bias: Optional[torch.Tensor] = None) -> torch.Tensor:
        qweight = weights["qweight"]
        qzeros = weights["qzeros"]
        scales = weights["scales"]
        pack_factor = self.quant_config.pack_factor
        out_shape = (x.shape[:-1] + (qweight.shape[-1] * pack_factor, ))
        reshaped_x = x.reshape(-1, x.shape[-1])

        # For larger batches (num_tokens >= threshold), dequantizing the
        # weights and running a dense fp16 matmul is faster than the fused
        # int4 GEMM kernel.
        FP16_MATMUL_HEURISTIC_CONDITION = x.shape[:-1].numel() >= 256

        if FP16_MATMUL_HEURISTIC_CONDITION:
            out = ops.awq_dequantize(qweight, scales, qzeros, 0, 0, 0)
            out = torch.matmul(reshaped_x, out)
        else:
            out = ops.awq_gemm(reshaped_x, qweight, scales, qzeros,
                               pack_factor)
        if bias is not None:
            out = out + bias
        return out.reshape(out_shape)
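
    # Illustrative shapes for apply_weights, assuming the example layer above:
    #   x:       [num_tokens, 4096] fp16 activations
    #   qweight: [4096, 512] int32, so out_shape -> [num_tokens, 512 * 8]
    # awq_gemm and awq_dequantize are custom CUDA kernels provided by the
    # optional aphrodite._quant_C extension imported at the top of this file.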

    def apply_moe_weights(self, w1: Dict[str, torch.Tensor],
                          w2: Dict[str, torch.Tensor], x: torch.Tensor,
                          gating_output: torch.Tensor, topk: int,
                          renormalize: bool) -> torch.Tensor:
        # For larger batches, dequantize the expert weights and fall back to
        # the generic fp16 fused-MoE kernel.
        FP16_MATMUL_HEURISTIC_CONDITION = x.shape[:-1].numel() >= 1024
        if FP16_MATMUL_HEURISTIC_CONDITION:
            dequant_w1 = ops.awq_dequantize(w1["qweight"], w1["scales"],
                                            w1["qzeros"], 0, 0,
                                            0).permute(0, 2, 1)
            dequant_w2 = ops.awq_dequantize(w2["qweight"], w2["scales"],
                                            w2["qzeros"], 0, 0,
                                            0).permute(0, 2, 1)
            return fused_moe(x, dequant_w1, dequant_w2, gating_output, topk,
                             renormalize)

        # Pick the top-k experts per token, then sort the tokens by expert so
        # that each block of the grouped GEMM only touches one expert.
        topk_weights, topk_ids = fused_topk(gating_output, topk, renormalize)
        (sorted_token_ids, expert_ids,
         num_tokens_post_padded) = moe_align_block_size(
             topk_ids, 16, w1["qweight"].shape[0])

        x = x.view(x.shape[0], 1, *x.shape[1:])
        pack_factor = self.quant_config.pack_factor

        gate_up = ops.awq_group_gemm(x, w1["qweight"], w1["scales"],
                                     w1["qzeros"], topk_weights,
                                     sorted_token_ids, expert_ids,
                                     num_tokens_post_padded, False,
                                     pack_factor)

        # SwiGLU: SiLU on the gate half of gate_up, multiplied by the up half.
        out = torch.empty((gate_up.shape[:-1] + (gate_up.shape[-1] // 2, )),
                          dtype=x.dtype,
                          device=x.device)
        ops.silu_and_mul(out, gate_up)

        out = ops.awq_group_gemm(out, w2["qweight"], w2["scales"],
                                 w2["qzeros"], topk_weights, sorted_token_ids,
                                 expert_ids, num_tokens_post_padded, True,
                                 pack_factor)

        # Sum the routing-weighted expert outputs over the top-k dimension.
        return torch.sum(out, dim=1)
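

# End-to-end sketch (hypothetical wiring, illustration only): the model loader
# would build an AWQConfig from the checkpoint's quant config, get the linear
# method, allocate the packed tensors, load checkpoint values into them, and
# call apply_weights during the forward pass:
#
#     quant_config = AWQConfig.from_config(hf_quant_config)  # hf_quant_config assumed
#     method = quant_config.get_linear_method()
#     weights = method.create_weights(4096, [4096], 4096, 4096, torch.float16)
#     ...load checkpoint data into weights["qweight"], ["qzeros"], ["scales"]...
#     y = method.apply_weights(weights, x)  # x: fp16 activations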