awq_marlin.py 10.0 KB

from typing import Any, Dict, List, Optional

import torch
from loguru import logger
from torch.nn.parameter import Parameter

from aphrodite import _custom_ops as ops
from aphrodite.modeling.layers.linear import (LinearBase, LinearMethodBase,
                                              set_weight_attrs)
from aphrodite.modeling.layers.vocab_parallel_embedding import ParallelLMHead
from aphrodite.quantization.base_config import QuantizationConfig
from aphrodite.quantization.utils.marlin_utils import (
    apply_awq_marlin_linear, awq_to_marlin_zero_points, check_marlin_supported,
    marlin_make_empty_g_idx, marlin_make_workspace, marlin_permute_scales,
    replace_tensor, verify_marlin_supported, verify_marlin_supports_shape)
from aphrodite.scalar_type import scalar_types

class AWQMarlinConfig(QuantizationConfig):
    """Config class for AWQ Marlin"""

    # num_bits -> type
    TYPE_MAP = {
        4: scalar_types.uint4,
        8: scalar_types.uint8,
    }

    def __init__(self, weight_bits: int, group_size: int, has_zp: bool,
                 lm_head_quantized: bool) -> None:
        self.pack_factor = 32 // weight_bits  # packed into int32
        self.group_size = group_size
        self.has_zp = has_zp
        self.lm_head_quantized = lm_head_quantized

        if weight_bits not in self.TYPE_MAP:
            raise ValueError(f"Unsupported num_bits = {weight_bits}. "
                             f"Supported num_bits = {self.TYPE_MAP.keys()}")
        self.quant_type = self.TYPE_MAP[weight_bits]

        verify_marlin_supported(self.quant_type,
                                group_size=self.group_size,
                                has_zp=self.has_zp)

    def __repr__(self) -> str:
        return (f"AWQMarlinConfig(quant_type={self.quant_type}, "
                f"group_size={self.group_size}, "
                f"has_zp={self.has_zp}, "
                f"lm_head_quantized={self.lm_head_quantized})")

    @classmethod
    def get_name(cls) -> str:
        return "awq_marlin"

    @classmethod
    def get_supported_act_dtypes(cls) -> List[torch.dtype]:
        return [torch.half, torch.bfloat16]

    @classmethod
    def get_min_capability(cls) -> int:
        return 80

    @classmethod
    def get_config_filenames(cls) -> List[str]:
        return ["quantize_config.json"]

    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "AWQMarlinConfig":
        weight_bits = cls.get_from_keys(config, ["bits"])
        group_size = cls.get_from_keys(config, ["group_size"])
        has_zp = cls.get_from_keys(config, ["zero_point"])
        lm_head_quantized = cls.get_from_keys_or(config, ["lm_head"],
                                                 default=False)
        return cls(weight_bits, group_size, has_zp, lm_head_quantized)

    @classmethod
    def override_quantization_method(cls, hf_quant_cfg,
                                     user_quant) -> Optional[str]:
        can_convert = cls.is_awq_marlin_compatible(hf_quant_cfg)
        is_valid_user_quant = (user_quant is None or user_quant == "marlin"
                               or user_quant == "awq_marlin")

        if can_convert and is_valid_user_quant:
            msg = ("The model is convertible to {} during runtime."
                   " Using {} kernel.".format(cls.get_name(), cls.get_name()))
            logger.info(msg)
            return cls.get_name()

        if can_convert and user_quant == "awq":
            logger.info("Detected that the model can run with awq_marlin"
                        ", however you specified quantization=awq explicitly,"
                        " so forcing awq. Use quantization=awq_marlin for"
                        " faster inference")
        return None

    def get_quant_method(self, layer: torch.nn.Module,
                         prefix: str) -> Optional["AWQMarlinLinearMethod"]:
        if (isinstance(layer, LinearBase) or
            (isinstance(layer, ParallelLMHead) and self.lm_head_quantized)):
            return AWQMarlinLinearMethod(self)
        return None

    def get_scaled_act_names(self) -> List[str]:
        return []

    @classmethod
    def is_awq_marlin_compatible(cls, quant_config: Dict[str, Any]):
        # Extract data from quant config.
        quant_method = quant_config.get("quant_method", "").lower()
        num_bits = quant_config.get("bits", None)
        group_size = quant_config.get("group_size", None)
        has_zp = quant_config.get("zero_point", None)

        if quant_method != "awq":
            return False

        # If we cannot find the info needed in the config, cannot convert.
        if (num_bits is None or group_size is None or has_zp is None):
            return False

        if num_bits not in cls.TYPE_MAP:
            return False

        return check_marlin_supported(quant_type=cls.TYPE_MAP[num_bits],
                                      group_size=group_size,
                                      has_zp=has_zp)
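
# Illustrative sketch (not part of the original file): the shape of the
# AutoAWQ-style quantization config that from_config() and
# is_awq_marlin_compatible() consume. The key names ("quant_method", "bits",
# "group_size", "zero_point", "lm_head") are the ones read above; the
# concrete values are assumptions for the example.
#
#     example_cfg = {
#         "quant_method": "awq",
#         "bits": 4,           # -> scalar_types.uint4, pack_factor = 32 // 4 = 8
#         "group_size": 128,   # one scale / zero-point group per 128 input channels
#         "zero_point": True,  # AWQ stores asymmetric zero-points
#     }
#     if AWQMarlinConfig.is_awq_marlin_compatible(example_cfg):
#         quant_config = AWQMarlinConfig.from_config(example_cfg)
#         # "lm_head" is absent, so lm_head_quantized defaults to False.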

class AWQMarlinLinearMethod(LinearMethodBase):
    """Linear method for AWQ Marlin.

    Args:
        quant_config: The AWQ Marlin quantization config.
    """

    def __init__(self, quant_config: AWQMarlinConfig) -> None:
        self.quant_config = quant_config

    def create_weights(
        self,
        layer: torch.nn.Module,
        input_size_per_partition: int,
        output_partition_sizes: List[int],
        input_size: int,
        output_size: int,
        params_dtype: torch.dtype,
        **extra_weight_attrs,
    ) -> None:
        del output_size
        output_size_per_partition = sum(output_partition_sizes)

        # Normalize group_size
        if self.quant_config.group_size != -1:
            group_size = self.quant_config.group_size
        else:
            group_size = input_size

        verify_marlin_supports_shape(
            output_size_per_partition=output_size_per_partition,
            input_size_per_partition=input_size_per_partition,
            input_size=input_size,
            group_size=group_size)

        qweight = Parameter(
            torch.empty(
                input_size_per_partition,
                output_size_per_partition // self.quant_config.pack_factor,
                dtype=torch.int32,
            ),
            requires_grad=False,
        )
        set_weight_attrs(
            qweight, {
                "input_dim": 0,
                "output_dim": 1,
                "packed_dim": 1,
                "pack_factor": self.quant_config.pack_factor,
            })

        num_groups = input_size_per_partition // group_size

        qzeros = Parameter(
            torch.empty(
                num_groups,
                output_size_per_partition // self.quant_config.pack_factor,
                dtype=torch.int32,
            ),
            requires_grad=False,
        )
        set_weight_attrs(
            qzeros, {
                "input_dim": 0,
                "output_dim": 1,
                "packed_dim": 1,
                "pack_factor": self.quant_config.pack_factor,
            })

        scales = Parameter(
            torch.empty(
                num_groups,
                output_size_per_partition,
                dtype=params_dtype,
            ),
            requires_grad=False,
        )
        set_weight_attrs(scales, {
            "input_dim": 0,
            "output_dim": 1,
        })

        layer.register_parameter("qweight", qweight)
        set_weight_attrs(qweight, extra_weight_attrs)
        layer.register_parameter("qzeros", qzeros)
        set_weight_attrs(qzeros, extra_weight_attrs)
        layer.register_parameter("scales", scales)
        set_weight_attrs(scales, extra_weight_attrs)

        layer.input_size_per_partition = input_size_per_partition
        layer.output_size_per_partition = output_size_per_partition
        layer.num_groups = num_groups
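
    # Illustrative sketch (assumed sizes, not part of the original file): for a
    # partition with K = 4096 input channels, N = 4096 output channels, 4-bit
    # weights (pack_factor = 32 // 4 = 8) and group_size = 128,
    # create_weights() above allocates:
    #     qweight: (4096, 4096 // 8)  = (4096, 512)  int32  (8 weights per int32)
    #     qzeros:  (4096 // 128, 512) = (32, 512)     int32  (packed zero-points)
    #     scales:  (32, 4096)                         params_dtype (one row per group)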

    # TODO: Update these docs.
    # Checkpoints are serialized in the AutoAWQ format, which differs from the
    # Marlin format. This method is called after the weights are loaded;
    # here, we handle the repacking.
    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
        device = layer.qweight.device

        # Allocate marlin workspace.
        layer.workspace = marlin_make_workspace(
            layer.output_size_per_partition, device)

        # Repack weights from AWQ format to marlin format.
        marlin_qweight = ops.awq_marlin_repack(
            layer.qweight,
            size_k=layer.input_size_per_partition,
            size_n=layer.output_size_per_partition,
            num_bits=self.quant_config.quant_type.size_bits)
        replace_tensor(layer, "qweight", marlin_qweight)

        # Permute scales from AWQ format to marlin format.
        marlin_scales = marlin_permute_scales(
            layer.scales,
            size_k=layer.input_size_per_partition,
            size_n=layer.output_size_per_partition,
            group_size=self.quant_config.group_size)
        replace_tensor(layer, "scales", marlin_scales)

        # Permute zero-points from AWQ format to marlin format.
        marlin_zp = awq_to_marlin_zero_points(
            layer.qzeros,
            size_k=layer.num_groups,
            size_n=layer.output_size_per_partition,
            num_bits=self.quant_config.quant_type.size_bits)
        replace_tensor(layer, "qzeros", marlin_zp)

        # g_idx is not used by AWQ Marlin; register empty placeholders.
        layer.g_idx = marlin_make_empty_g_idx(device)
        layer.g_idx_sort_indices = marlin_make_empty_g_idx(device)

    def apply(
        self,
        layer: torch.nn.Module,
        x: torch.Tensor,
        bias: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        return apply_awq_marlin_linear(
            input=x,
            weight=layer.qweight,
            weight_scale=layer.scales,
            weight_zp=layer.qzeros,
            g_idx=layer.g_idx,
            g_idx_sort_indices=layer.g_idx_sort_indices,
            workspace=layer.workspace,
            quant_type=self.quant_config.quant_type,
            output_size_per_partition=layer.output_size_per_partition,
            input_size_per_partition=layer.input_size_per_partition,
            bias=bias)
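
# Illustrative sketch (not part of the original file) of how the model loader
# is expected to drive this linear method; `layer`, `x`, and the size
# variables below are assumptions for the example, not names defined in this
# module.
#
#     quant_config = AWQMarlinConfig(weight_bits=4, group_size=128,
#                                    has_zp=True, lm_head_quantized=False)
#     method = AWQMarlinLinearMethod(quant_config)
#     method.create_weights(layer, input_size_per_partition,
#                           output_partition_sizes, input_size, output_size,
#                           params_dtype=torch.float16)
#     # ... the checkpoint loader then fills layer.qweight, layer.qzeros and
#     # layer.scales with the AutoAWQ-serialized tensors ...
#     method.process_weights_after_loading(layer)  # repack to the Marlin layout
#     y = method.apply(layer, x, bias=None)        # fused Marlin GEMM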