# compressed_tensors.py

from typing import Any, Dict, List, Optional

import torch
from pydantic import BaseModel

from aphrodite.modeling.layers.linear import (LinearBase, LinearMethodBase,
                                              UnquantizedLinearMethod)
from aphrodite.platforms import current_platform
from aphrodite.quantization.base_config import (  # noqa: E501
    QuantizationConfig, QuantizeMethodBase)
from aphrodite.quantization.compressed_tensors.schemes import (
    W4A16SPARSE24_SUPPORTED_BITS, WNA16_SUPPORTED_BITS,
    CompressedTensorsScheme, CompressedTensorsW4A16Sparse24,
    CompressedTensorsW8A8Fp8, CompressedTensorsW8A8Int8,
    CompressedTensorsW8A16Fp8, CompressedTensorsWNA16)
from aphrodite.quantization.compressed_tensors.utils import (
    CompressionFormat, QuantizationArgs, QuantizationStrategy,
    QuantizationType, find_matched_target, is_activation_quantization_format,
    should_ignore_layer)
from aphrodite.quantization.kv_cache import BaseKVCacheMethod

__all__ = ["CompressedTensorsLinearMethod"]


class CompressedTensorsConfig(QuantizationConfig):

    def __init__(self,
                 target_scheme_map: Dict[str, Any],
                 ignore: List[str],
                 quant_format: str,
                 kv_cache_scheme: Optional[Dict[str, Any]] = None):
        self.ignore = ignore
        self.quant_format = quant_format
        # Map from [target -> scheme]
        self.target_scheme_map = target_scheme_map
        self.kv_cache_scheme = kv_cache_scheme

    def get_linear_method(self) -> "CompressedTensorsLinearMethod":
        return CompressedTensorsLinearMethod(self)

    def get_scaled_act_names(self) -> List[str]:
        return []

    def get_supported_act_dtypes(cls) -> List[torch.dtype]:
        return [torch.float16, torch.bfloat16]

    @classmethod
    def get_min_capability(cls) -> int:
        return 70

    def get_name(self) -> str:
        return "compressed_tensors"

    def get_quant_method(
        self,
        layer: torch.nn.Module,
        prefix: str,
    ) -> Optional["QuantizeMethodBase"]:
        from aphrodite.attention.layer import (
            Attention)  # Avoid circular import

        # Check if the layer is skipped for quantization.
        # TODO: support module names
        if should_ignore_layer(prefix, ignore=self.ignore):
            return UnquantizedLinearMethod()
        if isinstance(layer, LinearBase):
            scheme = self.get_scheme(layer=layer, layer_name=prefix)
            layer.scheme = scheme
            return CompressedTensorsLinearMethod(self)
        if isinstance(layer, Attention):
            return CompressedTensorsKVCacheMethod(self)
        return None

    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "CompressedTensorsConfig":
        target_scheme_map: Dict[str, Any] = dict()
        ignore: List[str] = config.get("ignore", None)
        quant_format: str = config.get("format", None)

        # The quant_config has multiple config_groups, each containing
        # an input_activations key with details about how the activations are
        # quantized, a weights key indicating how the weights are quantized,
        # and a list of targets under the `targets` key, dictating which
        # layers are impacted by the quantization details. The quantization
        # details follow the structure defined by the QuantizationArgs
        # pydantic model, which is used to verify the structure of the
        # quant_config and also store the details for later use.
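        # Illustrative example of one config_group in a checkpoint's
        # quantization config (hypothetical, abbreviated):
        #   "config_groups": {
        #       "group_0": {
        #           "targets": ["Linear"],
        #           "weights": {"num_bits": 8, "type": "float",
        #                       "strategy": "tensor", "symmetric": true,
        #                       "dynamic": false},
        #           "input_activations": {"num_bits": 8, "type": "float",
        #                                 "strategy": "tensor",
        #                                 "dynamic": false}
        #       }
        #   }
        # This would populate target_scheme_map["Linear"] with parsed
        # QuantizationArgs for both "weights" and "input_activations".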
        for _, quant_config in config["config_groups"].items():
            targets = quant_config.get("targets")
            for target in targets:
                target_scheme_map[target] = {}
                target_scheme_map[target][
                    "weights"] = QuantizationArgs.parse_obj(
                        quant_config.get("weights"))
                try:
                    target_scheme_map[target][
                        "input_activations"] = QuantizationArgs.parse_obj(
                            quant_config.get("input_activations"))
                except Exception:
                    target_scheme_map[target]["input_activations"] = None

        return cls(target_scheme_map=target_scheme_map,
                   ignore=ignore,
                   quant_format=quant_format,
                   kv_cache_scheme=config.get("kv_cache_scheme"))

    @classmethod
    def get_config_filenames(cls) -> List[str]:
        return []

    def _check_scheme_supported(self,
                                min_capability: int,
                                error: bool = True) -> bool:
        capability = current_platform.get_device_capability()
        capability = capability[0] * 10 + capability[1]
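        # For example, an A100 (compute capability 8.0) yields 80 and an
        # H100 (9.0) yields 90, which is then compared against the minimum
        # capability reported by the scheme.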
        supported = capability >= min_capability
        if error and not supported:
            raise RuntimeError(
                "Quantization scheme is not supported for "
                f"the current GPU. Min capability: {min_capability}. "
                f"Current capability: {capability}.")
        return supported

    def _is_static_tensor_w8a8(self, weight_quant: BaseModel,
                               input_quant: BaseModel) -> bool:
        is_8_bits = weight_quant.num_bits == input_quant.num_bits == 8
        weight_strategy = (
            weight_quant.strategy == QuantizationStrategy.TENSOR.value
            or weight_quant.strategy == QuantizationStrategy.CHANNEL.value)
        is_tensor = (weight_strategy and input_quant.strategy
                     == QuantizationStrategy.TENSOR.value)
        is_symmetric = weight_quant.symmetric and input_quant.symmetric
        is_static = not weight_quant.dynamic and not input_quant.dynamic
        return is_8_bits and is_tensor and is_symmetric and is_static

    def _is_dynamic_token_w8a8(self, weight_quant: BaseModel,
                               input_quant: BaseModel) -> bool:
        is_8_bits = weight_quant.num_bits == input_quant.num_bits == 8
        weight_strategy = (
            weight_quant.strategy == QuantizationStrategy.TENSOR.value
            or weight_quant.strategy == QuantizationStrategy.CHANNEL.value)
        is_token = (weight_strategy and input_quant.strategy
                    == QuantizationStrategy.TOKEN.value)
        is_symmetric = weight_quant.symmetric and input_quant.symmetric
        is_dynamic = not weight_quant.dynamic and input_quant.dynamic
        return is_8_bits and is_token and is_symmetric and is_dynamic

    def _is_fp8_w8a8(self, weight_quant: BaseModel,
                     input_quant: BaseModel) -> bool:
        # Confirm weights and activations quantized.
        if weight_quant is None or input_quant is None:
            return False

        # Confirm weight scheme is supported.
        is_floating_point = (weight_quant.type == QuantizationType.FLOAT
                             and input_quant.type == QuantizationType.FLOAT)
        is_symmetric_weight = weight_quant.symmetric
        is_static_weight = not weight_quant.dynamic
        is_per_tensor_or_channel_weight = (weight_quant.strategy in [
            QuantizationStrategy.TENSOR, QuantizationStrategy.CHANNEL
        ])
        if not (is_floating_point and is_symmetric_weight and is_static_weight
                and is_per_tensor_or_channel_weight):
            return False

        # Dynamic quantization is always supported if weights supported.
        if input_quant.dynamic:
            return True

        # Confirm activation scheme is supported.
        is_symmetric_activation = input_quant.symmetric
        is_per_tensor_activation = (
            input_quant.strategy == QuantizationStrategy.TENSOR)
        return is_symmetric_activation and is_per_tensor_activation

    def _is_fp8_w8a16(self, weight_quant: BaseModel,
                      input_quant: BaseModel) -> bool:
        # Confirm weights quantized.
        if weight_quant is None:
            return False

        # Confirm we have floating points.
        if weight_quant.type != QuantizationType.FLOAT:
            return False

        # Confirm weight scheme is supported.
        is_symmetric_weight = weight_quant.symmetric
        is_static_weight = not weight_quant.dynamic
        is_per_tensor_or_channel_weight = (weight_quant.strategy in [
            QuantizationStrategy.TENSOR, QuantizationStrategy.CHANNEL
        ])
        if not (is_symmetric_weight and is_static_weight
                and is_per_tensor_or_channel_weight):
            return False

        # All conditions satisfied.
        return True

    def _is_wNa16_group_channel(self, weight_quant: BaseModel,
                                input_quant: BaseModel) -> bool:
        input_quant_none = input_quant is None
        is_symmetric = weight_quant.symmetric
        is_channel_group = (
            weight_quant.strategy == QuantizationStrategy.CHANNEL.value
            or weight_quant.strategy == QuantizationStrategy.GROUP.value)
        is_static = not weight_quant.dynamic
        return (is_channel_group and input_quant_none and is_symmetric
                and is_static)

    def _get_scheme_from_parts(
            self, weight_quant: BaseModel,
            input_quant: BaseModel) -> "CompressedTensorsScheme":
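        # Illustrative mapping, assuming the checks below pass and the
        # compression format matches:
        #   * integer weights (grouped/per-channel), no input quantization
        #     -> CompressedTensorsWNA16 or CompressedTensorsW4A16Sparse24
        #   * fp8 weights + fp8 activations on supported hardware
        #     -> CompressedTensorsW8A8Fp8 (otherwise W8A16Fp8 fallback)
        #   * int8 weights + int8 activations (static tensor or dynamic token)
        #     -> CompressedTensorsW8A8Int8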

        # Detect If Mixed Precision
        if self._is_wNa16_group_channel(weight_quant, input_quant):
            if (self.quant_format == CompressionFormat.marlin_24.value
                    and weight_quant.num_bits in W4A16SPARSE24_SUPPORTED_BITS):
                return CompressedTensorsW4A16Sparse24(
                    strategy=weight_quant.strategy,
                    num_bits=weight_quant.num_bits,
                    group_size=weight_quant.group_size)
            if (self.quant_format == CompressionFormat.pack_quantized.value
                    and weight_quant.num_bits in WNA16_SUPPORTED_BITS):
                return CompressedTensorsWNA16(
                    num_bits=weight_quant.num_bits,
                    strategy=weight_quant.strategy,
                    group_size=weight_quant.group_size,
                    actorder=weight_quant.actorder)

        # Detect If Activation Quantization.
        # TODO @dsikka: clean-up conditions
        if is_activation_quantization_format(self.quant_format):
            if self._is_fp8_w8a8(weight_quant, input_quant):
                is_fp8_w8a8_supported = self._check_scheme_supported(
                    CompressedTensorsW8A8Fp8.get_min_capability(), error=False)
                if is_fp8_w8a8_supported:
                    return CompressedTensorsW8A8Fp8(
                        strategy=weight_quant.strategy,
                        is_static_input_scheme=(input_quant
                                                and not input_quant.dynamic))
                else:
                    return CompressedTensorsW8A16Fp8(
                        strategy=weight_quant.strategy,
                        is_static_input_scheme=(input_quant
                                                and not input_quant.dynamic))

            if self._is_fp8_w8a16(weight_quant, input_quant):
                return CompressedTensorsW8A16Fp8(
                    strategy=weight_quant.strategy,
                    is_static_input_scheme=(input_quant
                                            and not input_quant.dynamic))

            if self._is_static_tensor_w8a8(weight_quant, input_quant):
                return CompressedTensorsW8A8Int8(
                    strategy=weight_quant.strategy,
                    is_static_input_scheme=True)

            if self._is_dynamic_token_w8a8(weight_quant, input_quant):
                return CompressedTensorsW8A8Int8(
                    strategy=weight_quant.strategy,
                    is_static_input_scheme=False)

        raise NotImplementedError(
            "No compressed-tensors compatible scheme was found.")

    def get_scheme(
            self,
            layer: torch.nn.Module,
            layer_name: Optional[str] = None) -> "CompressedTensorsScheme":
        """
        compressed-tensors supports non-uniform quantization in the
        following way:

        ignore: List of layer_names or nn.Module names to be ignored.
        targets of config_groups: There can be N config_groups, each with
            its own quantization scheme. Each config_group has a list of
            targets which can be a full layer_name, a regex for a
            layer_name, or an nn.Module name.

        We first check whether a layer is in the ignore group and use the
        CompressedTensorsUnquantized (i.e. fp16/bf16) scheme for the layer.
        We then detect whether a layer_name is found in any target and use
        the quantization scheme corresponding to the matched target to
        select the CompressedTensorsScheme used for inference.
        """
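        # Illustrative (hypothetical) example: with targets
        # {"Linear", "re:.*q_proj"}, a layer named
        # "model.layers.0.self_attn.q_proj" matches the regex target and
        # picks up its scheme, while other Linear layers fall back to the
        # generic "Linear" target.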

        # Find the "target" in the compressed-tensors config
        # that our layer conforms to.
        # TODO: add compressed-tensors as dep
        # so we do not have to re-write these functions
        # need to make accelerate optional in ct to do this
        matched_target = find_matched_target(
            layer_name=layer_name,
            module=layer,
            targets=self.target_scheme_map.keys())

        # Find the quant_scheme
        scheme_dict = self.target_scheme_map[matched_target]
        scheme = self._get_scheme_from_parts(
            weight_quant=scheme_dict["weights"],
            input_quant=scheme_dict["input_activations"])

        # Raise error if device does not support the scheme
        # (e.g. fp8 needs ada lovelace)
        self._check_scheme_supported(scheme.get_min_capability())
        return scheme


class CompressedTensorsLinearMethod(LinearMethodBase):

    def __init__(self, quantization_config: CompressedTensorsConfig):
        self.quantization_config = quantization_config

    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
        layer.scheme.process_weights_after_loading(layer)

    def create_weights(self, layer: torch.nn.Module,
                       input_size_per_partition: int,
                       output_partition_sizes: List[int], input_size: int,
                       output_size: int, params_dtype: torch.dtype,
                       **extra_weight_attrs):
        """
        Use the CompressedTensorsScheme associated with each layer to create
        the necessary parameters for the layer. See LinearMethodBase for
        param details.
        """
        weight_loader = extra_weight_attrs.get("weight_loader")
        layer.scheme.create_weights(
            layer=layer,
            input_size=input_size,
            input_size_per_partition=input_size_per_partition,
            output_partition_sizes=output_partition_sizes,
            output_size=output_size,
            params_dtype=params_dtype,
            weight_loader=weight_loader)

    def apply(self,
              layer: torch.nn.Module,
              x: torch.Tensor,
              bias: Optional[torch.Tensor] = None):
        """
        Use the output of create_weights and the CompressedTensorsScheme
        associated with the layer to apply the forward pass with the layer
        input. See LinearMethodBase for param details.
        """
        scheme = layer.scheme
        if scheme is None:
            raise ValueError("A scheme must be defined for each layer")
        return scheme.apply_weights(layer, x, bias=bias)


class CompressedTensorsKVCacheMethod(BaseKVCacheMethod):
    """
    Supports loading kv-cache scaling factors from compressed-tensors
    checkpoints.
    """

    def __init__(self, quant_config: CompressedTensorsConfig):
        self.validate_kv_cache_scheme(quant_config.kv_cache_scheme)
        super().__init__(quant_config)

    @staticmethod
    def validate_kv_cache_scheme(kv_cache_scheme: Optional[Dict[str, Any]]):
        """
        Validator for the kv cache scheme. Useful for controlling which
        kv cache quantization schemes are supported in Aphrodite.

        :param kv_cache_scheme: the compressed-tensors kv cache scheme
        """
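        # A scheme that passes the checks below would look roughly like
        # (hypothetical values):
        #   {"type": "float", "num_bits": 8, "strategy": "tensor",
        #    "symmetric": True}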
        if kv_cache_scheme is None:
            return

        type_ = kv_cache_scheme.get("type")
        num_bits = kv_cache_scheme.get("num_bits")

        if type_ != "float" and num_bits != 8:
            raise NotImplementedError(
                "Currently supported kv cache quantization is "
                "num_bits=8, type=float, however "
                f"received num_bits={num_bits}, type={type_}")

        strategy = kv_cache_scheme.get("strategy")
        if strategy != "tensor":
            raise NotImplementedError(
                "Only support per-tensor scaling factor "
                "for compressed-tensors KV cache. "
                f"Expected strategy: tensor, found strategy: {strategy}")

        is_symmetric = kv_cache_scheme.get("symmetric")
        if not is_symmetric:
            raise NotImplementedError(
                "Only support symmetric scaling factor "
                "for compressed-tensors KV cache. "
                f"However found symmetric: {is_symmetric}")