from typing import Any, Dict, List, Optional, Set

import torch
from loguru import logger

from aphrodite.common.utils import is_hip
from aphrodite.modeling.layers.linear import LinearBase, LinearMethodBase
from aphrodite.modeling.layers.vocab_parallel_embedding import ParallelLMHead
from aphrodite.modeling.parameter import (ChannelQuantScaleParameter,
                                          GroupQuantScaleParameter,
                                          PackedAphroditeParameter,
                                          PackedColumnParameter,
                                          RowAphroditeParameter)
from aphrodite.quantization.base_config import QuantizationConfig
from aphrodite.quantization.kernels import (MPLinearLayerConfig,
                                            choose_mp_linear_kernel)
from aphrodite.quantization.utils.marlin_utils import (
    check_marlin_supported, marlin_repeat_scales_on_all_ranks,
    verify_marlin_supported)
from aphrodite.scalar_type import scalar_types


class GPTQMarlinConfig(QuantizationConfig):
    """Config class for GPTQ Marlin"""

    # (num_bits, is_sym) -> quant_type
    TYPE_MAP = {
        (4, True): scalar_types.uint4b8,
        (8, True): scalar_types.uint8b128,
    }

    def __init__(self, weight_bits: int, group_size: int, desc_act: bool,
                 is_sym: bool, lm_head_quantized: bool) -> None:
        if desc_act and group_size == -1:
            # In this case, act_order == True is the same as act_order == False
            # (since we have only one group per output channel)
            desc_act = False

        self.pack_factor = 32 // weight_bits  # packed into int32
        self.group_size = group_size
        self.desc_act = desc_act
        self.lm_head_quantized = lm_head_quantized

        if (weight_bits, is_sym) not in self.TYPE_MAP:
            raise ValueError("Unsupported quantization config: "
                             f"bits={weight_bits}, sym={is_sym}")

        self.quant_type = self.TYPE_MAP[(weight_bits, is_sym)]

    def __repr__(self) -> str:
        return (f"GPTQMarlinConfig(quant_type={self.quant_type}, "
                f"group_size={self.group_size}, "
                f"desc_act={self.desc_act}, "
                f"lm_head_quantized={self.lm_head_quantized})")

    @classmethod
    def get_name(cls) -> str:
        return "gptq_marlin"

    @classmethod
    def get_supported_act_dtypes(cls) -> List[torch.dtype]:
        return [torch.half, torch.bfloat16]

    @classmethod
    def get_min_capability(cls) -> int:
        return 80

    @classmethod
    def get_config_filenames(cls) -> List[str]:
        return ["quantize_config.json"]

    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "GPTQMarlinConfig":
        weight_bits = cls.get_from_keys(config, ["bits"])
        group_size = cls.get_from_keys(config, ["group_size"])
        desc_act = cls.get_from_keys(config, ["desc_act"])
        is_sym = cls.get_from_keys(config, ["sym"])
        lm_head_quantized = cls.get_from_keys_or(config, ["lm_head"],
                                                 default=False)
        return cls(weight_bits, group_size, desc_act, is_sym,
                   lm_head_quantized)
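
    # Note: ``from_config`` reads the standard GPTQ ``quantize_config.json``
    # fields ("bits", "group_size", "desc_act", "sym"); "lm_head" is optional
    # and defaults to False when the checkpoint does not provide it.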

    @classmethod
    def override_quantization_method(cls, hf_quant_cfg,
                                     user_quant) -> Optional[str]:
        can_convert = cls.is_gptq_marlin_compatible(hf_quant_cfg)

        is_valid_user_quant = (user_quant is None or user_quant == "marlin"
                               or user_quant == "gptq_marlin")

        if is_hip():
            return None

        if can_convert and is_valid_user_quant:
            msg = ("The model is convertible to {} during runtime."
                   " Using {} kernel.".format(cls.get_name(), cls.get_name()))
            logger.info(msg)
            return cls.get_name()

        if can_convert and user_quant == "gptq":
            logger.info("Detected that the model can run with gptq_marlin"
                        ", however you specified quantization=gptq explicitly,"
                        " so forcing gptq. Use quantization=gptq_marlin for"
                        " faster inference")
        return None

    def get_quant_method(self, layer: torch.nn.Module,
                         prefix: str) -> Optional["GPTQMarlinLinearMethod"]:
        if (isinstance(layer, LinearBase) or
            (isinstance(layer, ParallelLMHead) and self.lm_head_quantized)):
            return GPTQMarlinLinearMethod(self)
        return None

    def get_scaled_act_names(self) -> List[str]:
        return []

    @classmethod
    def is_gptq_marlin_compatible(cls, quant_config: Dict[str, Any]):
        # Extract data from quant config.
        quant_method = quant_config.get("quant_method", "").lower()
        num_bits = quant_config.get("bits", None)
        group_size = quant_config.get("group_size", None)
        sym = quant_config.get("sym", None)
        desc_act = quant_config.get("desc_act", None)

        if quant_method != "gptq":
            return False

        # If we cannot find the info needed in the config, cannot convert.
        if (num_bits is None or group_size is None or sym is None
                or desc_act is None):
            return False

        if (num_bits, sym) not in cls.TYPE_MAP:
            return False

        return check_marlin_supported(quant_type=cls.TYPE_MAP[(num_bits, sym)],
                                      group_size=group_size)


class GPTQMarlinLinearMethod(LinearMethodBase):
    """Linear method for GPTQ Marlin.

    Args:
        quant_config: The GPTQ Marlin quantization config.
    """

    _kernel_backends_being_used: Set[str] = set()

    def __init__(self, quant_config: GPTQMarlinConfig) -> None:
        self.quant_config = quant_config

        # Verify supported on platform.
        verify_marlin_supported(quant_type=self.quant_config.quant_type,
                                group_size=self.quant_config.group_size)

    def create_weights(
        self,
        layer: torch.nn.Module,
        input_size_per_partition: int,
        output_partition_sizes: List[int],
        input_size: int,
        output_size: int,
        params_dtype: torch.dtype,
        **extra_weight_attrs,
    ) -> None:
        output_size_per_partition = sum(output_partition_sizes)
        is_row_parallel = input_size != input_size_per_partition
        weight_loader = extra_weight_attrs.get("weight_loader")

        mp_linear_kernel_config = MPLinearLayerConfig(
            full_weight_shape=(input_size, output_size),
            partition_weight_shape=(input_size_per_partition,
                                    output_size_per_partition),
            weight_type=self.quant_config.quant_type,
            act_type=params_dtype,
            group_size=self.quant_config.group_size,
            zero_points=False,
            has_g_idx=self.quant_config.desc_act,
        )

        kernel_type = choose_mp_linear_kernel(mp_linear_kernel_config)

        if kernel_type.__name__ not in self._kernel_backends_being_used:
            logger.info(
                f"Using {kernel_type.__name__} for GPTQMarlinLinearMethod")
            self._kernel_backends_being_used.add(kernel_type.__name__)

        # Normalize group_size
        if self.quant_config.group_size != -1:
            group_size = self.quant_config.group_size
        else:
            group_size = input_size

        # Determine sharding
        if marlin_repeat_scales_on_all_ranks(self.quant_config.desc_act,
                                             self.quant_config.group_size,
                                             is_row_parallel):
            # By setting scale_dim == None, weight_loader will
            # repeat the scales on each GPU in TP>1 case.
            scales_and_zp_input_dim = None
            scales_and_zp_size = input_size // group_size
        else:
            # By setting scale_dim == 0, weight_loader will
            # shard the scales in TP>1 case.
            scales_and_zp_input_dim = 0
            scales_and_zp_size = input_size_per_partition // group_size
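
        # Layout note (following the GPTQ packing convention used below, with
        # K = input_size_per_partition and N = output_size_per_partition):
        #   qweight: (K // pack_factor, N) int32, pack_factor weights per word
        #   g_idx:   (K,) int32, maps each input channel to its quant group
        #   scales:  (scales_and_zp_size, N) in the activation dtype
        #   qzeros:  (scales_and_zp_size, N // pack_factor) int32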

        # Quantized weights
        qweight = PackedAphroditeParameter(
            data=torch.empty(
                input_size_per_partition // self.quant_config.pack_factor,
                output_size_per_partition,
                dtype=torch.int32,
            ),
            input_dim=0,
            output_dim=1,
            packed_dim=0,
            packed_factor=self.quant_config.pack_factor,
            weight_loader=weight_loader)

        # Activation order
        g_idx = RowAphroditeParameter(
            data=torch.empty(input_size_per_partition, dtype=torch.int32),
            input_dim=0,
            weight_loader=weight_loader)

        qzeros_args = {
            "data": torch.empty(
                scales_and_zp_size,
                output_size_per_partition // self.quant_config.pack_factor,
                dtype=torch.int32,
            ),
            "weight_loader": weight_loader,
        }
        weight_scale_args = {
            "data": torch.empty(
                scales_and_zp_size,
                output_size_per_partition,
                dtype=params_dtype,
            ),
            "weight_loader": weight_loader,
        }

        if scales_and_zp_input_dim is None:
            scales = ChannelQuantScaleParameter(output_dim=1,
                                                **weight_scale_args)
            qzeros = PackedColumnParameter(
                output_dim=1,
                packed_dim=1,
                packed_factor=self.quant_config.pack_factor,
                **qzeros_args)
        else:
            scales = GroupQuantScaleParameter(output_dim=1,
                                              input_dim=0,
                                              **weight_scale_args)
            qzeros = PackedAphroditeParameter(
                input_dim=0,
                output_dim=1,
                packed_dim=1,
                packed_factor=self.quant_config.pack_factor,
                **qzeros_args)

        layer.register_parameter("qweight", qweight)
        layer.register_parameter("g_idx", g_idx)
        layer.register_parameter("scales", scales)
        layer.register_parameter("qzeros", qzeros)

        self.kernel = kernel_type(mp_linear_kernel_config,
                                  w_q_param_name="qweight",
                                  w_s_param_name="scales",
                                  w_zp_param_name="qzeros",
                                  w_gidx_param_name="g_idx")

    def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
        self.kernel.process_weights_after_loading(layer)

    def apply(
        self,
        layer: torch.nn.Module,
        x: torch.Tensor,
        bias: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        return self.kernel.apply_weights(layer, x, bias)
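

# Minimal usage sketch (illustrative only, not exercised by the engine): the
# dict below stands in for a checkpoint's quantize_config.json and its values
# are hypothetical. At runtime Aphrodite builds this object itself through
# GPTQMarlinConfig.from_config() when it loads a GPTQ checkpoint.
if __name__ == "__main__":
    example_cfg = GPTQMarlinConfig.from_config({
        "bits": 4,
        "group_size": 128,
        "desc_act": False,
        "sym": True,
        "lm_head": False,
    })
    # With 4-bit weights, eight values are packed into each int32 word.
    print(example_cfg, "pack_factor =", example_cfg.pack_factor)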