# squeezellm.py
from typing import Any, Dict, List, Optional
from contextlib import suppress

import torch
from torch.nn.parameter import Parameter

from aphrodite.modeling.layers.linear import (LinearMethodBase,
                                              set_weight_attrs)
from aphrodite.quantization.base_config import QuantizationConfig
from aphrodite.common.utils import is_hip

# The compiled quantization kernels are optional at import time; their
# absence only becomes an error when a SqueezeLLM config is constructed.
HAS_QUANTS = False
with suppress(ImportError):
    from aphrodite._quant_C import quant_ops as ops
    HAS_QUANTS = True


class SqueezeLLMConfig(QuantizationConfig):
    """Config class for SqueezeLLM.

    Reference: https://arxiv.org/pdf/2306.07629
    """

    def __init__(
        self,
        weight_bits: int,
    ) -> None:
        if not HAS_QUANTS:
            raise ImportError("Could not find the quantization kernels.")
        self.weight_bits = weight_bits
        if self.weight_bits != 4:
            raise ValueError(
                "Currently, only 4-bit weight quantization is supported for "
                f"SqueezeLLM, but got {self.weight_bits} bits.")
        # Number of quantized weights packed into one int32: 32 // 4 = 8.
        self.pack_factor = 32 // self.weight_bits

    def __repr__(self) -> str:
        return f"SqueezeLLMConfig(weight_bits={self.weight_bits})"
    def get_name(self) -> str:
        return "squeezellm"

    def get_supported_act_dtypes(self) -> List[torch.dtype]:
        return [torch.half]

    def get_min_capability(self) -> int:
        # Requires CUDA compute capability 7.0 (Volta) or newer.
        return 70

    @staticmethod
    def get_config_filenames() -> List[str]:
        return ["quant_config.json"]

    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "SqueezeLLMConfig":
        weight_bits = cls.get_from_keys(config, ["wbits"])
        return cls(weight_bits)

    def get_linear_method(self) -> "SqueezeLLMLinearMethod":
        return SqueezeLLMLinearMethod(self)

    def get_scaled_act_names(self) -> List[str]:
        return []

    def merge_weight(self) -> bool:
        return True

    def quant_vocab(self) -> List[bool]:
        return [False, False]

    def support_fused_moe(self) -> bool:
        return False

    def rope_style(self) -> Optional[bool]:
        return None
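

# Illustrative sketch (not part of the original file): how a checkpoint's
# quant_config.json is mapped onto this config. The JSON contents are a
# hypothetical example; only the "wbits" key is actually read, per
# from_config() above.
#
#     import json
#     with open("quant_config.json") as f:
#         cfg = SqueezeLLMConfig.from_config(json.load(f))  # e.g. {"wbits": 4}
#     assert cfg.pack_factor == 8  # eight 4-bit codes per int32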


class SqueezeLLMLinearMethod(LinearMethodBase):
    """Linear method for SqueezeLLM.

    Args:
        quant_config: The SqueezeLLM quantization config.
    """

    def __init__(self, quant_config: SqueezeLLMConfig):
        self.quant_config = quant_config

    def create_weights(self, input_size_per_partition: int,
                       output_partition_sizes: List[int], input_size: int,
                       output_size: int,
                       params_dtype: torch.dtype) -> Dict[str, Any]:
        if input_size_per_partition % self.quant_config.pack_factor != 0:
            raise ValueError(
                "The input size is not aligned with the quantized "
                "weight shape. This can be caused by too large "
                "tensor parallel size.")
        output_size_per_partition = sum(output_partition_sizes)
        # Packed weights: each int32 holds pack_factor (8) 4-bit codes along
        # the input dimension, e.g. a 4096 -> 4096 layer stores a
        # (512, 4096) int32 tensor.
        qweight = Parameter(
            torch.empty(
                input_size_per_partition // self.quant_config.pack_factor,
                output_size_per_partition,
                dtype=torch.int32,
            ),
            requires_grad=False,
        )
        set_weight_attrs(
            qweight, {
                "input_dim": 0,
                "output_dim": 1,
                "packed_dim": 0,
                "pack_factor": self.quant_config.pack_factor,
            })
        # Per-output-channel centroid table. Note that weight_bits**2 equals
        # 2**weight_bits (16) only in the supported 4-bit case.
        lookup_table = Parameter(
            torch.empty(
                output_size,
                self.quant_config.weight_bits**2,
                dtype=params_dtype,
            ),
            requires_grad=False,
        )
        set_weight_attrs(lookup_table, {
            "output_dim": 0,
        })
        return {
            "qweight": qweight,
            "lookup_table": lookup_table,
        }
    def apply_weights(self,
                      weights: Dict[str, Any],
                      x: torch.Tensor,
                      bias: Optional[torch.Tensor] = None) -> torch.Tensor:
        qweight = weights["qweight"]
        lookup_table = weights["lookup_table"]
        out_shape = x.shape[:-1] + (qweight.shape[-1], )
        reshaped_x = x.reshape(-1, x.shape[-1])
        if is_hip():
            # On ROCm, accumulate in fp32 and downcast afterwards. The output
            # must be zero-initialized and live on the same device as the
            # activations.
            out_f = torch.zeros(out_shape, device=x.device, dtype=torch.float)
            ops.squeezellm_gemm(reshaped_x, qweight, out_f, lookup_table)
            out = out_f.to(dtype=torch.float16)
        else:
            # NOTE: The output tensor should be zero-initialized.
            out = torch.zeros(out_shape, device=x.device, dtype=torch.float16)
            ops.squeezellm_gemm(reshaped_x, qweight, out, lookup_table)
        if bias is not None:
            out = out + bias
        return out.reshape(out_shape)
    def apply_moe_weights(self, w1: Dict[str, torch.Tensor],
                          w2: Dict[str, torch.Tensor], x: torch.Tensor,
                          gating_output: torch.Tensor, topk: int,
                          renormalize: bool) -> torch.Tensor:
        # Fused MoE is not supported for SqueezeLLM (see support_fused_moe).
        raise NotImplementedError
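

# Minimal reference sketch (not part of the original module) of what
# ops.squeezellm_gemm computes, written in plain PyTorch. The helper name is
# hypothetical, and it assumes the common little-endian packing convention
# (the lowest-order 4 bits of each int32 hold the first code); the real
# kernel fuses the table lookup and the matmul on the GPU.
def _dequantize_reference(qweight: torch.Tensor,
                          lookup_table: torch.Tensor,
                          weight_bits: int = 4) -> torch.Tensor:
    """Expand packed 4-bit codes into a dense weight matrix.

    qweight: (in_features // 8, out_features) int32 packed codes.
    lookup_table: (out_features, 16) centroids, one table per output channel.
    Returns: (in_features, out_features) in lookup_table's dtype.
    """
    pack_factor = 32 // weight_bits
    mask = (1 << weight_bits) - 1
    packed_in, out_features = qweight.shape
    # Unpack along the input dimension: (packed_in, out) -> (packed_in*8, out).
    shifts = torch.arange(0, 32, weight_bits, device=qweight.device)
    codes = (qweight.unsqueeze(1) >> shifts.view(1, -1, 1)) & mask
    codes = codes.reshape(packed_in * pack_factor, out_features).long()
    # weight[i, o] = lookup_table[o, codes[i, o]]
    out_idx = torch.arange(out_features, device=qweight.device).unsqueeze(0)
    return lookup_table[out_idx, codes]
#
# With this, apply_weights above should be equivalent (up to accumulation
# order) to:
#
#     w = _dequantize_reference(weights["qweight"], weights["lookup_table"])
#     y = x @ w.to(x.dtype)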