# gguf.py

from typing import Any, Dict, List, Optional, Tuple

import torch
from torch.nn.parameter import Parameter

from aphrodite._C import ops
from aphrodite.modeling.layers.linear import (LinearMethodBase,
                                              set_weight_attrs)
from aphrodite.modeling.layers.quantization.base_config import (
    QuantizationConfig)
# Map of GGML quant type id -> (block size in weights, type size in bytes
# per block).
GGML_QUANT_SIZES = {
    0: (1, 4),                                        # F32
    1: (1, 2),                                        # F16
    2: (32, 2 + 16),                                  # Q4_0
    3: (32, 2 + 2 + 16),                              # Q4_1
    6: (32, 2 + 4 + 16),                              # Q5_0
    7: (32, 2 + 2 + 4 + 16),                          # Q5_1
    8: (32, 2 + 32),                                  # Q8_0
    9: (32, 4 + 4 + 32),                              # Q8_1
    10: (256, 2 + 2 + 256 // 16 + 256 // 4),          # Q2_K
    11: (256, 2 + 256 // 4 + 256 // 8 + 12),          # Q3_K
    12: (256, 2 + 2 + 256 // 2 + 12),                 # Q4_K
    13: (256, 2 + 2 + 256 // 2 + 256 // 8 + 12),      # Q5_K
    14: (256, 2 + 256 // 2 + 256 // 4 + 256 // 16),   # Q6_K
    15: (256, 4 + 256 + 256 // 8),                    # Q8_K
    16: (256, 2 + 256 // 4),                          # IQ2_XXS
    17: (256, 2 + 256 // 4 + 256 // 32),              # IQ2_XS
    18: (256, 2 + 3 * 256 // 8),                      # IQ3_XXS
    19: (256, 2 + 256 // 8 + 256 // 16),              # IQ1_S
    20: (32, 2 + 32 // 2),                            # IQ4_NL
    21: (256, 2 + 256 // 4 + 256 // 32 + 256 // 8 + 256 // 64),  # IQ3_S
    22: (256, 2 + 256 // 4 + 256 // 32 + 256 // 32),  # IQ2_S
    23: (256, 2 + 2 + 256 // 64 + 256 // 2),          # IQ4_XS
}
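

# Illustrative helper (a minimal sketch, not used below): bytes needed to
# store one row of `n_weights` in a given GGML quant type, assuming
# `n_weights` is a multiple of the type's block size.
def _ggml_row_bytes(quant_type: int, n_weights: int) -> int:
    block_size, type_size = GGML_QUANT_SIZES[quant_type]
    assert n_weights % block_size == 0
    # e.g. Q4_K (id 12) over 4096 weights: 4096 // 256 * 144 = 2304 bytes
    return n_weights // block_size * type_size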
class GGUFConfig(QuantizationConfig):
    """Config class for GGUF."""

    def __repr__(self) -> str:
        return "GGUFConfig()"

    def get_name(self) -> str:
        return "gguf"

    def get_supported_act_dtypes(self) -> List[torch.dtype]:
        return [torch.half]

    def get_min_capability(self) -> int:
        # Requires CUDA compute capability 6.1 (Pascal) or newer.
        return 61

    @staticmethod
    def get_config_filenames() -> List[str]:
        # GGUF checkpoints are self-describing; there is no separate
        # quantization config file to read.
        return []

    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "GGUFConfig":
        return cls()

    def get_linear_method(self) -> "GGUFLinearMethod":
        return GGUFLinearMethod(self)

    def get_scaled_act_names(self) -> List[str]:
        return []

    def merge_weight(self) -> bool:
        return False

    def rope_style(self) -> Optional[bool]:
        return False

    def quant_vocab(self) -> Tuple[bool, bool]:
        # Both the input embedding and the output (LM head) vocab tensors
        # are quantized.
        return (True, True)
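

# Illustrative wiring (a minimal sketch): GGUF carries its quantization
# parameters inside the checkpoint itself, so the config dict is ignored:
#
#     config = GGUFConfig.from_config({})
#     linear_method = config.get_linear_method()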
class GGUFLinearMethod(LinearMethodBase):
    """Linear method for GGUF.

    Args:
        quant_config: The GGUF quantization config.
    """

    def __init__(self, quant_config: GGUFConfig):
        self.quant_config = quant_config

    def create_weights(
        self,
        input_size_per_partition: int,
        output_partition_sizes: List[int],
        input_size: int,
        output_size: int,
        params_dtype: torch.dtype,
    ) -> Dict[str, Any]:
        # The quant type of the weight is unknown until the state dict is
        # loaded, so defer allocation with an uninitialized parameter.
        weight = torch.nn.parameter.UninitializedParameter(
            requires_grad=False)
        # No need for pack_factor because we don't fuse qkv layers anyway.
        set_weight_attrs(weight, {
            "input_dim": 1,
            "output_dim": 0,
        })
        # Placeholder for the GGML quant type id; overwritten at load time.
        weight_type = Parameter(
            torch.tensor(1, dtype=torch.int, device="cuda"),
            requires_grad=False,
        )
        set_weight_attrs(weight_type, {"ignore_warning": True})
        return {"weight": weight, "weight_type": weight_type}
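
    # Illustrative loading step (a sketch; `loaded` and `loaded_type` are
    # hypothetical names for the raw quantized bytes and the GGML type id
    # read from the checkpoint):
    #
    #     params = linear_method.create_weights(...)
    #     params["weight"].materialize(loaded.shape, dtype=torch.uint8)
    #     params["weight"].data.copy_(loaded)
    #     params["weight_type"].data.fill_(loaded_type)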
    def apply_weights(self,
                      weights: Dict[str, Any],
                      x: torch.Tensor,
                      bias: Optional[torch.Tensor] = None) -> torch.Tensor:
        if isinstance(weights["weight_type"], torch.Tensor):
            weights["weight_type"] = int(weights["weight_type"])
            # Check the tensor-parallel shape here, on the first pass only.
            # weight.shape[1] is in bytes, so each row must hold a whole
            # number of quantized blocks.
            type_size = GGML_QUANT_SIZES[weights["weight_type"]][1]
            if weights["weight"].shape[1] % type_size != 0:
                raise ValueError("Size is not aligned with the quantized "
                                 "weight shape.")
        weight = weights["weight"]
        weight_type = weights["weight_type"]
        infeatures = x.shape[-1]
        outfeatures = weight.shape[0]
        out_shape = x.shape[:-1] + (outfeatures, )
        reshaped_x = x.reshape(-1, x.shape[-1])
        if reshaped_x.shape[0] == 1:
            # Single token: matrix-vector kernel on the quantized weight.
            out = ops.ggml_mul_mat_vec_a8(weight, reshaped_x, weight_type,
                                          outfeatures)
        elif reshaped_x.shape[0] < 8 and weight_type < 16:
            # Small batch, non-IQ quant type: quantized matmul kernel.
            out = ops.ggml_mul_mat_a8(weight, reshaped_x, weight_type,
                                      outfeatures)
        else:
            # Fall back to dequantizing the weight and a dense matmul.
            weight = ops.ggml_dequantize(weight, weight_type, outfeatures,
                                         infeatures)
            out = reshaped_x @ weight.T
        if bias is not None:
            out = out + bias
        return out.reshape(out_shape)
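
    # Shape example (illustrative): with x of shape [2, 5, 4096] and a
    # Q4_K weight of shape [11008, 2304] (11008 rows of 4096 weights,
    # 4096 // 256 * 144 = 2304 bytes per row), reshaped_x is [10, 4096],
    # so the dequantize-and-matmul path runs and the result is reshaped
    # back to [2, 5, 11008].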
    def apply_embedding(self, weights: Dict[str, torch.Tensor],
                        x: torch.Tensor) -> torch.Tensor:
        if isinstance(weights["weight_type"], torch.Tensor):
            weights["weight_type"] = int(weights["weight_type"])
        weight = weights["weight"]
        weight_type = weights["weight_type"]
        block_size, type_size = GGML_QUANT_SIZES[weight_type]
        vocab_size = weight.shape[0]
        # weight.shape[1] is in bytes; recover the number of weights per row.
        hidden_size = weight.shape[1] // type_size * block_size
        if weight_type < 2:
            # F32/F16: rows are plain floats, look them up directly.
            return torch.embedding(weight.view(vocab_size, -1), x)
        # Gather the quantized rows for the requested tokens, then
        # dequantize only those rows.
        x_flat = x.flatten()
        quant = torch.index_select(weight.view(vocab_size, -1),
                                   dim=0,
                                   index=x_flat)
        dequant = ops.ggml_dequantize(quant, weight_type, hidden_size,
                                      x_flat.shape[0])
        return dequant.view(*x.shape, hidden_size)
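
    # Illustrative call (a sketch): with a Q6_K-quantized vocab of 32000
    # rows and hidden_size 4096 (row bytes 4096 // 256 * 210 = 3360),
    # token ids x of shape [2, 5] select 10 quantized rows, which
    # ggml_dequantize expands to [10, 4096]; the result is viewed back
    # to [2, 5, 4096].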