from typing import Any, Callable, Dict, List, Optional

import torch
from loguru import logger
from torch.nn import Module
from torch.nn.parameter import Parameter

import aphrodite.common.envs as envs
from aphrodite import _custom_ops as ops
from aphrodite.common.utils import is_hip, print_warning_once
from aphrodite.modeling.layers.fused_moe import (FusedMoE, FusedMoEMethodBase,
                                                 FusedMoeWeightScaleSupported)
from aphrodite.modeling.layers.linear import (LinearBase, LinearMethodBase,
                                              UnquantizedLinearMethod)
from aphrodite.modeling.parameter import (ModelWeightParameter,
                                          PerTensorScaleParameter)
from aphrodite.modeling.utils import set_weight_attrs
from aphrodite.platforms import current_platform
from aphrodite.quantization.base_config import (QuantizationConfig,
                                                QuantizeMethodBase)
from aphrodite.quantization.kv_cache import BaseKVCacheMethod
from aphrodite.quantization.utils.marlin_utils_fp8 import (
    apply_fp8_marlin_linear, prepare_fp8_layer_for_marlin)
from aphrodite.quantization.utils.quant_utils import is_layer_skipped
from aphrodite.quantization.utils.w8a8_utils import (
    all_close_1d, apply_fp8_linear, convert_to_channelwise,
    cutlass_fp8_supported, normalize_e4m3fn_to_e4m3fnuz, per_tensor_dequantize,
    requantize_with_max_scale)

ACTIVATION_SCHEMES = ["static", "dynamic"]

APHRODITE_TEST_FORCE_FP8_MARLIN = envs.APHRODITE_TEST_FORCE_FP8_MARLIN
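
# "static" means per-tensor activation scales are loaded from the checkpoint;
# "dynamic" means they are computed at runtime from each input. The
# APHRODITE_TEST_FORCE_FP8_MARLIN flag forces the Marlin weight-only path even
# on FP8-capable GPUs (a test-only knob, judging by its name).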


class Fp8Config(QuantizationConfig):
    """Config class for FP8."""

    def __init__(
        self,
        is_checkpoint_fp8_serialized: bool = False,
        activation_scheme: str = "dynamic",
        ignored_layers: Optional[List[str]] = None,
    ) -> None:
        self.is_checkpoint_fp8_serialized = is_checkpoint_fp8_serialized
        if is_checkpoint_fp8_serialized:
            logger.warning("Detected fp8 checkpoint. Please note that the "
                           "format is experimental and subject to change.")
        if activation_scheme not in ACTIVATION_SCHEMES:
            raise ValueError(
                f"Unsupported activation scheme {activation_scheme}")
        self.activation_scheme = activation_scheme
        self.ignored_layers = ignored_layers or []

    @classmethod
    def get_name(cls) -> str:
        return "fp8"

    @classmethod
    def get_supported_act_dtypes(cls) -> List[torch.dtype]:
        return [torch.bfloat16, torch.half]

    @classmethod
    def get_min_capability(cls) -> int:
        return 80

    @classmethod
    def get_config_filenames(cls) -> List[str]:
        return []

    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "Fp8Config":
        quant_method = cls.get_from_keys(config, ["quant_method"])
        is_checkpoint_fp8_serialized = ("fp8" in quant_method)
        activation_scheme = cls.get_from_keys(config, ["activation_scheme"])
        ignored_layers = cls.get_from_keys_or(config, ["ignored_layers"], None)
        return cls(is_checkpoint_fp8_serialized=is_checkpoint_fp8_serialized,
                   activation_scheme=activation_scheme,
                   ignored_layers=ignored_layers)

    def get_quant_method(self, layer: torch.nn.Module,
                         prefix: str) -> Optional["QuantizeMethodBase"]:
        from aphrodite.attention.layer import (
            Attention)  # Avoid circular import

        if isinstance(layer, LinearBase):
            if is_layer_skipped(prefix, self.ignored_layers):
                return UnquantizedLinearMethod()
            return Fp8LinearMethod(self)
        elif isinstance(layer, FusedMoE):
            return Fp8MoEMethod(self)
        elif isinstance(layer, Attention):
            return Fp8KVCacheMethod(self)
        return None

    def get_scaled_act_names(self) -> List[str]:
        return []
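

# Example (sketch, not exercised in this module): constructing the config from
# a checkpoint's quantization_config dict, assuming keys like the ones
# from_config() reads above:
#
#     cfg = Fp8Config.from_config({
#         "quant_method": "fp8",
#         "activation_scheme": "static",
#         "ignored_layers": ["lm_head"],   # hypothetical ignored layer
#     })
#     assert cfg.is_checkpoint_fp8_serialized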


class Fp8LinearMethod(LinearMethodBase):
    """Linear method for FP8.
    Supports loading FP8 checkpoints with static weight scale and
    dynamic/static activation scale.

    Also supports loading quantized FP16/BF16 model checkpoints with dynamic
    activation scaling. The weight scaling factor will be initialized after
    the model weights are loaded.

    Limitations:
    1. Only supports per-tensor quantization due to torch._scaled_mm support.
    2. Only supports the float8_e4m3fn data type due to the limitation of
       torch._scaled_mm (https://github.com/pytorch/pytorch/blob/2e48b39603411a41c5025efbe52f89560b827825/aten/src/ATen/native/cuda/Blas.cpp#L854-L856)

    Args:
        quant_config: The quantization config.
    """

    def __init__(self, quant_config: Fp8Config):
        self.quant_config = quant_config
        self.cutlass_fp8_supported = cutlass_fp8_supported()

        # For GPUs that lack FP8 hardware support, we can leverage the Marlin
        # kernel for fast weight-only FP8 quantization.
        capability = current_platform.get_device_capability()
        capability = capability[0] * 10 + capability[1]
        self.use_marlin = capability < 89 or APHRODITE_TEST_FORCE_FP8_MARLIN
        # Disable marlin for rocm.
        if is_hip():
            self.use_marlin = False
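        # Note: capability is encoded as major * 10 + minor, so the cutoff of
        # 89 corresponds to SM 8.9 (Ada), the first NVIDIA architecture with
        # native FP8 support; older GPUs take the weight-only (w8a16) Marlin
        # path instead.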

    def create_weights(
        self,
        layer: torch.nn.Module,
        input_size_per_partition: int,
        output_partition_sizes: List[int],
        input_size: int,
        output_size: int,
        params_dtype: torch.dtype,
        **extra_weight_attrs,
    ):
        del input_size, output_size
        output_size_per_partition = sum(output_partition_sizes)
        weight_loader = extra_weight_attrs.get("weight_loader")

        layer.logical_widths = output_partition_sizes
        layer.input_size_per_partition = input_size_per_partition
        layer.output_size_per_partition = output_size_per_partition
        layer.orig_dtype = params_dtype

        # WEIGHT
        weight_dtype = (torch.float8_e4m3fn
                        if self.quant_config.is_checkpoint_fp8_serialized else
                        params_dtype)

        weight = ModelWeightParameter(data=torch.empty(
            output_size_per_partition,
            input_size_per_partition,
            dtype=weight_dtype),
                                      input_dim=1,
                                      output_dim=0,
                                      weight_loader=weight_loader)
        layer.register_parameter("weight", weight)

        # If checkpoint is serialized fp8, load them.
        # Otherwise, wait until process_weights_after_loading.
        if self.quant_config.is_checkpoint_fp8_serialized:
            # WEIGHT SCALE
            scale = PerTensorScaleParameter(data=torch.empty(
                len(output_partition_sizes), dtype=torch.float32),
                                            weight_loader=weight_loader)
            scale[:] = torch.finfo(torch.float32).min
            layer.register_parameter("weight_scale", scale)
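            # (The float32-min fill appears to act as a "not loaded yet"
            # sentinel: real scales from the checkpoint overwrite it, and it
            # cannot win the max-reductions applied to scales later on.)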

            # INPUT ACTIVATION SCALE
            if self.quant_config.activation_scheme == "static":
                scale = PerTensorScaleParameter(data=torch.empty(
                    len(output_partition_sizes), dtype=torch.float32),
                                                weight_loader=weight_loader)
                scale[:] = torch.finfo(torch.float32).min
                layer.register_parameter("input_scale", scale)
            else:
                layer.register_parameter("input_scale", None)

    def process_weights_after_loading(self, layer: Module) -> None:
        layer.weight = torch.nn.Parameter(layer.weight.data,
                                          requires_grad=False)
        # If checkpoint not serialized fp8, quantize the weights.
        if not self.quant_config.is_checkpoint_fp8_serialized:
            qweight, weight_scale = ops.scaled_fp8_quant(layer.weight,
                                                         scale=None)
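            # With scale=None this is dynamic per-tensor quantization: the
            # kernel derives a single scale from the weight's max magnitude
            # (on the order of amax(|W|) / 448 for e4m3fn) and returns the
            # quantized weight together with that scale.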

            # If using marlin (w8a16), kernel uses channelwise weights,
            # so extend the weight scales to be channelwise.
            if self.use_marlin:
                assert weight_scale.numel() == 1
                weight_scale = convert_to_channelwise(
                    weight_scale.expand(len(layer.logical_widths)),
                    layer.logical_widths)

            # Update the layer with the new values.
            layer.weight = Parameter(qweight.t(), requires_grad=False)
            layer.weight_scale = Parameter(weight_scale, requires_grad=False)
            layer.input_scale = None

        # If checkpoint is fp8, handle that there are N scales for N
        # shards in a fused module
        else:
            layer.weight_scale = torch.nn.Parameter(layer.weight_scale.data,
                                                    requires_grad=False)
            if self.quant_config.activation_scheme == "static":
                layer.input_scale = torch.nn.Parameter(layer.input_scale.data,
                                                       requires_grad=False)

            # If using marlin (w8a16), kernel uses channelwise weights,
            # so extend the weight scales to be channelwise.
            if self.use_marlin:
                weight = layer.weight
                weight_scale = convert_to_channelwise(layer.weight_scale,
                                                      layer.logical_widths)

            # If using w8a8, torch._scaled_mm needs per tensor, so
            # requantize the logical shards as a single weight.
            else:
                # Dequant -> Quant with max scale so we can run per tensor.
                weight = layer.weight
                weight_scale = layer.weight_scale

                # If rocm, use float8_e4m3fnuz.
                if is_hip():
                    weight, weight_scale, input_scale = \
                        normalize_e4m3fn_to_e4m3fnuz(
                            weight=weight,
                            weight_scale=weight_scale,
                            input_scale=layer.input_scale)
                    if input_scale is not None:
                        layer.input_scale = Parameter(input_scale,
                                                      requires_grad=False)

                weight_scale, weight = requantize_with_max_scale(
                    weight=weight,
                    weight_scale=weight_scale,
                    logical_widths=layer.logical_widths,
                )

            # Update layer with new values.
            layer.weight = Parameter(weight.t(), requires_grad=False)
            layer.weight_scale = Parameter(weight_scale, requires_grad=False)
            if self.quant_config.activation_scheme == "static":
                layer.input_scale = Parameter(layer.input_scale.max(),
                                              requires_grad=False)

        if self.use_marlin:
            prepare_fp8_layer_for_marlin(layer)
            # Activations not quantized for marlin.
            del layer.input_scale
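
        # At this point the non-Marlin (w8a8) path leaves the layer with an
        # fp8 weight stored transposed for the scaled GEMM, a single
        # per-tensor weight_scale, and an input_scale only when the
        # activation scheme is static; the Marlin path presumably repacks the
        # weight into its own format in prepare_fp8_layer_for_marlin().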

    def apply(self,
              layer: torch.nn.Module,
              x: torch.Tensor,
              bias: Optional[torch.Tensor] = None) -> torch.Tensor:

        if self.use_marlin:
            return apply_fp8_marlin_linear(
                input=x,
                weight=layer.weight,
                weight_scale=layer.weight_scale,
                workspace=layer.workspace,
                size_n=layer.output_size_per_partition,
                size_k=layer.input_size_per_partition,
                bias=bias)
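
        # w8a8 path: apply_fp8_linear quantizes x per-tensor (dynamically, or
        # with the static input_scale) and computes, in effect,
        #     y ~= (x_q * x_scale) @ (w_q * w_scale).T + bias
        # using a CUTLASS kernel when available, otherwise torch._scaled_mm.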
        return apply_fp8_linear(
            input=x,
            weight=layer.weight,
            weight_scale=layer.weight_scale,
            input_scale=layer.input_scale,
            bias=bias,
            cutlass_fp8_supported=self.cutlass_fp8_supported,
            use_per_token_if_dynamic=False)


class Fp8MoEMethod(FusedMoEMethodBase):
    """MoE method for FP8.
    Supports loading FP8 checkpoints with static weight scale and
    dynamic/static activation scale.

    Also supports loading quantized FP16/BF16 model checkpoints with dynamic
    activation scaling. The weight scaling factor will be initialized after
    the model weights are loaded.

    Args:
        quant_config: The quantization config.
    """

    def __init__(self, quant_config: Fp8Config):
        self.quant_config = quant_config

    def create_weights(self, layer: Module, num_experts: int, hidden_size: int,
                       intermediate_size: int, params_dtype: torch.dtype,
                       **extra_weight_attrs):

        if self.quant_config.is_checkpoint_fp8_serialized:
            params_dtype = torch.float8_e4m3fn

        # WEIGHTS
        w13_weight = torch.nn.Parameter(torch.empty(num_experts,
                                                    2 * intermediate_size,
                                                    hidden_size,
                                                    dtype=params_dtype),
                                        requires_grad=False)
        layer.register_parameter("w13_weight", w13_weight)
        set_weight_attrs(w13_weight, extra_weight_attrs)

        w2_weight = torch.nn.Parameter(torch.empty(num_experts,
                                                   hidden_size,
                                                   intermediate_size,
                                                   dtype=params_dtype),
                                       requires_grad=False)
        layer.register_parameter("w2_weight", w2_weight)
        set_weight_attrs(w2_weight, extra_weight_attrs)
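
        # Shapes: w13_weight stacks each expert's two input projections
        # (w1 and w3, typically gate and up) along dim 1, giving
        # [num_experts, 2 * intermediate_size, hidden_size]; w2_weight is the
        # output projection, [num_experts, hidden_size, intermediate_size].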

        # WEIGHT_SCALES
        # Allocate 2 scales for w1 and w3 respectively.
        # They will be combined to a single scale after weight loading.
        w13_weight_scale = torch.nn.Parameter(torch.ones(num_experts,
                                                         2,
                                                         dtype=torch.float32),
                                              requires_grad=False)
        layer.register_parameter("w13_weight_scale", w13_weight_scale)

        w2_weight_scale = torch.nn.Parameter(torch.ones(num_experts,
                                                        dtype=torch.float32),
                                             requires_grad=False)
        layer.register_parameter("w2_weight_scale", w2_weight_scale)

        # Add the quantization method used (per tensor/grouped/channel)
        # to ensure the weight scales are loaded in properly.
        extra_weight_attrs.update(
            {"quant_method": FusedMoeWeightScaleSupported.TENSOR.value})

        # If loading an fp8 checkpoint, pass the weight loaders.
        # If loading an fp16 checkpoint, do not (we will quantize in
        # process_weights_after_loading()).
        if self.quant_config.is_checkpoint_fp8_serialized:
            set_weight_attrs(w13_weight_scale, extra_weight_attrs)
            set_weight_attrs(w2_weight_scale, extra_weight_attrs)

        # INPUT_SCALES
        if self.quant_config.activation_scheme == "static":
            if not self.quant_config.is_checkpoint_fp8_serialized:
                raise ValueError(
                    "Found static activation scheme for checkpoint that "
                    "was not serialized fp8.")

            w13_input_scale = torch.nn.Parameter(torch.ones(
                num_experts, dtype=torch.float32),
                                                 requires_grad=False)
            layer.register_parameter("w13_input_scale", w13_input_scale)
            set_weight_attrs(w13_input_scale, extra_weight_attrs)

            w2_input_scale = torch.nn.Parameter(torch.ones(
                num_experts, dtype=torch.float32),
                                                requires_grad=False)
            layer.register_parameter("w2_input_scale", w2_input_scale)
            set_weight_attrs(w2_input_scale, extra_weight_attrs)

        else:
            layer.w13_input_scale = None
            layer.w2_input_scale = None

    def process_weights_after_loading(self, layer: Module) -> None:

        # If checkpoint is fp16, quantize in place.
        if not self.quant_config.is_checkpoint_fp8_serialized:
            # If rocm, use float8_e4m3fnuz as dtype.
            fp8_dtype = torch.float8_e4m3fnuz \
                if is_hip() else torch.float8_e4m3fn
            w13_weight = torch.empty_like(layer.w13_weight.data,
                                          dtype=fp8_dtype)
            w2_weight = torch.empty_like(layer.w2_weight.data, dtype=fp8_dtype)

            # Re-initialize w13_scale because we directly quantize
            # merged w13 weights and generate a single scaling factor.
            layer.w13_weight_scale = torch.nn.Parameter(torch.ones(
                layer.num_experts,
                dtype=torch.float32,
                device=w13_weight.device),
                                                        requires_grad=False)
            for expert in range(layer.num_experts):
                w13_weight[expert, :, :], layer.w13_weight_scale[
                    expert] = ops.scaled_fp8_quant(
                        layer.w13_weight.data[expert, :, :])
                w2_weight[expert, :, :], layer.w2_weight_scale[
                    expert] = ops.scaled_fp8_quant(
                        layer.w2_weight.data[expert, :, :])
            layer.w13_weight = torch.nn.Parameter(w13_weight,
                                                  requires_grad=False)
            layer.w2_weight = torch.nn.Parameter(w2_weight,
                                                 requires_grad=False)
            return

        # If checkpoint is fp8, we need to handle that the MoE kernels
        # require a single activation scale and a single weight scale
        # for w13 per expert.
        else:
            # Fp8 moe kernels require a single activation scale.
            # We take the max of all the scales in case they differ.
            if self.quant_config.activation_scheme == "static":
                if (layer.w13_input_scale is None
                        or layer.w2_input_scale is None):
                    raise ValueError(
                        "QuantConfig has static quantization, but found "
                        "activation scales are None.")
                if (not all_close_1d(layer.w13_input_scale)
                        or not all_close_1d(layer.w2_input_scale)):
                    print_warning_once(
                        "Found input_scales that are not equal for "
                        "fp8 MoE layer. Using the maximum across experts "
                        "for each layer.")
                layer.w13_input_scale = torch.nn.Parameter(
                    layer.w13_input_scale.max(), requires_grad=False)
                layer.w2_input_scale = torch.nn.Parameter(
                    layer.w2_input_scale.max(), requires_grad=False)

            # If rocm, normalize the weights and scales to e4m3fnuz.
            if is_hip():
                # Normalize the weights and scales.
                w13_weight, w13_weight_scale, w13_input_scale = \
                    normalize_e4m3fn_to_e4m3fnuz(
                        layer.w13_weight, layer.w13_weight_scale,
                        layer.w13_input_scale)
                w2_weight, w2_weight_scale, w2_input_scale = \
                    normalize_e4m3fn_to_e4m3fnuz(
                        layer.w2_weight, layer.w2_weight_scale,
                        layer.w2_input_scale)
                # Reset the parameters.
                layer.w13_weight = torch.nn.Parameter(w13_weight,
                                                      requires_grad=False)
                layer.w13_weight_scale = torch.nn.Parameter(
                    w13_weight_scale, requires_grad=False)
                if w13_input_scale is not None:
                    layer.w13_input_scale = torch.nn.Parameter(
                        w13_input_scale, requires_grad=False)
                layer.w2_weight = torch.nn.Parameter(w2_weight,
                                                     requires_grad=False)
                layer.w2_weight_scale = torch.nn.Parameter(
                    w2_weight_scale, requires_grad=False)
                if w2_input_scale is not None:
                    layer.w2_input_scale = torch.nn.Parameter(
                        w2_input_scale, requires_grad=False)

            # Fp8 moe kernel needs a single weight scale for w13 per expert.
            # We take the max, then dequant and requant each expert.
            assert layer.w13_weight_scale is not None
            shard_size = layer.intermediate_size_per_partition
            max_w13_scales = layer.w13_weight_scale.max(dim=1).values
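            # Each expert's w13 weight holds the w1 and w3 shards stacked
            # along dim 0, each of height shard_size and each loaded with its
            # own per-tensor scale; dequantize each shard and requantize it
            # with the per-expert max so the kernel sees one scale per expert.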
            for expert_id in range(layer.num_experts):
                start = 0
                for shard_id in range(2):
                    dq_weight = per_tensor_dequantize(
                        layer.w13_weight[expert_id][start:start +
                                                    shard_size, :],
                        layer.w13_weight_scale[expert_id][shard_id])
                    layer.w13_weight[expert_id][
                        start:start + shard_size, :], _ = ops.scaled_fp8_quant(
                            dq_weight, max_w13_scales[expert_id])
                    start += shard_size

            layer.w13_weight_scale = torch.nn.Parameter(max_w13_scales,
                                                        requires_grad=False)
            return

    def apply(
        self,
        layer: torch.nn.Module,
        x: torch.Tensor,
        router_logits: torch.Tensor,
        top_k: int,
        renormalize: bool,
        use_grouped_topk: bool,
        topk_group: Optional[int] = None,
        num_expert_group: Optional[int] = None,
        custom_routing_function: Optional[Callable] = None,
    ) -> torch.Tensor:
        from aphrodite.modeling.layers.fused_moe import fused_experts

        topk_weights, topk_ids = FusedMoE.select_experts(
            hidden_states=x,
            router_logits=router_logits,
            use_grouped_topk=use_grouped_topk,
            top_k=top_k,
            renormalize=renormalize,
            topk_group=topk_group,
            num_expert_group=num_expert_group,
            custom_routing_function=custom_routing_function)
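
        # fused_experts runs the two FP8 expert GEMMs (w13, then w2) for the
        # routed tokens; w13_input_scale / w2_input_scale serve as the
        # activation scales of the first and second GEMM (a1_scale /
        # a2_scale), or are None for dynamic activation quantization.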
        return fused_experts(x,
                             layer.w13_weight,
                             layer.w2_weight,
                             topk_weights=topk_weights,
                             topk_ids=topk_ids,
                             inplace=True,
                             use_fp8_w8a8=True,
                             w1_scale=layer.w13_weight_scale,
                             w2_scale=layer.w2_weight_scale,
                             a1_scale=layer.w13_input_scale,
                             a2_scale=layer.w2_input_scale)


class Fp8KVCacheMethod(BaseKVCacheMethod):
    """
    Supports loading kv-cache scaling factors from FP8 checkpoints.
    """

    def __init__(self, quant_config: Fp8Config):
        super().__init__(quant_config)
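        # The actual handling of the kv-cache scaling factors presumably
        # lives in BaseKVCacheMethod; this subclass only binds it to
        # Fp8Config.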