  1. """Tests for the marlin kernel.
  2. Run `pytest tests/kernels/marlin/test_marlin_gemm.py`.
  3. """
import pytest
import torch

from aphrodite import _custom_ops as ops
from aphrodite.quantization.gptq_marlin_24 import (
    GPTQ_MARLIN_24_MAX_PARALLEL, GPTQ_MARLIN_24_MIN_THREAD_N,
    GPTQ_MARLIN_24_SUPPORTED_GROUP_SIZES, GPTQ_MARLIN_24_SUPPORTED_QUANT_TYPES)
from aphrodite.quantization.qqq import (MARLIN_QQQ_MAX_PARALLEL,
                                        MARLIN_QQQ_MIN_THREAD_N,
                                        MARLIN_QQQ_SUPPORTED_GROUP_SIZES,
                                        MARLIN_QQQ_SUPPORTED_NUM_BITS)
from aphrodite.quantization.utils.marlin_utils import (
    GPTQ_MARLIN_MAX_PARALLEL, GPTQ_MARLIN_MIN_THREAD_N,
    MARLIN_SUPPORTED_GROUP_SIZES, marlin_make_empty_g_idx,
    marlin_permute_scales, query_marlin_supported_quant_types)
from aphrodite.quantization.utils.marlin_utils_fp8 import pack_fp8_to_int32
from aphrodite.quantization.utils.marlin_utils_test import (
    MarlinWorkspace, awq_marlin_quantize, get_weight_perm, marlin_quantize,
    marlin_weights)
from aphrodite.quantization.utils.marlin_utils_test_24 import (
    marlin_24_quantize)
from aphrodite.quantization.utils.marlin_utils_test_qqq import (  # noqa: E501
    marlin_qqq_quantize)
from aphrodite.quantization.utils.quant_utils import (awq_pack, gptq_pack,
                                                      gptq_quantize_weights,
                                                      quantize_weights,
                                                      sort_weights)
from tests.kernels.utils import DEFAULT_OPCHECK_TEST_UTILS, opcheck
from tests.quantization.utils import is_quant_method_supported
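
# Parameter sweeps shared by the tests below. The boolean lists toggle the
# act_order, is_k_full and use_fp32_reduce arguments passed to the
# quantization helpers and Marlin kernels.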
ACT_ORDER_OPTS = [False, True]
K_FULL_OPTS = [False, True]
USE_FP32_REDUCE_OPTS = [False, True]

MARLIN_K_CHUNKS = [128]
MARLIN_N_CHUNKS = [64, 128, 256]

MARLIN_24_K_CHUNKS = [128]
MARLIN_24_N_CHUNKS = [512]

MNK_FACTORS = [
    (1, 1, 1),
    (1, 4, 8),
    (1, 7, 5),
    (13, 17, 67),
    (26, 37, 13),
    (67, 13, 11),
]

DTYPES = [torch.float16, torch.bfloat16]
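
# Each test builds its problem shape from a (m_factor, n_factor, k_factor)
# tuple and the chunk sizes above:
#   size_m = m_factor
#   size_k = k_chunk * k_factor
#   size_n = n_chunk * n_factor
# (the repack tests only use size_k and size_n).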


def compute_max_diff(output, output_ref):
    return torch.mean(torch.abs(output - output_ref)) / torch.mean(
        torch.abs(output_ref))


def rand_data(shape, dtype=torch.float16):
    return torch.randn(shape, dtype=dtype, device="cuda")
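
# All tests follow the same structure: the repack tests compare the GPU repack
# kernels against a reference Marlin packing built on the host, and the GEMM
# tests compare the kernel output against torch.matmul with the reference
# (dequantized) weight, asserting a mean relative error below 4% via
# compute_max_diff.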


@pytest.mark.skipif(not is_quant_method_supported("gptq_marlin"),
                    reason="Marlin is not supported on this GPU type.")
@pytest.mark.parametrize("k_chunk", MARLIN_K_CHUNKS)
@pytest.mark.parametrize("n_chunk", MARLIN_N_CHUNKS)
@pytest.mark.parametrize("quant_type",
                         query_marlin_supported_quant_types(False))
@pytest.mark.parametrize("group_size", MARLIN_SUPPORTED_GROUP_SIZES)
@pytest.mark.parametrize("act_order", ACT_ORDER_OPTS)
@pytest.mark.parametrize("mnk_factors", MNK_FACTORS)
def test_gptq_marlin_repack(k_chunk, n_chunk, quant_type, group_size,
                            act_order, mnk_factors):
    m_factor, n_factor, k_factor = mnk_factors

    size_k = k_chunk * k_factor
    size_n = n_chunk * n_factor

    # Filter act_order
    if act_order:
        if group_size == -1:
            return
        if group_size == size_k:
            return

    # Normalize group_size
    if group_size == -1:
        group_size = size_k
    assert group_size <= size_k

    # Create input
    b_weight = rand_data((size_k, size_n))

    # Quantize (and apply act_order if provided)
    w_ref, q_w, s, g_idx, rand_perm = gptq_quantize_weights(
        b_weight, quant_type, group_size, act_order)

    # Pack to GPTQ format
    q_w_gptq = gptq_pack(q_w, quant_type.size_bits, size_k, size_n)

    # For act_order, sort the "weights" and "g_idx" so that group ids are
    # increasing
    sort_indices = torch.empty(0, dtype=torch.int, device=b_weight.device)
    if act_order:
        q_w, g_idx, sort_indices = sort_weights(q_w, g_idx)

    # Pack to Marlin format
    weight_perm = get_weight_perm(quant_type.size_bits)
    marlin_q_w_1 = marlin_weights(q_w, size_k, size_n, quant_type.size_bits,
                                  weight_perm)

    opcheck(torch.ops._C.gptq_marlin_repack,
            (q_w_gptq, sort_indices, size_k, size_n, quant_type.size_bits))

    # Run Marlin repack GPU kernel
    marlin_q_w_2 = ops.gptq_marlin_repack(
        q_w_gptq,
        sort_indices,
        size_k,
        size_n,
        quant_type.size_bits,
    )
    torch.cuda.synchronize()

    torch.testing.assert_close(marlin_q_w_1, marlin_q_w_2)


@pytest.mark.skipif(not is_quant_method_supported("gptq_marlin"),
                    reason="Marlin is not supported on this GPU type.")
@pytest.mark.parametrize("k_chunk", MARLIN_K_CHUNKS)
@pytest.mark.parametrize("n_chunk", MARLIN_N_CHUNKS)
@pytest.mark.parametrize("quant_type",
                         query_marlin_supported_quant_types(False))
@pytest.mark.parametrize("group_size", MARLIN_SUPPORTED_GROUP_SIZES)
@pytest.mark.parametrize("mnk_factors", MNK_FACTORS)
def test_awq_marlin_repack(k_chunk, n_chunk, quant_type, group_size,
                           mnk_factors):
    m_factor, n_factor, k_factor = mnk_factors

    size_k = k_chunk * k_factor
    size_n = n_chunk * n_factor

    # Normalize group_size
    if group_size == -1:
        group_size = size_k
    assert group_size <= size_k

    # Create input
    b_weight = rand_data((size_k, size_n))

    # Quantize
    w_ref, q_w, s, zp = quantize_weights(b_weight,
                                         quant_type,
                                         group_size,
                                         zero_points=True)

    # Pack to AWQ format
    q_w_awq = awq_pack(q_w, quant_type.size_bits, size_k, size_n)

    # Pack to Marlin format
    weight_perm = get_weight_perm(quant_type.size_bits)
    marlin_q_w_1 = marlin_weights(q_w, size_k, size_n, quant_type.size_bits,
                                  weight_perm)

    opcheck(torch.ops._C.awq_marlin_repack,
            (q_w_awq, size_k, size_n, quant_type.size_bits))

    # Run Marlin repack GPU kernel
    marlin_q_w_2 = ops.awq_marlin_repack(
        q_w_awq,
        size_k,
        size_n,
        quant_type.size_bits,
    )
    torch.cuda.synchronize()

    torch.testing.assert_close(marlin_q_w_1, marlin_q_w_2)


@pytest.mark.skipif(not is_quant_method_supported("gptq_marlin"),
                    reason="Marlin is not supported on this GPU type.")
@pytest.mark.parametrize("k_chunk", MARLIN_K_CHUNKS)
@pytest.mark.parametrize("n_chunk", MARLIN_N_CHUNKS)
@pytest.mark.parametrize("quant_type",
                         query_marlin_supported_quant_types(False))
@pytest.mark.parametrize("group_size", MARLIN_SUPPORTED_GROUP_SIZES)
@pytest.mark.parametrize("mnk_factors", MNK_FACTORS)
@pytest.mark.parametrize("act_order", ACT_ORDER_OPTS)
@pytest.mark.parametrize("is_k_full", K_FULL_OPTS)
@pytest.mark.parametrize("use_fp32_reduce", USE_FP32_REDUCE_OPTS)
def test_gptq_marlin_gemm(
    k_chunk,
    n_chunk,
    quant_type,
    group_size,
    mnk_factors,
    act_order,
    is_k_full,
    use_fp32_reduce,
):
    m_factor, n_factor, k_factor = mnk_factors

    size_m = m_factor
    size_k = k_chunk * k_factor
    size_n = n_chunk * n_factor

    if act_order:
        if group_size == -1:
            return
        if group_size == size_k:
            return

    a_input = rand_data((size_m, size_k))
    b_weight = rand_data((size_k, size_n))

    w_ref, marlin_q_w, marlin_s, g_idx, sort_indices, _ = marlin_quantize(
        b_weight, quant_type, group_size, act_order)

    marlin_zp = marlin_make_empty_g_idx(marlin_s.device)

    workspace = MarlinWorkspace(size_n, GPTQ_MARLIN_MIN_THREAD_N,
                                GPTQ_MARLIN_MAX_PARALLEL)

    opcheck(
        torch.ops._C.gptq_marlin_gemm,
        (a_input, marlin_q_w, marlin_s, marlin_zp, g_idx, sort_indices,
         workspace.scratch, quant_type, a_input.shape[0], b_weight.shape[1],
         a_input.shape[1], is_k_full, False, use_fp32_reduce),
        test_utils=DEFAULT_OPCHECK_TEST_UTILS)

    output = ops.gptq_marlin_gemm(
        a_input,
        marlin_q_w,
        marlin_s,
        marlin_zp,
        g_idx,
        sort_indices,
        workspace.scratch,
        quant_type,
        a_input.shape[0],
        b_weight.shape[1],
        a_input.shape[1],
        is_k_full=is_k_full,
        has_zp=False,
        use_fp32_reduce=use_fp32_reduce,
    )
    output_ref = torch.matmul(a_input, w_ref)

    torch.cuda.synchronize()

    max_diff = compute_max_diff(output, output_ref)

    assert max_diff < 0.04


@pytest.mark.skipif(not is_quant_method_supported("gptq_marlin"),
                    reason="Marlin is not supported on this GPU type.")
@pytest.mark.parametrize("k_chunk", MARLIN_24_K_CHUNKS)
@pytest.mark.parametrize("n_chunk", MARLIN_24_N_CHUNKS)
@pytest.mark.parametrize("quant_type", GPTQ_MARLIN_24_SUPPORTED_QUANT_TYPES)
@pytest.mark.parametrize("group_size", GPTQ_MARLIN_24_SUPPORTED_GROUP_SIZES)
@pytest.mark.parametrize("mnk_factors", MNK_FACTORS)
def test_gptq_marlin_24_gemm(k_chunk, n_chunk, quant_type, group_size,
                             mnk_factors):
    m_factor, n_factor, k_factor = mnk_factors

    size_m = m_factor
    size_k = k_chunk * k_factor
    size_n = n_chunk * n_factor

    a_input = rand_data((size_m, size_k))
    b_weight = rand_data((size_k, size_n))

    (w_24_ref, marlin_24_q_w_comp, marlin_24_meta,
     marlin_24_s) = marlin_24_quantize(b_weight, quant_type, group_size)

    workspace_24 = MarlinWorkspace(size_n, GPTQ_MARLIN_24_MIN_THREAD_N,
                                   GPTQ_MARLIN_24_MAX_PARALLEL)

    output_ref = torch.matmul(a_input, w_24_ref)

    opcheck(torch.ops._C.gptq_marlin_24_gemm,
            (a_input, marlin_24_q_w_comp, marlin_24_meta, marlin_24_s,
             workspace_24.scratch, quant_type, a_input.shape[0],
             b_weight.shape[1], a_input.shape[1]),
            test_utils=DEFAULT_OPCHECK_TEST_UTILS)

    output = ops.gptq_marlin_24_gemm(
        a_input,
        marlin_24_q_w_comp,
        marlin_24_meta,
        marlin_24_s,
        workspace_24.scratch,
        quant_type,
        a_input.shape[0],
        b_weight.shape[1],
        a_input.shape[1],
    )
    torch.cuda.synchronize()

    max_diff = compute_max_diff(output, output_ref)

    assert max_diff < 0.04


@pytest.mark.skipif(not is_quant_method_supported("fp8"),
                    reason="Marlin is not supported on this GPU type.")
@pytest.mark.parametrize("k_chunk", MARLIN_K_CHUNKS)
@pytest.mark.parametrize("n_chunk", MARLIN_N_CHUNKS)
@pytest.mark.parametrize("num_bits", [8])
@pytest.mark.parametrize("group_size", [-1])
@pytest.mark.parametrize("mnk_factors", MNK_FACTORS)
@pytest.mark.parametrize("dtype", DTYPES)
def test_fp8_marlin_gemm(
    k_chunk,
    n_chunk,
    num_bits,
    group_size,
    mnk_factors,
    dtype,
):
    m_factor, n_factor, k_factor = mnk_factors

    size_m = m_factor
    size_k = k_chunk * k_factor
    size_n = n_chunk * n_factor

    a_input = rand_data((size_m, size_k), dtype=dtype)
    b_weight = rand_data((size_k, size_n), dtype=dtype)

    # WEIGHTS
    fp8_weight, weight_scale = ops.scaled_fp8_quant(b_weight, scale=None)
    # Repack weights to gptq format (packed int32 elements)
    packed_gptq_qweight = pack_fp8_to_int32(fp8_weight)
    # Repack weights to marlin format
    marlin_qweight = ops.gptq_marlin_repack(
        b_q_weight=packed_gptq_qweight,
        perm=torch.empty(0, dtype=torch.int, device="cuda"),
        size_k=size_k,
        size_n=size_n,
        num_bits=8,
    )

    # WEIGHT SCALES
    # Currently Marlin doesn't support per-tensor scales, so we
    # expand it to channelwise
    scales = weight_scale.repeat(1, size_n).to(a_input.dtype).to("cuda")
    # Permute scales
    marlin_scales = marlin_permute_scales(s=scales,
                                          size_k=size_k,
                                          size_n=size_n,
                                          group_size=-1)

    workspace = MarlinWorkspace(size_n, GPTQ_MARLIN_MIN_THREAD_N,
                                GPTQ_MARLIN_MAX_PARALLEL)

    opcheck(torch.ops._C.fp8_marlin_gemm,
            (a_input, marlin_qweight, marlin_scales, workspace.scratch,
             num_bits, a_input.shape[0], b_weight.shape[1], a_input.shape[1]))

    output = ops.fp8_marlin_gemm(
        a=a_input,
        b_q_weight=marlin_qweight,
        b_scales=marlin_scales,
        workspace=workspace.scratch,
        num_bits=num_bits,
        size_m=a_input.shape[0],
        size_n=b_weight.shape[1],
        size_k=a_input.shape[1],
    )
    output_ref = torch.matmul(a_input, b_weight)

    torch.cuda.synchronize()

    max_diff = compute_max_diff(output, output_ref)

    assert max_diff < 0.04


@pytest.mark.skipif(not is_quant_method_supported("gptq_marlin"),
                    reason="Marlin is not supported on this GPU type.")
@pytest.mark.parametrize("k_chunk", MARLIN_K_CHUNKS)
@pytest.mark.parametrize("n_chunk", MARLIN_N_CHUNKS)
@pytest.mark.parametrize("quant_type",
                         query_marlin_supported_quant_types(True))
@pytest.mark.parametrize("group_size", MARLIN_SUPPORTED_GROUP_SIZES)
@pytest.mark.parametrize("mnk_factors", MNK_FACTORS)
@pytest.mark.parametrize("use_fp32_reduce", USE_FP32_REDUCE_OPTS)
def test_awq_marlin_gemm(
    k_chunk,
    n_chunk,
    quant_type,
    group_size,
    mnk_factors,
    use_fp32_reduce,
):
    m_factor, n_factor, k_factor = mnk_factors

    size_m = m_factor
    size_k = k_chunk * k_factor
    size_n = n_chunk * n_factor

    a_input = rand_data((size_m, size_k))
    b_weight = rand_data((size_k, size_n))

    w_ref, marlin_q_w, marlin_s, marlin_zp = awq_marlin_quantize(
        b_weight, quant_type, group_size)

    g_idx = torch.empty(0, dtype=torch.int, device=marlin_q_w.device)
    sort_indices = torch.empty(0, dtype=torch.int, device=marlin_q_w.device)
    is_k_full = True
    has_zp = True

    workspace = MarlinWorkspace(size_n, GPTQ_MARLIN_MIN_THREAD_N,
                                GPTQ_MARLIN_MAX_PARALLEL)

    output = ops.gptq_marlin_gemm(
        a_input,
        marlin_q_w,
        marlin_s,
        marlin_zp,
        g_idx,
        sort_indices,
        workspace.scratch,
        quant_type,
        a_input.shape[0],
        b_weight.shape[1],
        a_input.shape[1],
        is_k_full=is_k_full,
        has_zp=has_zp,
        use_fp32_reduce=use_fp32_reduce,
    )
    output_ref = torch.matmul(a_input, w_ref)

    torch.cuda.synchronize()

    max_diff = compute_max_diff(output, output_ref)

    assert max_diff < 0.04


@pytest.mark.skipif(not is_quant_method_supported("qqq"),
                    reason="Marlin is not supported on this GPU type.")
@pytest.mark.parametrize("k_chunk", MARLIN_K_CHUNKS)
@pytest.mark.parametrize("n_chunk", MARLIN_N_CHUNKS)
@pytest.mark.parametrize("num_bits", MARLIN_QQQ_SUPPORTED_NUM_BITS)
@pytest.mark.parametrize("group_size", MARLIN_QQQ_SUPPORTED_GROUP_SIZES)
@pytest.mark.parametrize("mnk_factors", MNK_FACTORS)
def test_marlin_qqq_gemm(
    k_chunk,
    n_chunk,
    num_bits,
    group_size,
    mnk_factors,
):
    int8_traits = torch.iinfo(torch.int8)
    m_factor, n_factor, k_factor = mnk_factors

    size_m = m_factor
    size_k = k_chunk * k_factor
    size_n = n_chunk * n_factor

    a_input = rand_data((size_m, size_k))
    b_weight = rand_data((size_k, size_n))

    # Quantize activations
    s_a = a_input.abs().max(dim=-1, keepdim=True)[0].div(int8_traits.max).to(
        torch.float)
    q_a = (a_input / s_a).round().clamp(int8_traits.min,
                                        int8_traits.max).to(torch.int8)

    # Quantize weights
    w_ref, marlin_qqq_q_w, marlin_qqq_s_group, marlin_qqq_s_channel = \
        marlin_qqq_quantize(b_weight, num_bits, group_size)

    workspace = MarlinWorkspace(size_n, MARLIN_QQQ_MIN_THREAD_N,
                                MARLIN_QQQ_MAX_PARALLEL)

    opcheck(torch.ops._C.marlin_qqq_gemm,
            (q_a, marlin_qqq_q_w, s_a, marlin_qqq_s_channel,
             marlin_qqq_s_group, workspace.scratch, a_input.shape[0],
             b_weight.shape[1], a_input.shape[1]))

    output = ops.marlin_qqq_gemm(
        q_a,
        marlin_qqq_q_w,
        s_a,
        marlin_qqq_s_channel,
        marlin_qqq_s_group,
        workspace.scratch,
        a_input.shape[0],
        b_weight.shape[1],
        a_input.shape[1],
    )
    output_ref = torch.matmul(q_a.half() * s_a.half(), w_ref)

    torch.cuda.synchronize()

    max_diff = compute_max_diff(output, output_ref)

    assert max_diff < 0.04