
  1. """Tests for the marlin kernel.
  2. Run `pytest tests/kernels/marlin/test_marlin_gemm.py`.
  3. """
  4. import pytest
  5. import torch
  6. from aphrodite import _custom_ops as ops
  7. from aphrodite.quantization.gptq_marlin_24 import (
  8. GPTQ_MARLIN_24_MAX_PARALLEL, GPTQ_MARLIN_24_MIN_THREAD_N,
  9. GPTQ_MARLIN_24_SUPPORTED_GROUP_SIZES, GPTQ_MARLIN_24_SUPPORTED_QUANT_TYPES)
  10. from aphrodite.quantization.qqq import (MARLIN_QQQ_MAX_PARALLEL,
  11. MARLIN_QQQ_MIN_THREAD_N,
  12. MARLIN_QQQ_SUPPORTED_GROUP_SIZES,
  13. MARLIN_QQQ_SUPPORTED_NUM_BITS)
  14. from aphrodite.quantization.utils.marlin_utils import (
  15. GPTQ_MARLIN_MAX_PARALLEL, GPTQ_MARLIN_MIN_THREAD_N,
  16. MARLIN_SUPPORTED_GROUP_SIZES, marlin_make_empty_g_idx,
  17. marlin_permute_scales, query_marlin_supported_quant_types)
  18. from aphrodite.quantization.utils.marlin_utils_fp8 import pack_fp8_to_int32
  19. from aphrodite.quantization.utils.marlin_utils_test import (
  20. MarlinWorkspace, awq_marlin_quantize, get_weight_perm, marlin_quantize,
  21. marlin_weights)
  22. from aphrodite.quantization.utils.marlin_utils_test_24 import (
  23. marlin_24_quantize)
  24. from aphrodite.quantization.utils.marlin_utils_test_qqq import ( # noqa: E501
  25. marlin_qqq_quantize)
  26. from aphrodite.quantization.utils.quant_utils import (awq_pack, gptq_pack,
  27. gptq_quantize_weights,
  28. quantize_weights,
  29. sort_weights)
  30. from tests.quantization.utils import is_quant_method_supported

ACT_ORDER_OPTS = [False, True]
K_FULL_OPTS = [False, True]
USE_FP32_REDUCE_OPTS = [False, True]

MARLIN_K_CHUNKS = [128]
MARLIN_N_CHUNKS = [64, 128, 256]

MARLIN_24_K_CHUNKS = [128]
MARLIN_24_N_CHUNKS = [512]

MNK_FACTORS = [
    (1, 1, 1),
    (1, 4, 8),
    (1, 7, 5),
    (13, 17, 67),
    (26, 37, 13),
    (67, 13, 11),
]

DTYPES = [torch.float16, torch.bfloat16]
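
# Test shapes are derived from the chunk and factor lists above:
#   size_m = m_factor, size_n = n_chunk * n_factor, size_k = k_chunk * k_factor
# e.g. k_chunk=128, n_chunk=256 and mnk_factors=(13, 17, 67) give
# M=13, N=256*17=4352, K=128*67=8576.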


def compute_max_diff(output, output_ref):
    return torch.mean(torch.abs(output - output_ref)) / torch.mean(
        torch.abs(output_ref))


def rand_data(shape, dtype=torch.float16):
    return torch.randn(shape, dtype=dtype, device="cuda")
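

# compute_max_diff is a mean relative error: mean(|out - ref|) / mean(|ref|).
# Illustrative numbers (not taken from any test): output [1.1, 2.0] against
# reference [1.0, 2.0] gives 0.05 / 1.5 ~= 0.033, which would pass the 0.04
# bound used by the GEMM tests below.
#
# The first test exercises the GPTQ -> Marlin repack path: weights are
# quantized and packed to the GPTQ layout, then repacked to the Marlin layout
# both by the reference marlin_weights() helper and by the
# ops.gptq_marlin_repack kernel; the two packings must match exactly.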
@pytest.mark.skipif(not is_quant_method_supported("gptq_marlin"),
                    reason="Marlin is not supported on this GPU type.")
@pytest.mark.parametrize("k_chunk", MARLIN_K_CHUNKS)
@pytest.mark.parametrize("n_chunk", MARLIN_N_CHUNKS)
@pytest.mark.parametrize("quant_type",
                         query_marlin_supported_quant_types(False))
@pytest.mark.parametrize("group_size", MARLIN_SUPPORTED_GROUP_SIZES)
@pytest.mark.parametrize("act_order", ACT_ORDER_OPTS)
@pytest.mark.parametrize("mnk_factors", MNK_FACTORS)
def test_gptq_marlin_repack(k_chunk, n_chunk, quant_type, group_size,
                            act_order, mnk_factors):
    m_factor, n_factor, k_factor = mnk_factors

    size_m = m_factor
    size_k = k_chunk * k_factor
    size_n = n_chunk * n_factor

    print(f"MNK = {size_m} {size_n} {size_k}")

    # Filter act_order
    if act_order:
        if group_size == -1:
            return
        if group_size == size_k:
            return

    # Normalize group_size
    if group_size == -1:
        group_size = size_k
    assert group_size <= size_k

    # Create input
    b_weight = rand_data((size_k, size_n))

    # Quantize (and apply act_order if provided)
    w_ref, q_w, s, g_idx, rand_perm = gptq_quantize_weights(
        b_weight, quant_type, group_size, act_order)

    # Pack to GPTQ format
    q_w_gptq = gptq_pack(q_w, quant_type.size_bits, size_k, size_n)

    # For act_order, sort the "weights" and "g_idx" so that group ids are
    # increasing
    sort_indices = torch.empty(0, dtype=torch.int, device=b_weight.device)
    if act_order:
        q_w, g_idx, sort_indices = sort_weights(q_w, g_idx)

    # Pack to Marlin format
    weight_perm = get_weight_perm(quant_type.size_bits)
    marlin_q_w_1 = marlin_weights(q_w, size_k, size_n, quant_type.size_bits,
                                  weight_perm)

    # Run Marlin repack GPU kernel
    marlin_q_w_2 = ops.gptq_marlin_repack(
        q_w_gptq,
        sort_indices,
        size_k,
        size_n,
        quant_type.size_bits,
    )
    torch.cuda.synchronize()

    torch.testing.assert_close(marlin_q_w_1, marlin_q_w_2)
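

# Same repack check for the AWQ path: weights are quantized with zero points,
# packed to the AWQ layout with awq_pack, and ops.awq_marlin_repack must
# produce the same Marlin-format tensor as the reference marlin_weights()
# packing.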
@pytest.mark.skipif(not is_quant_method_supported("gptq_marlin"),
                    reason="Marlin is not supported on this GPU type.")
@pytest.mark.parametrize("k_chunk", MARLIN_K_CHUNKS)
@pytest.mark.parametrize("n_chunk", MARLIN_N_CHUNKS)
@pytest.mark.parametrize("quant_type",
                         query_marlin_supported_quant_types(False))
@pytest.mark.parametrize("group_size", MARLIN_SUPPORTED_GROUP_SIZES)
@pytest.mark.parametrize("mnk_factors", MNK_FACTORS)
def test_awq_marlin_repack(k_chunk, n_chunk, quant_type, group_size,
                           mnk_factors):
    m_factor, n_factor, k_factor = mnk_factors

    size_m = m_factor
    size_k = k_chunk * k_factor
    size_n = n_chunk * n_factor

    print(f"MNK = {size_m} {size_n} {size_k}")

    # Normalize group_size
    if group_size == -1:
        group_size = size_k
    assert group_size <= size_k

    # Create input
    b_weight = rand_data((size_k, size_n))

    # Quantize
    w_ref, q_w, s, zp = quantize_weights(b_weight,
                                         quant_type,
                                         group_size,
                                         zero_points=True)

    # Pack to AWQ format
    q_w_awq = awq_pack(q_w, quant_type.size_bits, size_k, size_n)

    # Pack to Marlin format
    weight_perm = get_weight_perm(quant_type.size_bits)
    marlin_q_w_1 = marlin_weights(q_w, size_k, size_n, quant_type.size_bits,
                                  weight_perm)

    # Run Marlin repack GPU kernel
    marlin_q_w_2 = ops.awq_marlin_repack(
        q_w_awq,
        size_k,
        size_n,
        quant_type.size_bits,
    )
    torch.cuda.synchronize()

    torch.testing.assert_close(marlin_q_w_1, marlin_q_w_2)
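

# End-to-end GPTQ Marlin GEMM: ops.gptq_marlin_gemm runs on the quantized
# weights and a random fp16 activation, and the result is compared (mean
# relative error < 0.04) against torch.matmul with the dequantized reference
# weights, sweeping act_order, is_k_full and the fp32-reduce option.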
@pytest.mark.skipif(not is_quant_method_supported("gptq_marlin"),
                    reason="Marlin is not supported on this GPU type.")
@pytest.mark.parametrize("k_chunk", MARLIN_K_CHUNKS)
@pytest.mark.parametrize("n_chunk", MARLIN_N_CHUNKS)
@pytest.mark.parametrize("quant_type",
                         query_marlin_supported_quant_types(False))
@pytest.mark.parametrize("group_size", MARLIN_SUPPORTED_GROUP_SIZES)
@pytest.mark.parametrize("mnk_factors", MNK_FACTORS)
@pytest.mark.parametrize("act_order", ACT_ORDER_OPTS)
@pytest.mark.parametrize("is_k_full", K_FULL_OPTS)
@pytest.mark.parametrize("use_fp32_reduce", USE_FP32_REDUCE_OPTS)
def test_gptq_marlin_gemm(
    k_chunk,
    n_chunk,
    quant_type,
    group_size,
    mnk_factors,
    act_order,
    is_k_full,
    use_fp32_reduce,
):
    m_factor, n_factor, k_factor = mnk_factors

    size_m = m_factor
    size_k = k_chunk * k_factor
    size_n = n_chunk * n_factor

    print(f"MNK = {size_m} {size_n} {size_k}")
    print(f"groupsize = {group_size}")

    if act_order:
        if group_size == -1:
            return
        if group_size == size_k:
            return

    a_input = rand_data((size_m, size_k))
    b_weight = rand_data((size_k, size_n))

    w_ref, marlin_q_w, marlin_s, g_idx, sort_indices, _ = marlin_quantize(
        b_weight, quant_type, group_size, act_order)

    marlin_zp = marlin_make_empty_g_idx(marlin_s.device)

    workspace = MarlinWorkspace(size_n, GPTQ_MARLIN_MIN_THREAD_N,
                                GPTQ_MARLIN_MAX_PARALLEL)

    output = ops.gptq_marlin_gemm(
        a_input,
        marlin_q_w,
        marlin_s,
        marlin_zp,
        g_idx,
        sort_indices,
        workspace.scratch,
        quant_type,
        a_input.shape[0],
        b_weight.shape[1],
        a_input.shape[1],
        is_k_full=is_k_full,
        has_zp=False,
        use_fp32_reduce=use_fp32_reduce,
    )
    output_ref = torch.matmul(a_input, w_ref)

    torch.cuda.synchronize()

    max_diff = compute_max_diff(output, output_ref)
    print("max_diff = {}".format(max_diff))

    assert max_diff < 0.04
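

# Marlin 2:4 sparse GEMM: marlin_24_quantize produces compressed weights plus
# sparsity metadata, and ops.gptq_marlin_24_gemm is checked against a dense
# matmul with the reference weights under the same 0.04 bound.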
@pytest.mark.skipif(not is_quant_method_supported("gptq_marlin"),
                    reason="Marlin is not supported on this GPU type.")
@pytest.mark.parametrize("k_chunk", MARLIN_24_K_CHUNKS)
@pytest.mark.parametrize("n_chunk", MARLIN_24_N_CHUNKS)
@pytest.mark.parametrize("quant_type", GPTQ_MARLIN_24_SUPPORTED_QUANT_TYPES)
@pytest.mark.parametrize("group_size", GPTQ_MARLIN_24_SUPPORTED_GROUP_SIZES)
@pytest.mark.parametrize("mnk_factors", MNK_FACTORS)
def test_gptq_marlin_24_gemm(k_chunk, n_chunk, quant_type, group_size,
                             mnk_factors):
    m_factor, n_factor, k_factor = mnk_factors

    size_m = m_factor
    size_k = k_chunk * k_factor
    size_n = n_chunk * n_factor

    print(f"MNK = {size_m} {size_n} {size_k}")
    print(f"groupsize = {group_size}")

    a_input = rand_data((size_m, size_k))
    b_weight = rand_data((size_k, size_n))

    (w_24_ref, marlin_24_q_w_comp, marlin_24_meta,
     marlin_24_s) = marlin_24_quantize(b_weight, quant_type, group_size)

    workspace_24 = MarlinWorkspace(size_n, GPTQ_MARLIN_24_MIN_THREAD_N,
                                   GPTQ_MARLIN_24_MAX_PARALLEL)

    output_ref = torch.matmul(a_input, w_24_ref)

    output = ops.gptq_marlin_24_gemm(
        a_input,
        marlin_24_q_w_comp,
        marlin_24_meta,
        marlin_24_s,
        workspace_24.scratch,
        quant_type,
        a_input.shape[0],
        b_weight.shape[1],
        a_input.shape[1],
    )
    torch.cuda.synchronize()

    max_diff = compute_max_diff(output, output_ref)
    print("max_diff = {}".format(max_diff))

    assert max_diff < 0.04
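

# FP8 Marlin GEMM: the weight is quantized to fp8 with a per-tensor scale
# (ops.scaled_fp8_quant), packed into int32 words with pack_fp8_to_int32, and
# repacked through the 8-bit gptq_marlin_repack path. Because the kernel
# expects per-channel scales, the single per-tensor scale is broadcast across
# N before being permuted into Marlin's scale layout.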
@pytest.mark.skipif(not is_quant_method_supported("fp8"),
                    reason="Marlin is not supported on this GPU type.")
@pytest.mark.parametrize("k_chunk", MARLIN_K_CHUNKS)
@pytest.mark.parametrize("n_chunk", MARLIN_N_CHUNKS)
@pytest.mark.parametrize("num_bits", [8])
@pytest.mark.parametrize("group_size", [-1])
@pytest.mark.parametrize("mnk_factors", MNK_FACTORS)
@pytest.mark.parametrize("dtype", DTYPES)
def test_fp8_marlin_gemm(
    k_chunk,
    n_chunk,
    num_bits,
    group_size,
    mnk_factors,
    dtype,
):
    m_factor, n_factor, k_factor = mnk_factors

    size_m = m_factor
    size_k = k_chunk * k_factor
    size_n = n_chunk * n_factor

    print(f"MNK = {size_m} {size_n} {size_k}")
    print(f"groupsize = {group_size}")

    a_input = rand_data((size_m, size_k), dtype=dtype)
    b_weight = rand_data((size_k, size_n), dtype=dtype)

    # WEIGHTS
    fp8_weight, weight_scale = ops.scaled_fp8_quant(b_weight, scale=None)
    # Repack weights to gptq format (packed int32 elements)
    packed_gptq_qweight = pack_fp8_to_int32(fp8_weight)
    # Repack weights to marlin format
    marlin_qweight = ops.gptq_marlin_repack(
        b_q_weight=packed_gptq_qweight,
        perm=torch.empty(0, dtype=torch.int, device="cuda"),
        size_k=size_k,
        size_n=size_n,
        num_bits=8,
    )

    # WEIGHT SCALES
    # Currently Marlin doesn't support per-tensor scales, so we
    # expand it to channelwise
    scales = weight_scale.repeat(1, size_n).to(a_input.dtype).to("cuda")
    # Permute scales
    marlin_scales = marlin_permute_scales(s=scales,
                                          size_k=size_k,
                                          size_n=size_n,
                                          group_size=-1)

    workspace = MarlinWorkspace(size_n, GPTQ_MARLIN_MIN_THREAD_N,
                                GPTQ_MARLIN_MAX_PARALLEL)

    output = ops.fp8_marlin_gemm(
        a=a_input,
        b_q_weight=marlin_qweight,
        b_scales=marlin_scales,
        workspace=workspace.scratch,
        num_bits=num_bits,
        size_m=a_input.shape[0],
        size_n=b_weight.shape[1],
        size_k=a_input.shape[1],
    )
    output_ref = torch.matmul(a_input, b_weight)

    torch.cuda.synchronize()

    max_diff = compute_max_diff(output, output_ref)
    print("max_diff = {}".format(max_diff))

    assert max_diff < 0.04
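

# AWQ-style Marlin GEMM: weights are quantized with zero points
# (awq_marlin_quantize), g_idx and sort_indices are left empty, and
# ops.gptq_marlin_gemm is called with has_zp=True, again compared against a
# matmul with the dequantized reference weights.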
@pytest.mark.skipif(not is_quant_method_supported("gptq_marlin"),
                    reason="Marlin is not supported on this GPU type.")
@pytest.mark.parametrize("k_chunk", MARLIN_K_CHUNKS)
@pytest.mark.parametrize("n_chunk", MARLIN_N_CHUNKS)
@pytest.mark.parametrize("quant_type",
                         query_marlin_supported_quant_types(True))
@pytest.mark.parametrize("group_size", MARLIN_SUPPORTED_GROUP_SIZES)
@pytest.mark.parametrize("mnk_factors", MNK_FACTORS)
@pytest.mark.parametrize("use_fp32_reduce", USE_FP32_REDUCE_OPTS)
def test_awq_marlin_gemm(
    k_chunk,
    n_chunk,
    quant_type,
    group_size,
    mnk_factors,
    use_fp32_reduce,
):
    m_factor, n_factor, k_factor = mnk_factors

    size_m = m_factor
    size_k = k_chunk * k_factor
    size_n = n_chunk * n_factor

    print(f"MNK = {size_m} {size_n} {size_k}")
    print(f"groupsize = {group_size}")

    a_input = rand_data((size_m, size_k))
    b_weight = rand_data((size_k, size_n))

    w_ref, marlin_q_w, marlin_s, marlin_zp = awq_marlin_quantize(
        b_weight, quant_type, group_size)

    g_idx = torch.empty(0, dtype=torch.int, device=marlin_q_w.device)
    sort_indices = torch.empty(0, dtype=torch.int, device=marlin_q_w.device)
    is_k_full = True
    has_zp = True

    workspace = MarlinWorkspace(size_n, GPTQ_MARLIN_MIN_THREAD_N,
                                GPTQ_MARLIN_MAX_PARALLEL)

    output = ops.gptq_marlin_gemm(
        a_input,
        marlin_q_w,
        marlin_s,
        marlin_zp,
        g_idx,
        sort_indices,
        workspace.scratch,
        quant_type,
        a_input.shape[0],
        b_weight.shape[1],
        a_input.shape[1],
        is_k_full=is_k_full,
        has_zp=has_zp,
        use_fp32_reduce=use_fp32_reduce,
    )
    output_ref = torch.matmul(a_input, w_ref)

    torch.cuda.synchronize()

    max_diff = compute_max_diff(output, output_ref)
    print("max_diff = {}".format(max_diff))

    assert max_diff < 0.04
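

# QQQ Marlin GEMM: activations are dynamically quantized to int8 with a
# per-token scale, weights go through marlin_qqq_quantize (per-group and
# per-channel scales), and ops.marlin_qqq_gemm is compared against a matmul
# of the fake-quantized activations with the reference weights.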
@pytest.mark.skipif(not is_quant_method_supported("qqq"),
                    reason="Marlin is not supported on this GPU type.")
@pytest.mark.parametrize("k_chunk", MARLIN_K_CHUNKS)
@pytest.mark.parametrize("n_chunk", MARLIN_N_CHUNKS)
@pytest.mark.parametrize("num_bits", MARLIN_QQQ_SUPPORTED_NUM_BITS)
@pytest.mark.parametrize("group_size", MARLIN_QQQ_SUPPORTED_GROUP_SIZES)
@pytest.mark.parametrize("mnk_factors", MNK_FACTORS)
def test_marlin_qqq_gemm(
    k_chunk,
    n_chunk,
    num_bits,
    group_size,
    mnk_factors,
):
    int8_traits = torch.iinfo(torch.int8)
    m_factor, n_factor, k_factor = mnk_factors

    size_m = m_factor
    size_k = k_chunk * k_factor
    size_n = n_chunk * n_factor

    print(f"MNK = {size_m} {size_n} {size_k}")
    print(f"groupsize = {group_size}")

    a_input = rand_data((size_m, size_k))
    b_weight = rand_data((size_k, size_n))

    # Quantize activations
    s_a = a_input.abs().max(dim=-1, keepdim=True)[0].div(int8_traits.max).to(
        torch.float)
    q_a = (a_input / s_a).round().clamp(int8_traits.min,
                                        int8_traits.max).to(torch.int8)

    # Quantize weights
    w_ref, marlin_qqq_q_w, marlin_qqq_s_group, marlin_qqq_s_channel = \
        marlin_qqq_quantize(b_weight, num_bits, group_size)

    workspace = MarlinWorkspace(size_n, MARLIN_QQQ_MIN_THREAD_N,
                                MARLIN_QQQ_MAX_PARALLEL)

    output = ops.marlin_qqq_gemm(
        q_a,
        marlin_qqq_q_w,
        s_a,
        marlin_qqq_s_channel,
        marlin_qqq_s_group,
        workspace.scratch,
        a_input.shape[0],
        b_weight.shape[1],
        a_input.shape[1],
    )
    output_ref = torch.matmul(q_a.half() * s_a.half(), w_ref)

    torch.cuda.synchronize()

    max_diff = compute_max_diff(output, output_ref)
    print("max_diff = {}".format(max_diff))

    assert max_diff < 0.04