from typing import List, Optional, Tuple

import flashinfer
import pytest
import torch

NUM_HEADS = [(16, 16), (32, 8), (64, 8), (6, 1)]
HEAD_SIZES = [128, 256]
BLOCK_SIZES = [16, 32]
DTYPES = [torch.float16, torch.bfloat16]
NUM_BLOCKS = 32768  # Large enough to test overflow in index calculation.
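

# These tests compare FlashInfer's paged-attention kernels against the plain
# PyTorch reference implementation below (ref_paged_attn). They require a
# CUDA-capable GPU and the flashinfer package, and are run with pytest.
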
def ref_paged_attn(
    query: torch.Tensor,
    key_cache: torch.Tensor,
    value_cache: torch.Tensor,
    query_lens: List[int],
    kv_lens: List[int],
    block_tables: torch.Tensor,
    scale: float,
    sliding_window: Optional[int] = None,
    soft_cap: Optional[float] = None,
) -> torch.Tensor:
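    """Reference paged attention in plain PyTorch.

    Gathers each sequence's K/V pages via its block table, expands KV heads
    for grouped-query attention, applies a causal (and optionally sliding
    window) mask plus optional logit soft capping, and returns the attention
    outputs of all sequences concatenated along the token dimension.
    """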
    num_seqs = len(query_lens)
    block_tables = block_tables.cpu().numpy()
    _, block_size, num_kv_heads, head_size = key_cache.shape

    outputs: List[torch.Tensor] = []
    start_idx = 0
    for i in range(num_seqs):
        query_len = query_lens[i]
        kv_len = kv_lens[i]
        q = query[start_idx:start_idx + query_len]
        q *= scale

        num_kv_blocks = (kv_len + block_size - 1) // block_size
        block_indices = block_tables[i, :num_kv_blocks]

        k = key_cache[block_indices].view(-1, num_kv_heads, head_size)
        k = k[:kv_len]
        v = value_cache[block_indices].view(-1, num_kv_heads, head_size)
        v = v[:kv_len]
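
        # Grouped-query attention: repeat each KV head so that q, k, and v
        # carry the same number of heads.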
        if q.shape[1] != k.shape[1]:
            k = torch.repeat_interleave(k, q.shape[1] // k.shape[1], dim=1)
            v = torch.repeat_interleave(v, q.shape[1] // v.shape[1], dim=1)
        attn = torch.einsum("qhd,khd->hqk", q, k).float()
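        # Causal mask: query position j (0-indexed within this sequence) may
        # attend to the first kv_len - query_len + j + 1 KV positions.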
        empty_mask = torch.ones(query_len, kv_len)
        mask = torch.triu(empty_mask, diagonal=kv_len - query_len + 1).bool()
        if sliding_window is not None:
            sliding_window_mask = torch.triu(empty_mask,
                                             diagonal=kv_len -
                                             (query_len + sliding_window) +
                                             1).bool().logical_not()
            mask |= sliding_window_mask
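        # Optional logit soft capping: squashes the raw attention logits into
        # (-soft_cap, soft_cap) with a scaled tanh before masking and softmax.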
        if soft_cap is not None:
            attn = soft_cap * torch.tanh(attn / soft_cap)
        attn.masked_fill_(mask, float("-inf"))
        attn = torch.softmax(attn, dim=-1).to(v.dtype)
        out = torch.einsum("hqk,khd->qhd", attn, v)

        outputs.append(out)
        start_idx += query_len

    return torch.cat(outputs, dim=0)
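

# Decode: one query token per sequence attends to its paged KV cache.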
  57. @pytest.mark.parametrize("kv_lens", [[1328, 18, 463], [1, 54, 293, 70]])
  58. @pytest.mark.parametrize("num_heads", NUM_HEADS)
  59. @pytest.mark.parametrize("head_size", HEAD_SIZES)
  60. @pytest.mark.parametrize("block_size", BLOCK_SIZES)
  61. @pytest.mark.parametrize("dtype", DTYPES)
  62. @pytest.mark.parametrize("soft_cap", [None, 30.0, 50.0])
  63. @torch.inference_mode
  64. def test_flashinfer_decode_with_paged_kv(
  65. kv_lens: List[int],
  66. num_heads: Tuple[int, int],
  67. head_size: int,
  68. dtype: torch.dtype,
  69. block_size: int,
  70. soft_cap: Optional[float],
  71. ) -> None:
  72. torch.set_default_device("cuda")
  73. torch.cuda.manual_seed_all(0)
  74. num_seqs = len(kv_lens)
  75. num_query_heads = num_heads[0]
  76. num_kv_heads = num_heads[1]
  77. assert num_query_heads % num_kv_heads == 0
  78. max_kv_len = max(kv_lens)
  79. scale = head_size**-0.5
  80. query = torch.randn(num_seqs, num_query_heads, head_size, dtype=dtype)
  81. key_value_cache = torch.randn(NUM_BLOCKS,
  82. 2,
  83. block_size,
  84. num_kv_heads,
  85. head_size,
  86. dtype=dtype)
  87. key_cache = key_value_cache[:, 0, :, :, :].squeeze(1)
  88. value_cache = key_value_cache[:, 1, :, :, :].squeeze(1)
  89. max_num_blocks_per_seq = (max_kv_len + block_size - 1) // block_size
  90. block_tables = torch.randint(0,
  91. NUM_BLOCKS,
  92. (num_seqs, max_num_blocks_per_seq),
  93. dtype=torch.int32)
  94. kv_indptr = [0]
  95. kv_indices = []
  96. kv_last_page_lens = []
  97. for i in range(num_seqs):
  98. seq_len = kv_lens[i]
  99. assert seq_len > 0
  100. num_blocks = (seq_len + block_size - 1) // block_size
  101. kv_indices.extend(block_tables[i, :num_blocks])
  102. kv_indptr.append(kv_indptr[-1] + num_blocks)
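        # Number of valid slots in the sequence's final page (a full page
        # maps to block_size, not 0).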
        kv_last_page_len = seq_len % block_size
        if kv_last_page_len == 0:
            kv_last_page_len = block_size
        kv_last_page_lens.append(kv_last_page_len)

    kv_indptr = torch.tensor(kv_indptr, dtype=torch.int32)
    kv_indices = torch.tensor(kv_indices, dtype=torch.int32)
    kv_last_page_lens = torch.tensor(kv_last_page_lens, dtype=torch.int32)
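
    # FlashInfer needs a scratch workspace buffer; the decode wrapper switches
    # to its tensor-core kernels when the GQA group size (query heads per KV
    # head) is larger than 4.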
    workspace_buffer = torch.empty(128 * 1024 * 1024, dtype=torch.int8)
    wrapper = flashinfer.BatchDecodeWithPagedKVCacheWrapper(
        workspace_buffer, "NHD",
        use_tensor_cores=((num_query_heads // num_kv_heads) > 4))
    wrapper.begin_forward(kv_indptr,
                          kv_indices,
                          kv_last_page_lens,
                          num_query_heads,
                          num_kv_heads,
                          head_size,
                          block_size,
                          "NONE",
                          data_type=dtype)

    output = wrapper.forward(query, key_value_cache, logits_soft_cap=soft_cap)

    ref_output = ref_paged_attn(query=query,
                                key_cache=key_cache,
                                value_cache=value_cache,
                                query_lens=[1] * num_seqs,
                                kv_lens=kv_lens,
                                block_tables=block_tables,
                                scale=scale,
                                soft_cap=soft_cap)
    torch.testing.assert_close(output, ref_output, atol=1e-2, rtol=1e-2), \
        f"{torch.max(torch.abs(output - ref_output))}"
  136. @pytest.mark.parametrize("seq_lens", [[(1, 1328), (5, 18), (129, 463)]])
  137. @pytest.mark.parametrize("num_heads", NUM_HEADS)
  138. @pytest.mark.parametrize("head_size", HEAD_SIZES)
  139. @pytest.mark.parametrize("block_size", BLOCK_SIZES)
  140. @pytest.mark.parametrize("dtype", DTYPES)
  141. @pytest.mark.parametrize("soft_cap", [None, 30.0, 50.0])
  142. @torch.inference_mode
  143. def test_flashinfer_prefill_with_paged_kv(seq_lens: List[Tuple[int, int]],
  144. num_heads: Tuple[int, int],
  145. head_size: int, dtype: torch.dtype,
  146. block_size: int,
  147. soft_cap: Optional[float]) -> None:
  148. torch.set_default_device("cuda")
  149. torch.cuda.manual_seed_all(0)
  150. num_seqs = len(seq_lens)
  151. query_lens = [x[0] for x in seq_lens]
  152. kv_lens = [x[1] for x in seq_lens]
  153. num_query_heads = num_heads[0]
  154. num_kv_heads = num_heads[1]
  155. assert num_query_heads % num_kv_heads == 0
  156. max_kv_len = max(kv_lens)
  157. scale = head_size**-0.5
  158. query = torch.randn(sum(query_lens),
  159. num_query_heads,
  160. head_size,
  161. dtype=dtype)
  162. key_value_cache = torch.randn(NUM_BLOCKS,
  163. 2,
  164. block_size,
  165. num_kv_heads,
  166. head_size,
  167. dtype=dtype)
  168. key_cache = key_value_cache[:, 0, :, :, :].squeeze(1)
  169. value_cache = key_value_cache[:, 1, :, :, :].squeeze(1)
  170. # Normalize the scale of the key and value caches to mitigate
  171. # numerical instability.
  172. key_cache /= head_size**0.5
  173. value_cache /= head_size**0.5
  174. max_num_blocks_per_seq = (max_kv_len + block_size - 1) // block_size
  175. block_tables = torch.randint(0,
  176. NUM_BLOCKS,
  177. (num_seqs, max_num_blocks_per_seq),
  178. dtype=torch.int32)
  179. qo_indptr = [0]
  180. kv_indptr = [0]
  181. kv_indices = []
  182. kv_last_page_lens = []
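    # CSR-style metadata: qo_indptr holds per-sequence offsets into the
    # flattened query tokens, kv_indptr holds offsets into kv_indices (the
    # flattened list of page ids), and kv_last_page_lens gives the number of
    # valid slots in each sequence's final page.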
    for i in range(num_seqs):
        seq_len = kv_lens[i]
        assert seq_len > 0
        num_blocks = (seq_len + block_size - 1) // block_size
        kv_indices.extend(block_tables[i, :num_blocks])
        kv_indptr.append(kv_indptr[-1] + num_blocks)
        kv_last_page_len = seq_len % block_size
        if kv_last_page_len == 0:
            kv_last_page_len = block_size
        kv_last_page_lens.append(kv_last_page_len)
        qo_indptr.append(qo_indptr[-1] + query_lens[i])

    qo_indptr = torch.tensor(qo_indptr, dtype=torch.int32)
    kv_indptr = torch.tensor(kv_indptr, dtype=torch.int32)
    kv_indices = torch.tensor(kv_indices, dtype=torch.int32)
    kv_last_page_lens = torch.tensor(kv_last_page_lens, dtype=torch.int32)

    workspace_buffer = torch.empty(128 * 1024 * 1024, dtype=torch.int8)
    wrapper = flashinfer.BatchPrefillWithPagedKVCacheWrapper(
        workspace_buffer, "NHD")
    wrapper.begin_forward(
        qo_indptr,
        kv_indptr,
        kv_indices,
        kv_last_page_lens,
        num_query_heads,
        num_kv_heads,
        head_size,
        block_size,
    )

    output = wrapper.forward(
        query,
        key_value_cache,
        logits_soft_cap=soft_cap,
    )

    ref_output = ref_paged_attn(query=query,
                                key_cache=key_cache,
                                value_cache=value_cache,
                                query_lens=query_lens,
                                kv_lens=kv_lens,
                                block_tables=block_tables,
                                scale=scale,
                                soft_cap=soft_cap)
    torch.testing.assert_close(output, ref_output, atol=1e-2, rtol=1e-2), \
        f"{torch.max(torch.abs(output - ref_output))}"
  226. @pytest.mark.parametrize("seq_lens", [[(1, 132), (5, 18)]])
  227. @pytest.mark.parametrize("num_heads", [(32, 8), (6, 1)])
  228. @pytest.mark.parametrize("head_size", HEAD_SIZES)
  229. @pytest.mark.parametrize("block_size", BLOCK_SIZES)
  230. @pytest.mark.parametrize("dtype", DTYPES)
  231. @pytest.mark.parametrize("soft_cap", [None, 30.0, 50.0])
  232. def test_flashinfer_prefill_with_paged_fp8_kv(
  233. seq_lens: List[Tuple[int, int]], num_heads: Tuple[int, int],
  234. head_size: int, dtype: torch.dtype, block_size: int,
  235. soft_cap: Optional[float]) -> None:
  236. torch.set_default_device("cuda")
  237. torch.cuda.manual_seed_all(0)
  238. num_seqs = len(seq_lens)
  239. query_lens = [x[0] for x in seq_lens]
  240. kv_lens = [x[1] for x in seq_lens]
  241. num_query_heads = num_heads[0]
  242. num_kv_heads = num_heads[1]
  243. assert num_query_heads % num_kv_heads == 0
  244. max_kv_len = max(kv_lens)
  245. scale = head_size**-0.5
  246. kv_cache_dtype = torch.float8_e4m3fn
  247. query = torch.randn(sum(query_lens),
  248. num_query_heads,
  249. head_size,
  250. dtype=dtype)
  251. NUM_BLOCKS_FP8 = 2048
  252. key_value_cache = torch.randn(NUM_BLOCKS_FP8,
  253. 2,
  254. block_size,
  255. num_kv_heads,
  256. head_size,
  257. dtype=dtype)
  258. key_cache, value_cache = torch.chunk(key_value_cache, 2, dim=1)
  259. key_cache /= head_size**0.5
  260. value_cache /= head_size**0.5
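
    # 448.0 is the largest finite value representable in float8_e4m3fn;
    # the per-tensor scales map the caches into that range before casting.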
    k_scale = key_cache.amax().item() / 448.0
    v_scale = value_cache.amax().item() / 448.0

    kv_cache_fp8 = torch.cat([key_cache / k_scale, value_cache / v_scale],
                             dim=1).to(kv_cache_dtype)

    assert (kv_cache_fp8.shape == key_value_cache.shape)
    max_num_blocks_per_seq = (max_kv_len + block_size - 1) // block_size
    block_tables = torch.randint(0,
                                 NUM_BLOCKS_FP8,
                                 (num_seqs, max_num_blocks_per_seq),
                                 dtype=torch.int32)

    qo_indptr = [0]
    kv_indptr = [0]
    kv_indices = []
    kv_last_page_lens = []
    for i in range(num_seqs):
        seq_len = kv_lens[i]
        assert seq_len > 0
        num_blocks = (seq_len + block_size - 1) // block_size
        kv_indices.extend(block_tables[i, :num_blocks])
        kv_indptr.append(kv_indptr[-1] + num_blocks)
        kv_last_page_len = seq_len % block_size
        if kv_last_page_len == 0:
            kv_last_page_len = block_size
        kv_last_page_lens.append(kv_last_page_len)
        qo_indptr.append(qo_indptr[-1] + query_lens[i])

    qo_indptr = torch.tensor(qo_indptr, dtype=torch.int32)
    kv_indptr = torch.tensor(kv_indptr, dtype=torch.int32)
    kv_indices = torch.tensor(kv_indices, dtype=torch.int32)
    kv_last_page_lens = torch.tensor(kv_last_page_lens, dtype=torch.int32)

    workspace_buffer = torch.empty(128 * 1024 * 1024, dtype=torch.int8)
    wrapper = flashinfer.BatchPrefillWithPagedKVCacheWrapper(
        workspace_buffer, "NHD")
    wrapper.begin_forward(
        qo_indptr,
        kv_indptr,
        kv_indices,
        kv_last_page_lens,
        num_query_heads,
        num_kv_heads,
        head_size,
        block_size,
    )

    output = wrapper.forward(query,
                             kv_cache_fp8,
                             logits_soft_cap=soft_cap,
                             k_scale=k_scale,
                             v_scale=v_scale)

    ref_output = ref_paged_attn(query=query,
                                key_cache=key_cache.squeeze(1),
                                value_cache=value_cache.squeeze(1),
                                query_lens=query_lens,
                                kv_lens=kv_lens,
                                block_tables=block_tables,
                                scale=scale,
                                soft_cap=soft_cap)
    del query
    del block_tables
    # verify prefill fp8
    torch.testing.assert_close(output, ref_output, atol=1e-2, rtol=1e-2), \
        f"{torch.max(torch.abs(output - ref_output))}"
  321. @pytest.mark.parametrize("kv_lens", [[1328, 18, 463], [1, 54, 293, 70]])
  322. @pytest.mark.parametrize("num_heads", [(32, 8), (64, 8), (6, 1)])
  323. @pytest.mark.parametrize("head_size", HEAD_SIZES)
  324. @pytest.mark.parametrize("block_size", BLOCK_SIZES)
  325. @pytest.mark.parametrize("dtype", DTYPES)
  326. @pytest.mark.parametrize("soft_cap", [None, 30.0, 50.0])
  327. @torch.inference_mode
  328. def test_flashinfer_decode_with_paged_fp8_kv(
  329. kv_lens: List[int],
  330. num_heads: Tuple[int, int],
  331. head_size: int,
  332. dtype: torch.dtype,
  333. block_size: int,
  334. soft_cap: Optional[float],
  335. ) -> None:
  336. # test doesn't work for num_heads = (16,16)
  337. torch.set_default_device("cuda")
  338. torch.cuda.manual_seed_all(0)
  339. num_seqs = len(kv_lens)
  340. num_query_heads = num_heads[0]
  341. num_kv_heads = num_heads[1]
  342. assert num_query_heads % num_kv_heads == 0
  343. max_kv_len = max(kv_lens)
  344. scale = head_size**-0.5
  345. use_tensor_cores = (num_query_heads // num_kv_heads) > 4
  346. kv_cache_dtype = torch.float8_e4m3fn
  347. query = torch.randn(num_seqs, num_query_heads, head_size, dtype=dtype)
  348. NUM_BLOCKS_FP8 = 2048
  349. key_value_cache = torch.randn(NUM_BLOCKS_FP8,
  350. 2,
  351. block_size,
  352. num_kv_heads,
  353. head_size,
  354. dtype=dtype)
  355. key_cache, value_cache = torch.chunk(key_value_cache, 2, dim=1)
  356. key_cache /= head_size**0.5
  357. value_cache /= head_size**0.5
  358. k_scale = key_cache.amax().item() / 448.0
  359. v_scale = value_cache.amax().item() / 448.0
  360. key_cache_fp8 = (key_cache / k_scale).to(kv_cache_dtype)
  361. value_cache_fp8 = (value_cache / v_scale).to(kv_cache_dtype)
  362. assert (key_cache_fp8.shape[1] == 1 and value_cache_fp8.shape[1] == 1)
  363. kv_cache_fp8 = torch.cat([key_cache_fp8, value_cache_fp8], dim=1)
  364. max_num_blocks_per_seq = (max_kv_len + block_size - 1) // block_size
  365. block_tables = torch.randint(0,
  366. NUM_BLOCKS_FP8,
  367. (num_seqs, max_num_blocks_per_seq),
  368. dtype=torch.int32)
  369. kv_indptr = [0]
  370. kv_indices = []
  371. kv_last_page_lens = []
  372. for i in range(num_seqs):
  373. seq_len = kv_lens[i]
  374. assert seq_len > 0
  375. num_blocks = (seq_len + block_size - 1) // block_size
  376. kv_indices.extend(block_tables[i, :num_blocks])
  377. kv_indptr.append(kv_indptr[-1] + num_blocks)
  378. kv_last_page_len = seq_len % block_size
  379. if kv_last_page_len == 0:
  380. kv_last_page_len = block_size
  381. kv_last_page_lens.append(kv_last_page_len)
  382. kv_indptr = torch.tensor(kv_indptr, dtype=torch.int32)
  383. kv_indices = torch.tensor(kv_indices, dtype=torch.int32)
  384. kv_last_page_lens = torch.tensor(kv_last_page_lens, dtype=torch.int32)
  385. workspace_buffer = torch.empty(128 * 1024 * 1024, dtype=torch.int8)
  386. wrapper = flashinfer.\
  387. BatchDecodeWithPagedKVCacheWrapper(workspace_buffer, "NHD",
  388. use_tensor_cores=use_tensor_cores)
  389. wrapper.begin_forward(kv_indptr,
  390. kv_indices,
  391. kv_last_page_lens,
  392. num_query_heads,
  393. num_kv_heads,
  394. head_size,
  395. block_size,
  396. "NONE",
  397. data_type=dtype)
  398. output = wrapper.forward(query,
  399. kv_cache_fp8,
  400. logits_soft_cap=soft_cap,
  401. k_scale=k_scale,
  402. v_scale=v_scale)
  403. key_cache = key_value_cache[:, 0, :, :, :].squeeze(1)
  404. value_cache = key_value_cache[:, 1, :, :, :].squeeze(1)
  405. ref_output = ref_paged_attn(query=query,
  406. key_cache=key_cache,
  407. value_cache=value_cache,
  408. query_lens=[1] * num_seqs,
  409. kv_lens=kv_lens,
  410. block_tables=block_tables,
  411. scale=scale,
  412. soft_cap=soft_cap)
  413. # Temporary fix: Increasing the tolerance. Seems like a flashinfer issue
  414. torch.testing.assert_close(output, ref_output, atol=2e-2, rtol=1e-2), \
  415. f"{torch.max(torch.abs(output - ref_output))}"