test_multi_step_worker.py 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788
import random
from typing import Dict, List
from unittest.mock import MagicMock

import pytest
import torch

from aphrodite.common.sequence import (ExecuteModelRequest, HiddenStates,
                                       Logprob, SamplerOutput, get_all_seq_ids)
from aphrodite.modeling.utils import set_random_seed
from aphrodite.spec_decode.draft_model_runner import TP1DraftModelRunner
from aphrodite.spec_decode.multi_step_worker import MultiStepWorker
from aphrodite.spec_decode.top1_proposer import Top1Proposer
from aphrodite.task_handler.worker import Worker

from .utils import (assert_logprobs_dict_allclose, create_batch,
                    create_seq_group_metadata_from_prompts, create_worker,
                    patch_execute_model_with_seeds, zero_kv_cache)
  16. @pytest.mark.parametrize("num_steps", list(range(1, 17)))
  17. def test_assert_enough_kv_space(num_steps: int):
  18. """Test that the multi step worker checks for sufficient space in the KV
  19. cache. It should throw if it cannot run all the steps.
  20. """
  21. block_size = 16
  22. num_gpu_blocks = 2048 // block_size
  23. prompts = [
  24. list(range(block_size * 3)),
  25. list(range(block_size * 2)),
  26. ]
  27. prev_output_tokens = [
  28. list(range(block_size * 1)),
  29. list(range(block_size * 2)),
  30. ]
  31. final_prompt_lens = [
  32. len(prompt + output) + num_steps
  33. for prompt, output in zip(prompts, prev_output_tokens)
  34. ]
  35. inputs = create_seq_group_metadata_from_prompts(
  36. prompts,
  37. num_gpu_blocks,
  38. block_size,
  39. final_prompt_lens,
  40. continuations=prev_output_tokens,
  41. )
  42. assert_enough_kv_space = MultiStepWorker._assert_enough_kv_space # pylint: disable=protected-access
  43. worker = MagicMock()
  44. worker.model_runner.block_size = block_size
  45. for seq_group_metadata in inputs:
  46. original_block_tables = seq_group_metadata.block_tables
  47. # No exception.
  48. assert_enough_kv_space(worker, inputs, num_steps)
  49. seq_group_metadata.block_tables = {
  50. seq_id: []
  51. for seq_id, physical_blocks in original_block_tables.items()
  52. }
  53. # Expect exception.
  54. with pytest.raises(
  55. ValueError, match="times but found insufficient KV space for"
  56. ):
  57. assert_enough_kv_space(worker, inputs, num_steps)
  58. seq_group_metadata.block_tables = original_block_tables
  59. @torch.inference_mode()
  60. def test_same_output_for_single_step():
  61. """Verify the multi step worker produces the same output as the normal
  62. worker for num_steps=1.
  63. """
  64. seed = 100
  65. model_name = "JackFram/llama-68m"
  66. block_size = 32
  67. num_gpu_blocks = 2048 // block_size
  68. multi_step_worker = create_worker(
  69. MultiStepWorker,
  70. model_name,
  71. block_size,
  72. num_gpu_blocks,
  73. seed,
  74. model_runner_cls=TP1DraftModelRunner,
  75. )
  76. worker = create_worker(
  77. Worker,
  78. model_name,
  79. block_size,
  80. num_gpu_blocks,
  81. seed,
  82. )
  83. # multi_step_worker.model_runner = worker.model_runner
  84. # multi_step_worker.cache_engine = worker.cache_engine
  85. num_steps = 1
  86. prompts = [
  87. [1, 2, 3, 4, 5],
  88. [6, 7, 8, 9, 10],
  89. ]
  90. final_prompt_lens = [len(prompt) + num_steps for prompt in prompts]
  91. multi_step_seq_group = create_seq_group_metadata_from_prompts(
  92. prompts, num_gpu_blocks, block_size, final_prompt_lens=final_prompt_lens
  93. )
  94. zero_kv_cache(multi_step_worker.cache_engine)
  95. set_random_seed(seed)
  96. actual_output, _ = multi_step_worker.sampler_output(
  97. execute_model_req=ExecuteModelRequest(
  98. seq_group_metadata_list=multi_step_seq_group
  99. ),
  100. sample_len=num_steps,
  101. seq_ids_with_bonus_token_in_last_step=set(),
  102. )
  103. assert len(actual_output) == num_steps
  104. actual_output = actual_output[0]
  105. single_step_seq_group = create_seq_group_metadata_from_prompts(
  106. prompts, num_gpu_blocks, block_size, final_prompt_lens=final_prompt_lens
  107. )
  108. zero_kv_cache(worker.cache_engine)
  109. set_random_seed(seed)
  110. expected_output = worker.execute_model(
  111. execute_model_req=ExecuteModelRequest(
  112. seq_group_metadata_list=single_step_seq_group
  113. )
  114. )[0]
  115. actual_token_ids = [
  116. output.samples[0].output_token for output in actual_output
  117. ]
  118. actual_logprobs = [output.samples[0].logprobs for output in actual_output]
  119. expected_token_ids = [
  120. output.samples[0].output_token for output in expected_output
  121. ]
  122. expected_logprobs = [
  123. output.samples[0].logprobs for output in expected_output
  124. ]
  125. assert actual_token_ids == expected_token_ids
  126. print(f"{actual_logprobs=}")
  127. print(f"{expected_logprobs=}")
  128. assert_logprobs_dict_allclose(actual_logprobs, expected_logprobs)
  129. @torch.inference_mode()
  130. def test_same_output_for_multi_step():
  131. """Verify the multi-step worker produces the same output as the normal
  132. worker when num_steps > 1. This test runs the multi-step worker once, and
  133. then runs the worker num_steps times, and compares the output.
  134. """
  135. seed = 100
  136. model_name = "JackFram/llama-68m"
  137. block_size = 16
  138. num_gpu_blocks = 2048 // block_size
  139. multi_step_worker = create_worker(
  140. MultiStepWorker,
  141. model_name,
  142. block_size,
  143. num_gpu_blocks,
  144. seed,
  145. model_runner_cls=TP1DraftModelRunner,
  146. )
  147. worker = create_worker(
  148. Worker,
  149. model_name,
  150. block_size,
  151. num_gpu_blocks,
  152. seed,
  153. )
  154. # Make sure we go over the block boundary.
  155. num_steps = block_size + 1
  156. random.seed(seed)
  157. prompts = [
  158. [random.randint(0, 1000) for _ in range(random.randint(10, 20))]
  159. for _ in range(10)
  160. ]
  161. final_prompt_lens = [len(prompt) + num_steps for prompt in prompts]
  162. rand_seeds = list(random.randint(0, 100) for _ in range(num_steps))
  163. multi_step_worker.execute_model = patch_execute_model_with_seeds(
  164. multi_step_worker, rand_seeds
  165. )
  166. worker.execute_model = patch_execute_model_with_seeds(worker, rand_seeds)
  167. continuations = [[1] for _ in prompts]
  168. seq_group_metadata_list = create_seq_group_metadata_from_prompts(
  169. prompts,
  170. num_gpu_blocks,
  171. block_size,
  172. continuations=continuations,
  173. final_prompt_lens=final_prompt_lens,
  174. )
  175. # Run multi-step.
  176. zero_kv_cache(multi_step_worker.cache_engine)
  177. set_random_seed(seed)
  178. multi_step_output, _ = multi_step_worker.sampler_output(
  179. execute_model_req=ExecuteModelRequest(
  180. seq_group_metadata_list=seq_group_metadata_list
  181. ),
  182. sample_len=num_steps,
  183. seq_ids_with_bonus_token_in_last_step=set(),
  184. )
  185. # Run single-step repeatedly.
  186. zero_kv_cache(worker.cache_engine)
  187. single_step_output: List[SamplerOutput] = []
  188. continuations = [[1] for _ in prompts]
  189. set_random_seed(seed)
  190. for _ in multi_step_output:
  191. seq_group_metadata_list = create_seq_group_metadata_from_prompts(
  192. prompts,
  193. num_gpu_blocks,
  194. block_size,
  195. continuations=continuations,
  196. final_prompt_lens=final_prompt_lens,
  197. )
  198. single_step_output.extend(
  199. worker.execute_model(
  200. execute_model_req=ExecuteModelRequest(
  201. seq_group_metadata_list=seq_group_metadata_list
  202. )
  203. )
  204. )
  205. # Append output tokens to new sequence data.
  206. for i, seq_group_output in enumerate(single_step_output[-1]):
  207. continuations[i].append(seq_group_output.samples[0].output_token)
  208. # Get token ids and logprobs for comparison.
  209. multi_step_output_logprobs: List[List[Dict[int, Logprob]]] = [
  210. [] for _ in prompts
  211. ]
  212. single_step_output_logprobs: List[List[Dict[int, Logprob]]] = [
  213. [] for _ in prompts
  214. ]
  215. multi_step_output_token_ids: List[List[int]] = [[] for _ in prompts]
  216. single_step_output_token_ids: List[List[int]] = [[] for _ in prompts]
  217. for i, _ in enumerate(prompts):
  218. for multi_step, single_step in zip(
  219. multi_step_output, single_step_output
  220. ):
  221. multi_step_output_token_ids[i].append(
  222. multi_step[i].samples[0].output_token
  223. )
  224. single_step_output_token_ids[i].append(
  225. single_step[i].samples[0].output_token
  226. )
  227. multi_step_output_logprobs[i].append(
  228. multi_step[i].samples[0].logprobs
  229. )
  230. single_step_output_logprobs[i].append(
  231. single_step[i].samples[0].logprobs
  232. )
  233. # Print per-sequence token ids
  234. for i, (multi_step_tokens, single_step_tokens) in enumerate(
  235. zip(multi_step_output_token_ids, single_step_output_token_ids)
  236. ):
  237. print(f"{i=} {multi_step_tokens=}")
  238. print(f"{i=} {single_step_tokens=}")
  239. print(f"{i=} equal {multi_step_tokens == single_step_tokens}")
  240. # Assert token ids are equal.
  241. for multi_step_tokens, single_step_tokens in zip(
  242. multi_step_output_token_ids, single_step_output_token_ids
  243. ):
  244. assert multi_step_tokens == single_step_tokens
  245. # Assert logprobs are equal.
  246. for multi_step_logprobs, single_step_logprobs in zip(
  247. multi_step_output_logprobs, single_step_output_logprobs
  248. ):
  249. assert_logprobs_dict_allclose(multi_step_logprobs, single_step_logprobs)
  250. @torch.inference_mode()
  251. def test_multi_step_with_batch_expansion_correct_output():
  252. """
  253. In this test we verify that the MultiStepWorker is able to handle bonus
  254. tokens correctly. The test verifies that if a sequence has a
  255. bonus token then the MultiStepWorker is able to expand the batch by adding
  256. new sequences corresponding to the sequences with bonus tokens. The
  257. expanded batch is then used for predicting the next tokens.
  258. """
  259. seed = 100
  260. model_name = "JackFram/llama-68m"
  261. block_size = 16
  262. num_gpu_blocks = 2048 // block_size
  263. batch_size = 128
  264. multi_step_worker = create_worker(
  265. MultiStepWorker,
  266. model_name,
  267. block_size,
  268. num_gpu_blocks,
  269. seed,
  270. model_runner_cls=TP1DraftModelRunner,
  271. )
  272. worker = create_worker(
  273. Worker,
  274. model_name,
  275. block_size,
  276. num_gpu_blocks,
  277. seed,
  278. )
  279. random.seed(seed)
  280. prompts = [[0] for _ in range(batch_size)]
  281. num_steps = 2
  282. final_prompt_lens = [(num_steps + 1) for prompt in prompts]
  283. rand_seeds = list(random.randint(0, 100) for _ in range(num_steps))
  284. multi_step_worker.execute_model = patch_execute_model_with_seeds(
  285. multi_step_worker, rand_seeds
  286. )
  287. worker.execute_model = patch_execute_model_with_seeds(worker, rand_seeds)
  288. # Create the test continuations
  289. continuations = [[random.randint(0, 1000)] for _ in prompts]
  290. seq_group_metadata_list = create_seq_group_metadata_from_prompts(
  291. prompts,
  292. num_gpu_blocks,
  293. block_size,
  294. continuations=continuations,
  295. final_prompt_lens=final_prompt_lens,
  296. )
  297. # Run single-step twice to generate 2 tokens. This
  298. # will simulate the bonus token case with the second token
  299. # being the bonus token.
  300. zero_kv_cache(worker.cache_engine)
  301. single_step_output: List[SamplerOutput] = []
  302. set_random_seed(seed)
  303. for _ in range(num_steps):
  304. seq_group_metadata_list = create_seq_group_metadata_from_prompts(
  305. prompts,
  306. num_gpu_blocks,
  307. block_size,
  308. continuations=continuations,
  309. final_prompt_lens=final_prompt_lens,
  310. )
  311. single_step_output.extend(
  312. worker.execute_model(
  313. execute_model_req=ExecuteModelRequest(
  314. seq_group_metadata_list=seq_group_metadata_list
  315. )
  316. )
  317. )
  318. # Append output tokens to new sequence data.
  319. for i, seq_group_output in enumerate(single_step_output[-1]):
  320. continuations[i].append(seq_group_output.samples[0].output_token)
  321. # Create continuations for the MultiStepWorker. The continuations have
  322. # 2 tokens in order to simulate the bonus token case.
  323. multi_step_continuations = []
  324. for continuation in continuations:
  325. multi_step_continuations.append(continuation[:2])
  326. seq_group_metadata_list = create_seq_group_metadata_from_prompts(
  327. prompts,
  328. num_gpu_blocks,
  329. block_size,
  330. continuations=multi_step_continuations,
  331. final_prompt_lens=final_prompt_lens,
  332. )
  333. # Run multi-step and verify that the third token prediction is accurate
  334. # for all sequences.
  335. zero_kv_cache(multi_step_worker.cache_engine)
  336. all_seq_ids = {i for i in range(batch_size)}
  337. multi_step_output, _ = multi_step_worker.sampler_output(
  338. execute_model_req=ExecuteModelRequest(
  339. seq_group_metadata_list=seq_group_metadata_list
  340. ),
  341. sample_len=1,
  342. seq_ids_with_bonus_token_in_last_step=all_seq_ids,
  343. )
  344. for index, output in enumerate(multi_step_output[-1].outputs):
  345. assert continuations[index][-1] == output.samples[0].output_token
  346. @torch.inference_mode()
  347. def test_multi_step_with_batch_expansion_incorrect_output():
  348. """
  349. Tests the MultiStepWorker's ability to handle batch expansion with bonus
  350. tokens in a negative case scenario. This test provides the MultiStepWorker
  351. with a batch containing sequences with bonus tokens but specifies the
  352. sequence IDs with bonus tokens incorrectly. The test verifies that the
  353. MultiStepWorker generates correct tokens for the sequences where the
  354. sequence ID is specified correctly and incorrect tokens for those where
  355. the sequence ID is specified incorrectly.
  356. """
  357. seed = 100
  358. model_name = "JackFram/llama-68m"
  359. block_size = 16
  360. num_gpu_blocks = 2048 // block_size
  361. batch_size = 128
  362. multi_step_worker = create_worker(
  363. MultiStepWorker,
  364. model_name,
  365. block_size,
  366. num_gpu_blocks,
  367. seed,
  368. model_runner_cls=TP1DraftModelRunner,
  369. )
  370. worker = create_worker(
  371. Worker,
  372. model_name,
  373. block_size,
  374. num_gpu_blocks,
  375. seed,
  376. )
  377. random.seed(seed)
  378. prompts = [[0] for _ in range(batch_size)]
  379. num_steps = 2
  380. final_prompt_lens = [(num_steps + 1) for prompt in prompts]
  381. rand_seeds = list(random.randint(0, 100) for _ in range(num_steps))
  382. multi_step_worker.execute_model = patch_execute_model_with_seeds(
  383. multi_step_worker, rand_seeds
  384. )
  385. worker.execute_model = patch_execute_model_with_seeds(worker, rand_seeds)
  386. # Create the test continuations
  387. continuations = [[random.randint(0, 1000)] for _ in prompts]
  388. seq_group_metadata_list = create_seq_group_metadata_from_prompts(
  389. prompts,
  390. num_gpu_blocks,
  391. block_size,
  392. continuations=continuations,
  393. final_prompt_lens=final_prompt_lens,
  394. )
  395. # Run single-step twice to generate 2 tokens. This
  396. # will simulate the bonus token case with the second token
  397. # being the bonus token.
  398. zero_kv_cache(worker.cache_engine)
  399. single_step_output: List[SamplerOutput] = []
  400. set_random_seed(seed)
  401. for _ in range(num_steps):
  402. seq_group_metadata_list = create_seq_group_metadata_from_prompts(
  403. prompts,
  404. num_gpu_blocks,
  405. block_size,
  406. continuations=continuations,
  407. final_prompt_lens=final_prompt_lens,
  408. )
  409. single_step_output.extend(
  410. worker.execute_model(
  411. execute_model_req=ExecuteModelRequest(
  412. seq_group_metadata_list=seq_group_metadata_list
  413. )
  414. )
  415. )
  416. # Append output tokens to new sequence data.
  417. for i, seq_group_output in enumerate(single_step_output[-1]):
  418. continuations[i].append(seq_group_output.samples[0].output_token)
  419. # Create continuations for the MultiStepWorker. The continuations have
  420. # 2 tokens in order to simulate the bonus token case.
  421. multi_step_continuations = []
  422. for continuation in continuations:
  423. multi_step_continuations.append(continuation[:2])
  424. seq_group_metadata_list = create_seq_group_metadata_from_prompts(
  425. prompts,
  426. num_gpu_blocks,
  427. block_size,
  428. continuations=multi_step_continuations,
  429. final_prompt_lens=final_prompt_lens,
  430. )
  431. # Run multi-step. In this run INCORRECTLY specify that only the odd number
  432. # sequences have bonus tokens. Verify that with this setting the third token
  433. # prediction is accurate only for the odd numbered sequences. Also verify
  434. # that the prediction might be wrong for some of the even numbered
  435. # sequences.
  436. zero_kv_cache(multi_step_worker.cache_engine)
  437. set_random_seed(seed)
  438. odd_seq_ids = {i for i in range(batch_size) if i % 2 != 0}
  439. multi_step_output, _ = multi_step_worker.sampler_output(
  440. execute_model_req=ExecuteModelRequest(
  441. seq_group_metadata_list=seq_group_metadata_list
  442. ),
  443. sample_len=1,
  444. seq_ids_with_bonus_token_in_last_step=odd_seq_ids,
  445. )
  446. num_mismatch = 0
  447. for index, output in enumerate(multi_step_output[-1].outputs):
  448. if (index % 2) != 0:
  449. assert continuations[index][-1] == output.samples[0].output_token
  450. elif continuations[index][-1] != output.samples[0].output_token:
  451. num_mismatch += 1
  452. # The prediction is accurate for some of the sequences even without proper
  453. # handling of the bonus tokens. Hence verify that the number of sequences
  454. # for which there is a mismatch is > 0.
  455. assert num_mismatch > 0
  456. @torch.inference_mode()
  457. def test_draft_proposals_full_speculation_len():
  458. """Verify Top1Proposer correctly handles case where all sequences
  459. can speculate.
  460. """
  461. k = 10
  462. batch_size = 32
  463. vocab_size = 32_000
  464. device = "cuda:0"
  465. draft_worker = MagicMock()
  466. proposer = Top1Proposer(
  467. worker=draft_worker,
  468. device=device,
  469. vocab_size=vocab_size,
  470. max_proposal_len=2048,
  471. )
  472. draft_worker.sampler_output.return_value = (
  473. [
  474. SamplerOutput(
  475. outputs=[],
  476. sampled_token_probs=torch.rand(
  477. batch_size, vocab_size, device=device, dtype=torch.float32
  478. ),
  479. logprobs=torch.rand(
  480. batch_size, vocab_size, device=device, dtype=torch.float32
  481. ),
  482. sampled_token_ids=torch.randint(
  483. low=0,
  484. high=vocab_size,
  485. size=(batch_size,),
  486. device=device,
  487. dtype=torch.long,
  488. ),
  489. )
  490. for _ in range(k)
  491. ],
  492. True,
  493. )
  494. seq_group_metadata_list, _, _ = create_batch(batch_size, k)
  495. proposals = proposer.get_spec_proposals(
  496. execute_model_req=ExecuteModelRequest(
  497. seq_group_metadata_list=seq_group_metadata_list,
  498. num_lookahead_slots=k,
  499. ),
  500. seq_ids_with_bonus_token_in_last_step=set(),
  501. )
  502. assert torch.is_tensor(proposals.proposal_token_ids)
  503. assert torch.is_tensor(proposals.proposal_probs)
  504. assert proposals.proposal_token_ids.shape == torch.Size([batch_size, k])
  505. assert proposals.proposal_probs.shape[:-1] == torch.Size([batch_size, k])
  506. assert proposals.proposal_lens.shape == torch.Size([batch_size])
  507. assert proposals.proposal_lens.tolist() == [k for _ in range(batch_size)]
  508. @torch.inference_mode()
  509. def test_draft_proposals_no_speculations():
  510. """Verify Top1Proposer correctly handles case where no sequences
  511. can speculate.
  512. """
  513. k = 10
  514. batch_size = 32
  515. vocab_size = 32_000
  516. device = "cuda:0"
  517. prompt_len = 10
  518. draft_worker = MagicMock()
  519. proposer = Top1Proposer(
  520. worker=draft_worker,
  521. device=device,
  522. vocab_size=vocab_size,
  523. max_proposal_len=prompt_len + k - 1,
  524. )
  525. seq_group_metadata_list, _, _ = create_batch(
  526. batch_size, k, prompt_len=prompt_len
  527. )
  528. proposals = proposer.get_spec_proposals(
  529. execute_model_req=ExecuteModelRequest(
  530. seq_group_metadata_list=seq_group_metadata_list,
  531. num_lookahead_slots=k,
  532. ),
  533. seq_ids_with_bonus_token_in_last_step=set(),
  534. )
  535. assert torch.is_tensor(proposals.proposal_token_ids)
  536. assert torch.is_tensor(proposals.proposal_probs)
  537. assert proposals.proposal_token_ids.shape == torch.Size([batch_size, k])
  538. assert proposals.proposal_probs.shape[:-1] == torch.Size([batch_size, k])
  539. assert proposals.proposal_lens.shape == torch.Size([batch_size])
  540. assert proposals.proposal_lens.tolist() == [0 for _ in range(batch_size)]
  541. @torch.inference_mode()
  542. def test_draft_proposals_mixed_k():
  543. """Verify Top1Proposer correctly handles case some sequences can
  544. speculate and some can't.
  545. """
  546. k = 10
  547. batch_size = 32
  548. vocab_size = 32_000
  549. device = "cuda:0"
  550. small_prompt_len = 5
  551. long_prompt_len = 10
  552. prev_output_token_len = 20
  553. expected_num_proposal_seqs = 6
  554. expected_num_no_proposal_seqs = batch_size - expected_num_proposal_seqs
  555. prompt_len = (
  556. [small_prompt_len for _ in range(expected_num_proposal_seqs - 1)]
  557. + [long_prompt_len for _ in range(expected_num_no_proposal_seqs)]
  558. + [small_prompt_len]
  559. )
  560. draft_worker = MagicMock()
  561. proposer = Top1Proposer(
  562. worker=draft_worker,
  563. device=device,
  564. vocab_size=vocab_size,
  565. max_proposal_len=long_prompt_len + prev_output_token_len + k - 1,
  566. )
  567. draft_worker.sampler_output.return_value = (
  568. [
  569. SamplerOutput(
  570. outputs=[],
  571. sampled_token_probs=torch.rand(
  572. expected_num_proposal_seqs,
  573. vocab_size,
  574. device=device,
  575. dtype=torch.float32,
  576. ),
  577. logprobs=torch.rand(
  578. expected_num_proposal_seqs,
  579. vocab_size,
  580. device=device,
  581. dtype=torch.float32,
  582. ),
  583. sampled_token_ids=torch.randint(
  584. low=0,
  585. high=vocab_size,
  586. size=(expected_num_proposal_seqs,),
  587. device=device,
  588. dtype=torch.long,
  589. ),
  590. )
  591. for _ in range(k)
  592. ],
  593. True,
  594. )
  595. seq_group_metadata_list, _, _ = create_batch(
  596. batch_size,
  597. k,
  598. prompt_len=prompt_len,
  599. prev_output_token_len=prev_output_token_len,
  600. )
  601. proposals = proposer.get_spec_proposals(
  602. execute_model_req=ExecuteModelRequest(
  603. seq_group_metadata_list=seq_group_metadata_list,
  604. num_lookahead_slots=k,
  605. ),
  606. seq_ids_with_bonus_token_in_last_step=set(),
  607. )
  608. assert torch.is_tensor(proposals.proposal_token_ids)
  609. assert torch.is_tensor(proposals.proposal_probs)
  610. assert proposals.proposal_token_ids.shape == torch.Size([batch_size, k])
  611. assert proposals.proposal_probs.shape[:-1] == torch.Size([batch_size, k])
  612. assert proposals.proposal_lens.shape == torch.Size([batch_size])
  613. assert proposals.proposal_lens.tolist() == [
  614. k for _ in range(expected_num_proposal_seqs - 1)
  615. ] + [0 for _ in range(expected_num_no_proposal_seqs)] + [k]
  616. @torch.inference_mode()
  617. def test_use_draft_model_runner_advance_step():
  618. """Verify that draft model runner triggers advance step
  619. when applicable.
  620. """
  621. seed = 100
  622. model_name = "JackFram/llama-68m"
  623. k = 5
  624. batch_size = 32
  625. block_size = 32
  626. num_gpu_blocks = 2048 // block_size
  627. worker = create_worker(
  628. MultiStepWorker,
  629. model_name,
  630. block_size,
  631. num_gpu_blocks,
  632. seed,
  633. model_runner_cls=TP1DraftModelRunner,
  634. )
  635. # Mock "_gpu_advance_step" to raise an exception when called.
  636. exception_secret = "artificial stop"
  637. worker.model_runner._gpu_advance_step = MagicMock()
  638. worker.model_runner._gpu_advance_step.side_effect = ValueError(
  639. exception_secret
  640. )
  641. seq_group_metadata_list, _, _ = create_batch(batch_size, k)
  642. # Fallback (should not call) when num_steps=1.
  643. execute_model_req = ExecuteModelRequest(
  644. seq_group_metadata_list=seq_group_metadata_list,
  645. num_lookahead_slots=k,
  646. num_steps=1,
  647. )
  648. worker.execute_model(execute_model_req=execute_model_req)
  649. # Expect exception if _gpu_advance_step is called.
  650. execute_model_req = ExecuteModelRequest(
  651. seq_group_metadata_list=seq_group_metadata_list,
  652. num_lookahead_slots=k,
  653. num_steps=k,
  654. )
  655. with pytest.raises(ValueError, match=exception_secret):
  656. worker.execute_model(execute_model_req=execute_model_req)
  657. call_args_list = worker.model_runner._gpu_advance_step.call_args_list
  658. assert len(call_args_list) == 1
  659. @torch.inference_mode()
  660. def test_expand_execute_model_request_sync_with_expand_hidden_states():
  661. """
  662. In this test we verify that the logic for expanding the
  663. seq_group_metadata_list remains in sync with the expansion logic of
  664. the HiddenStates in _expand_execute_model_request.
  665. """
  666. k = 5
  667. batch_size = 16
  668. seq_with_bonus_token_in_last_step = [1, 3, 8, 10, 13, 15]
  669. seq_group_metadata_list, _, _ = create_batch(batch_size, k)
  670. execute_model_request = ExecuteModelRequest(
  671. seq_group_metadata_list,
  672. previous_hidden_states=HiddenStates(
  673. torch.arange(batch_size),
  674. seq_group_metadata_list,
  675. torch.arange(batch_size, 2 * batch_size),
  676. ),
  677. )
  678. (
  679. expanded_execute_model_request,
  680. orig_seq_group_ids,
  681. ) = MultiStepWorker._expand_execute_model_request(
  682. execute_model_request, seq_with_bonus_token_in_last_step
  683. )
  684. all_seq_ids = torch.tensor(
  685. get_all_seq_ids(expanded_execute_model_request.seq_group_metadata_list)
  686. )
  687. ref_expanded_hidden_states = all_seq_ids + batch_size
  688. ref_expanded_hidden_states[orig_seq_group_ids] -= batch_size
  689. assert (ref_expanded_hidden_states == expanded_execute_model_request.
  690. previous_hidden_states.hidden_states).all().item()