"""This docstring details important information on the testing methodology.

Most of the tests rely on "greedy equality", where we expect the output of
speculative decoding on a sequence to exactly match the output of normal
non-speculative decoding.

Since speculative decoding with rejection sampling guarantees that the output
distribution matches the target model's output distribution (up to hardware
numerics, see https://arxiv.org/pdf/2302.01318.pdf), we can expect greedy
equality.

However, we still need to verify below scenario could be passed:
  * Batch size 1 greedy equality
  * Batch size >1 greedy equality
  * Test greedy equality under preemption
  * Test greedy equality under various number of speculative tokens.

With those tests, we can say at least, MLPSpeculator would not break the
correctness for the target model outputs.
"""
from unittest.mock import patch

import pytest

from aphrodite.modeling.layers.vocab_parallel_embedding import pad_vocab_size

from .conftest import run_equality_correctness_test

# main model
MAIN_MODEL = "JackFram/llama-160m"

# speculative model
SPEC_MODEL = "ibm-fms/llama-160m-accelerator"

# max. number of speculative tokens: this corresponds to
# n_predict in the config.json of the speculator model.
MAX_SPEC_TOKENS = 3

# precision
PRECISION = "float32"
  30. @pytest.mark.parametrize(
  31. "common_llm_kwargs",
  32. [{
  33. # Skip cuda graph recording for fast test.
  34. "enforce_eager": True,
  35. # Required for spec decode.
  36. "use_v2_block_manager": True,
  37. # Print spec metrics.
  38. "disable_log_stats": False,
  39. # Precision
  40. "dtype": PRECISION,
  41. # Main model
  42. "model_name": MAIN_MODEL,
  43. }])
  44. @pytest.mark.parametrize("per_test_common_llm_kwargs", [{}])
  45. @pytest.mark.parametrize("baseline_llm_kwargs", [{}])
  46. @pytest.mark.parametrize("test_llm_kwargs", [
  47. {
  48. "speculative_model": SPEC_MODEL,
  49. },
  50. ])
  51. @pytest.mark.parametrize("output_len", [
  52. 128,
  53. ])
  54. @pytest.mark.parametrize("batch_size", [1, 32])
  55. @pytest.mark.parametrize("seed", [1])
  56. def test_mlp_e2e_greedy_correctness(aphrodite_runner, common_llm_kwargs,
  57. per_test_common_llm_kwargs,
  58. baseline_llm_kwargs, test_llm_kwargs,
  59. batch_size: int, output_len: int,
  60. seed: int):
  61. """Verify greedy equality with different batch size."""
  62. run_equality_correctness_test(aphrodite_runner,
  63. common_llm_kwargs,
  64. per_test_common_llm_kwargs,
  65. baseline_llm_kwargs,
  66. test_llm_kwargs,
  67. batch_size,
  68. max_output_len=output_len,
  69. seed=seed,
  70. temperature=0.0)
  71. @pytest.mark.parametrize(
  72. "common_llm_kwargs",
  73. [{
  74. # Skip cuda graph recording for fast test.
  75. "enforce_eager": True,
  76. # Required for spec decode.
  77. "use_v2_block_manager": True,
  78. # Print spec metrics.
  79. "disable_log_stats": False,
  80. # Precision
  81. "dtype": PRECISION,
  82. # Main model
  83. "model_name": MAIN_MODEL,
  84. }])
  85. @pytest.mark.parametrize("per_test_common_llm_kwargs", [{}])
  86. @pytest.mark.parametrize("baseline_llm_kwargs", [{}])
  87. @pytest.mark.parametrize("test_llm_kwargs", [
  88. {
  89. "speculative_model": SPEC_MODEL,
  90. },
  91. ])
  92. @pytest.mark.parametrize("output_len", [2048])
  93. @pytest.mark.parametrize("batch_size", [1, 32])
  94. @pytest.mark.parametrize("seed", [1])
  95. def test_mlp_e2e_acceptance_rate(aphrodite_runner, common_llm_kwargs,
  96. per_test_common_llm_kwargs,
  97. baseline_llm_kwargs, test_llm_kwargs,
  98. batch_size: int, output_len: int, seed: int):
  99. """Verify acceptance rate with different batch size and large output
  100. length."""
  101. run_equality_correctness_test(aphrodite_runner,
  102. common_llm_kwargs,
  103. per_test_common_llm_kwargs,
  104. baseline_llm_kwargs,
  105. test_llm_kwargs,
  106. batch_size,
  107. max_output_len=output_len,
  108. temperature=0.0,
  109. seed=seed,
  110. expected_acceptance_rate=0.48)
  111. @pytest.mark.parametrize(
  112. "common_llm_kwargs",
  113. [{
  114. # Skip cuda graph recording for fast test.
  115. "enforce_eager": True,
  116. # Required for spec decode.
  117. "use_v2_block_manager": True,
  118. # Print spec metrics.
  119. "disable_log_stats": False,
  120. # Precision
  121. "dtype": PRECISION,
  122. # Main model
  123. "model_name": MAIN_MODEL,
  124. # Speculative model
  125. "speculative_model": SPEC_MODEL,
  126. }])
  127. @pytest.mark.parametrize("per_test_common_llm_kwargs", [{}])
  128. @pytest.mark.parametrize("baseline_llm_kwargs", [{"seed": 1}])
  129. @pytest.mark.parametrize("test_llm_kwargs", [{"seed": 5}])
  130. @pytest.mark.parametrize("output_len", [64])
  131. @pytest.mark.parametrize("batch_size", [1, 32])
  132. @pytest.mark.parametrize("temperature", [0.1, 1.0])
  133. @pytest.mark.parametrize("seed", [1])
  134. def test_mlp_e2e_seeded_correctness(aphrodite_runner, common_llm_kwargs,
  135. per_test_common_llm_kwargs,
  136. baseline_llm_kwargs, test_llm_kwargs,
  137. batch_size: int, output_len: int,
  138. temperature: float, seed: int):
  139. """Verify seeded runs produce the same output."""
  140. run_equality_correctness_test(aphrodite_runner,
  141. common_llm_kwargs,
  142. per_test_common_llm_kwargs,
  143. baseline_llm_kwargs,
  144. test_llm_kwargs,
  145. batch_size,
  146. max_output_len=output_len,
  147. temperature=temperature,
  148. seed=seed)
  149. # Ensure this same test does fail if we _don't_ include per-request seeds
  150. with pytest.raises(AssertionError):
  151. run_equality_correctness_test(aphrodite_runner,
  152. common_llm_kwargs,
  153. per_test_common_llm_kwargs,
  154. baseline_llm_kwargs,
  155. test_llm_kwargs,
  156. batch_size,
  157. max_output_len=output_len,
  158. temperature=temperature,
  159. seed=seed,
  160. disable_seed=True)
  161. @pytest.mark.parametrize(
  162. "common_llm_kwargs",
  163. [{
  164. "block_size": 8,
  165. # 2 for small prompt, 256//8 for generated.
  166. "num_gpu_blocks_override": 2 + 256 // 8,
  167. "max_model_len": (2 + 256 // 8) * 8,
  168. # Skip cuda graph recording for fast test.
  169. "enforce_eager": True,
  170. # Required for spec decode.
  171. "use_v2_block_manager": True,
  172. # Precision
  173. "dtype": PRECISION,
  174. # Main model
  175. "model_name": MAIN_MODEL,
  176. }])
  177. @pytest.mark.parametrize("per_test_common_llm_kwargs", [{}])
  178. @pytest.mark.parametrize("baseline_llm_kwargs", [{}])
  179. @pytest.mark.parametrize("test_llm_kwargs", [
  180. {
  181. "speculative_model": SPEC_MODEL,
  182. },
  183. ])
  184. @pytest.mark.parametrize(
  185. "output_len",
  186. [
  187. # Use small output len for fast test.
  188. 128,
  189. ])
  190. @pytest.mark.parametrize("batch_size", [4])
  191. @pytest.mark.parametrize("seed", [1])
  192. def test_mlp_e2e_greedy_correctness_with_preemption(
  193. aphrodite_runner, common_llm_kwargs, per_test_common_llm_kwargs,
  194. baseline_llm_kwargs, test_llm_kwargs, batch_size: int, output_len: int,
  195. seed: int):
  196. """Verify greedy equality, even when some sequences are preempted mid-
  197. generation.
  198. """
  199. run_equality_correctness_test(aphrodite_runner,
  200. common_llm_kwargs,
  201. per_test_common_llm_kwargs,
  202. baseline_llm_kwargs,
  203. test_llm_kwargs,
  204. batch_size,
  205. max_output_len=output_len,
  206. seed=seed,
  207. temperature=0.0)
  208. @pytest.mark.parametrize(
  209. "common_llm_kwargs",
  210. [{
  211. "block_size": 8,
  212. # 2 for small prompt, 256//8 for generated.
  213. "num_gpu_blocks_override": 2 + 256 // 8,
  214. "max_model_len": (2 + 256 // 8) * 8,
  215. # Skip cuda graph recording for fast test.
  216. "enforce_eager": True,
  217. # Required for spec decode.
  218. "use_v2_block_manager": True,
  219. # Precision
  220. "dtype": PRECISION,
  221. # Main model
  222. "model_name": MAIN_MODEL,
  223. }])
  224. @pytest.mark.parametrize("per_test_common_llm_kwargs", [{}])
  225. @pytest.mark.parametrize("baseline_llm_kwargs", [{}])
  226. @pytest.mark.parametrize("test_llm_kwargs", [
  227. {
  228. "speculative_model": SPEC_MODEL,
  229. },
  230. ])
  231. @pytest.mark.parametrize(
  232. "output_len",
  233. [
  234. # Use small output len for fast test.
  235. 128,
  236. ])
  237. @pytest.mark.parametrize("batch_size", [4])
  238. @pytest.mark.parametrize("seed", [1])
  239. def test_mlp_e2e_greedy_correctness_with_padding(
  240. aphrodite_runner, common_llm_kwargs, per_test_common_llm_kwargs,
  241. baseline_llm_kwargs, test_llm_kwargs, batch_size: int, output_len: int,
  242. seed: int):
  243. """Verify greedy equality when the vocab dimension is padded
  244. """
  245. # Default pad_to is 64, test model has vocab_size of 32000
  246. def patched_pad_vocab_size(vocab_size, pad_to=None):
  247. return pad_vocab_size(vocab_size, pad_to=32064)
  248. with patch(
  249. "aphrodite.modeling.layers.vocab_parallel_embedding.pad_vocab_size",
  250. patched_pad_vocab_size):
  251. run_equality_correctness_test(aphrodite_runner,
  252. common_llm_kwargs,
  253. per_test_common_llm_kwargs,
  254. baseline_llm_kwargs,
  255. test_llm_kwargs,
  256. batch_size,
  257. max_output_len=output_len,
  258. seed=seed,
  259. temperature=0.0)
  260. @pytest.mark.parametrize(
  261. "common_llm_kwargs",
  262. [{
  263. # Skip cuda graph recording for fast test.
  264. "enforce_eager": True,
  265. # Required for spec decode.
  266. "use_v2_block_manager": True,
  267. # Precision
  268. "dtype": PRECISION,
  269. # Main model
  270. "model_name": MAIN_MODEL,
  271. }])
  272. @pytest.mark.parametrize("per_test_common_llm_kwargs", [{}])
  273. @pytest.mark.parametrize("baseline_llm_kwargs", [{}])
  274. @pytest.mark.parametrize(
  275. "test_llm_kwargs",
  276. [
  277. {
  278. "speculative_model": SPEC_MODEL,
  279. "num_speculative_tokens": k,
  280. }
  281. # Try a range of num. speculative tokens
  282. for k in range(1, 1 + MAX_SPEC_TOKENS)
  283. ])
  284. @pytest.mark.parametrize("batch_size", [2])
  285. @pytest.mark.parametrize(
  286. "output_len",
  287. [
  288. # Use smaller output len for fast test.
  289. 32,
  290. ])
  291. @pytest.mark.parametrize("seed", [1])
  292. def test_mlp_different_k(aphrodite_runner, common_llm_kwargs,
  293. per_test_common_llm_kwargs, baseline_llm_kwargs,
  294. test_llm_kwargs, batch_size: int, seed: int,
  295. output_len: int):
  296. """Verify that mlp speculative decoding produces exact equality
  297. to without spec decode with different values of num_speculative_tokens.
  298. """
  299. run_equality_correctness_test(aphrodite_runner,
  300. common_llm_kwargs,
  301. per_test_common_llm_kwargs,
  302. baseline_llm_kwargs,
  303. test_llm_kwargs,
  304. batch_size,
  305. max_output_len=output_len,
  306. seed=seed,
  307. temperature=0.0)
  308. @pytest.mark.parametrize(
  309. "common_llm_kwargs",
  310. [{
  311. # Skip cuda graph recording for fast test.
  312. "enforce_eager": True,
  313. # Required for spec decode.
  314. "use_v2_block_manager": True,
  315. # Precision
  316. "dtype": PRECISION,
  317. # Main model
  318. "model_name": MAIN_MODEL,
  319. }])
  320. @pytest.mark.parametrize("per_test_common_llm_kwargs", [{}])
  321. @pytest.mark.parametrize("baseline_llm_kwargs", [{}])
  322. @pytest.mark.parametrize("test_llm_kwargs",
  323. [{
  324. "speculative_model": SPEC_MODEL,
  325. "speculative_disable_by_batch_size": 4
  326. }])
  327. @pytest.mark.parametrize("batch_size", [1, 5])
  328. @pytest.mark.parametrize(
  329. "output_len",
  330. [
  331. # Use smaller output len for fast test.
  332. 32,
  333. ])
  334. @pytest.mark.parametrize("seed", [1])
  335. def test_mlp_disable_queue(aphrodite_runner, common_llm_kwargs,
  336. per_test_common_llm_kwargs, baseline_llm_kwargs,
  337. test_llm_kwargs, batch_size: int, seed: int,
  338. output_len: int):
  339. """Verify that mlp speculative decoding produces exact equality
  340. to without spec decode when speculation is disabled for large
  341. batch sizes.
  342. """
  343. run_equality_correctness_test(aphrodite_runner,
  344. common_llm_kwargs,
  345. per_test_common_llm_kwargs,
  346. baseline_llm_kwargs,
  347. test_llm_kwargs,
  348. batch_size,
  349. max_output_len=output_len,
  350. seed=seed,
  351. temperature=0.0)