# test_ngram_correctness.py
"""This docstring details important information on the testing methodology.

Most of the tests rely on "greedy equality", where we expect the output of
speculative decoding on a sequence to exactly match the output of normal
non-speculative decoding.

Since speculative decoding with rejection sampling guarantees that the output
distribution matches the target model's output distribution (up to hardware
numerics, see https://arxiv.org/pdf/2302.01318.pdf), we can expect greedy
equality.

For ngram lookup, the idea comes from
https://github.com/apoorvumang/prompt-lookup-decoding, and was merged into the
transformers code base: https://github.com/huggingface/transformers/pull/27775.

Since no model is needed to generate the proposals, we can make the test cases
much simpler than the drafter multi-step ones. However, we still need to
verify that the scenarios below pass:

* Batch size 1 greedy equality
* Batch size >1 greedy equality
* Test greedy equality under preemption
* Test greedy equality under various ngram sizes / speculative sizes

With these tests, we can say at least that ngram spec decode would not break
the correctness of the target model outputs.
"""
  21. import pytest
  22. from .conftest import run_equality_correctness_test
  23. @pytest.mark.parametrize(
  24. "common_llm_kwargs",
  25. [{
  26. # Skip cuda graph recording for fast test.
  27. "enforce_eager": True,
  28. # Required for spec decode.
  29. "use_v2_block_manager": True,
  30. # Print spec metrics.
  31. "disable_log_stats": False,
  32. }])
  33. @pytest.mark.parametrize("per_test_common_llm_kwargs", [
  34. {
  35. "model_name": "JackFram/llama-68m",
  36. },
  37. ])
  38. @pytest.mark.parametrize("baseline_llm_kwargs", [{}])
  39. @pytest.mark.parametrize("test_llm_kwargs", [
  40. {
  41. "speculative_model": "[ngram]",
  42. "num_speculative_tokens": 5,
  43. "ngram_prompt_lookup_max": 3,
  44. },
  45. ])
  46. @pytest.mark.parametrize("output_len", [
  47. 256,
  48. ])
  49. @pytest.mark.parametrize("batch_size", [1, 32])
  50. @pytest.mark.parametrize("seed", [1])
  51. def test_ngram_e2e_greedy_correctness(aphrodite_runner, common_llm_kwargs,
  52. per_test_common_llm_kwargs,
  53. baseline_llm_kwargs, test_llm_kwargs,
  54. batch_size: int, output_len: int,
  55. seed: int):
  56. """Verify greedy equality on a tiny model with different batch size."""
  57. run_equality_correctness_test(aphrodite_runner,
  58. common_llm_kwargs,
  59. per_test_common_llm_kwargs,
  60. baseline_llm_kwargs,
  61. test_llm_kwargs,
  62. batch_size,
  63. max_output_len=output_len,
  64. seed=seed,
  65. temperature=0.0)
  66. @pytest.mark.parametrize(
  67. "common_llm_kwargs",
  68. [{
  69. "block_size": 8,
  70. # 2 for small prompt, 256//8 for generated.
  71. "num_gpu_blocks_override": 2 + 256 // 8,
  72. "max_model_len": (2 + 256 // 8) * 8,
  73. # Skip cuda graph recording for fast test.
  74. "enforce_eager": True,
  75. # Required for spec decode.
  76. "use_v2_block_manager": True
  77. }])
  78. @pytest.mark.parametrize("per_test_common_llm_kwargs", [
  79. {
  80. "model_name": "JackFram/llama-160m",
  81. },
  82. ])
  83. @pytest.mark.parametrize("baseline_llm_kwargs", [{}])
  84. @pytest.mark.parametrize("test_llm_kwargs", [
  85. {
  86. "speculative_model": "[ngram]",
  87. "num_speculative_tokens": 5,
  88. "ngram_prompt_lookup_max": 3,
  89. },
  90. ])
  91. @pytest.mark.parametrize(
  92. "output_len",
  93. [
  94. # Use small output len for fast test.
  95. 256,
  96. ])
  97. @pytest.mark.parametrize("batch_size", [4])
  98. @pytest.mark.parametrize("seed", [1])
  99. def test_ngram_e2e_greedy_correctness_with_preemption(
  100. aphrodite_runner, common_llm_kwargs, per_test_common_llm_kwargs,
  101. baseline_llm_kwargs, test_llm_kwargs, batch_size: int, output_len: int,
  102. seed: int):
  103. """Verify greedy equality, even when some sequences are preempted mid-
  104. generation.
  105. """
  106. run_equality_correctness_test(aphrodite_runner,
  107. common_llm_kwargs,
  108. per_test_common_llm_kwargs,
  109. baseline_llm_kwargs,
  110. test_llm_kwargs,
  111. batch_size,
  112. max_output_len=output_len,
  113. temperature=0,
  114. seed=seed)
  115. @pytest.mark.parametrize(
  116. "common_llm_kwargs",
  117. [{
  118. "model_name": "JackFram/llama-68m",
  119. # Skip cuda graph recording for fast test.
  120. "enforce_eager": True,
  121. # Required for spec decode.
  122. "use_v2_block_manager": True
  123. }])
  124. @pytest.mark.parametrize("per_test_common_llm_kwargs", [{}])
  125. @pytest.mark.parametrize("baseline_llm_kwargs", [{}])
  126. @pytest.mark.parametrize(
  127. "test_llm_kwargs",
  128. [
  129. {
  130. "speculative_model": "[ngram]",
  131. "num_speculative_tokens": k,
  132. "ngram_prompt_lookup_max": 3,
  133. }
  134. # Try a range of common k, as well as large speculation.
  135. for k in [1, 3, 5]
  136. ] + [
  137. {
  138. "speculative_model": "[ngram]",
  139. "num_speculative_tokens": k,
  140. "ngram_prompt_lookup_max": 1,
  141. }
  142. # Try a range of common k, as well as large speculation.
  143. for k in [1, 3, 5]
  144. ])
  145. @pytest.mark.parametrize("batch_size", [2])
  146. @pytest.mark.parametrize(
  147. "output_len",
  148. [
  149. # Use smaller output len for fast test.
  150. 32,
  151. ])
  152. @pytest.mark.parametrize("seed", [1])
  153. def test_ngram_different_k(aphrodite_runner, common_llm_kwargs,
  154. per_test_common_llm_kwargs, baseline_llm_kwargs,
  155. test_llm_kwargs, batch_size: int, output_len: int,
  156. seed: int):
  157. """Verify that ngram speculative decoding produces exact equality
  158. to without spec decode with many different values of k and
  159. different ngram_prompt_lookup_max.
  160. """
  161. run_equality_correctness_test(aphrodite_runner,
  162. common_llm_kwargs,
  163. per_test_common_llm_kwargs,
  164. baseline_llm_kwargs,
  165. test_llm_kwargs,
  166. batch_size,
  167. max_output_len=output_len,
  168. seed=seed,
  169. temperature=0.0)
  170. @pytest.mark.parametrize(
  171. "common_llm_kwargs",
  172. [{
  173. "model_name": "JackFram/llama-68m",
  174. # Skip cuda graph recording for fast test.
  175. "enforce_eager": True,
  176. # Required for spec decode.
  177. "use_v2_block_manager": True
  178. }])
  179. @pytest.mark.parametrize("per_test_common_llm_kwargs", [{}])
  180. @pytest.mark.parametrize("baseline_llm_kwargs", [{}])
  181. @pytest.mark.parametrize("test_llm_kwargs",
  182. [{
  183. "speculative_model": "[ngram]",
  184. "num_speculative_tokens": 5,
  185. "ngram_prompt_lookup_max": 3,
  186. "speculative_disable_by_batch_size": 4
  187. }])
  188. @pytest.mark.parametrize("batch_size", [1, 5])
  189. @pytest.mark.parametrize(
  190. "output_len",
  191. [
  192. # Use smaller output len for fast test.
  193. 32,
  194. ])
  195. @pytest.mark.parametrize("seed", [1])
  196. def test_ngram_disable_queue(aphrodite_runner, common_llm_kwargs,
  197. per_test_common_llm_kwargs, baseline_llm_kwargs,
  198. test_llm_kwargs, batch_size: int, output_len: int,
  199. seed: int):
  200. """Verify that ngram speculative decoding produces exact equality
  201. to without spec decode with many different values of k and
  202. different ngram_prompt_lookup_max.
  203. """
  204. run_equality_correctness_test(aphrodite_runner,
  205. common_llm_kwargs,
  206. per_test_common_llm_kwargs,
  207. baseline_llm_kwargs,
  208. test_llm_kwargs,
  209. batch_size,
  210. max_output_len=output_len,
  211. seed=seed,
  212. temperature=0.0)