  1. """Compare the outputs of HF and Aphrodite when using greedy sampling.
  2. It tests chunked prefill. Chunked prefill can be enabled by
  3. enable_chunked_prefill=True. If prefill size exceeds max_num_batched_tokens,
  4. prefill requests are chunked.
  5. Run `pytest tests/models/test_chunked_prefill.py`.
  6. """
from contextlib import nullcontext

import pytest

from ..models.utils import check_logprobs_close, check_outputs_equal

MODELS = [
    "facebook/opt-125m",
    "meta-llama/Llama-2-7b-hf",
]
E5M2_KV_MODELS = [
    "facebook/opt-125m",
    "meta-llama/Llama-2-7b-chat-hf",
]
E4M3_KV_MODELS = [
    "meta-llama/Llama-2-7b-chat-hf",
    "nm-testing/Qwen2-1.5B-Instruct-FP8-K-V",
    "nm-testing/TinyLlama-1.1B-compressed-tensors-kv-cache-scheme",
]
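# Pre-computed FP8 KV-cache scaling factors for models that do not ship
# their own, keyed by model name.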
KV_CACHE_QUANTIZATION_PATHS = {
    "meta-llama/Llama-2-7b-chat-hf":
    "./tests/fp8_kv/llama2-7b-fp8-kv/kv_cache_scales.json"
}


@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("dtype", ["half"])
@pytest.mark.parametrize("max_tokens", [32])
@pytest.mark.parametrize("chunked_prefill_token_size", [1, 4, 16])
@pytest.mark.parametrize("enforce_eager", [False, True])
# NOTE: Increasing this in this suite will fail CI because we currently cannot
# reset the distributed env properly. Use a value > 1 only when testing
# locally.
@pytest.mark.parametrize("tensor_parallel_size", [1])
def test_models(
    hf_runner,
    aphrodite_runner,
    example_prompts,
    model: str,
    dtype: str,
    max_tokens: int,
    chunked_prefill_token_size: int,
    enforce_eager: bool,
    tensor_parallel_size: int,
) -> None:
    """
    Checks that greedy decoding matches exactly between the HuggingFace model
    and the Aphrodite runner with chunked prefill.
    """
    max_num_seqs = chunked_prefill_token_size
    max_num_batched_tokens = chunked_prefill_token_size

    with hf_runner(model, dtype=dtype) as hf_model:
        hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens)

    with aphrodite_runner(
            model,
            dtype=dtype,
            max_num_batched_tokens=max_num_batched_tokens,
            enable_chunked_prefill=True,
            tensor_parallel_size=tensor_parallel_size,
            enforce_eager=enforce_eager,
            max_num_seqs=max_num_seqs,
    ) as aphrodite_model:
        aphrodite_outputs = aphrodite_model.generate_greedy(
            example_prompts, max_tokens)

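    # Greedy decoding is deterministic, so chunked prefill must reproduce
    # the HF outputs token-for-token.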
    check_outputs_equal(
        outputs_0_lst=hf_outputs,
        outputs_1_lst=aphrodite_outputs,
        name_0="hf",
        name_1="aphrodite",
    )


@pytest.mark.parametrize(
    "kv_cache_dtype,model",
    [("fp8_e5m2", m) for m in E5M2_KV_MODELS] +
    [("fp8_e4m3", m) for m in E4M3_KV_MODELS])
# Due to low-precision numerical divergence, we only check the logprobs of
# 4 tokens.
@pytest.mark.parametrize("max_tokens", [4])
@pytest.mark.parametrize("chunked_prefill_token_size", [1, 4, 16])
@pytest.mark.parametrize("enforce_eager", [False, True])
# NOTE: Increasing this in this suite will fail CI because we currently cannot
# reset the distributed env properly. Use a value > 1 only when testing
# locally.
@pytest.mark.parametrize("tensor_parallel_size", [1])
# Due to low-precision numerical divergence, this test is too sensitive to
# the async postprocessor, so it is disabled.
@pytest.mark.parametrize("disable_async_output_proc", [True])
def test_models_with_fp8_kv_cache(
    aphrodite_runner,
    example_prompts,
    kv_cache_dtype: str,
    model: str,
    max_tokens: int,
    chunked_prefill_token_size: int,
    enforce_eager: bool,
    tensor_parallel_size: int,
    disable_async_output_proc: bool,
) -> None:
    """
    Only checks that logprobs match between the chunked-prefill and
    non-chunked-prefill versions of the Aphrodite model runner. This test is
    used when there is a discrepancy in kernels / numerics (e.g. when using
    lower-precision types like FP8).
    """
    NUM_LOG_PROBS = 8

    if model == "facebook/opt-125m":
        pytest.skip(
            "#7378: CUDA illegal memory access (undiagnosed) facebook/opt-125m"
        )

    max_num_seqs = chunked_prefill_token_size
    max_num_batched_tokens = chunked_prefill_token_size

    extra_kwargs = {}
    if model in KV_CACHE_QUANTIZATION_PATHS:
        extra_kwargs["quantization_param_path"] = KV_CACHE_QUANTIZATION_PATHS[
            model]

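    # Baseline run: chunked prefill disabled (the default).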
    with aphrodite_runner(
            model,
            tensor_parallel_size=tensor_parallel_size,
            enforce_eager=enforce_eager,
            max_num_seqs=max_num_seqs,
            kv_cache_dtype=kv_cache_dtype,
            disable_async_output_proc=disable_async_output_proc,
            **extra_kwargs,
    ) as aphrodite_model:
        no_chunked_prefill_outputs = aphrodite_model.generate_greedy_logprobs(
            example_prompts, max_tokens, NUM_LOG_PROBS)

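    # Second run: same configuration plus a max_num_batched_tokens budget,
    # with chunked prefill enabled.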
    with aphrodite_runner(
            model,
            max_num_batched_tokens=max_num_batched_tokens,
            enable_chunked_prefill=True,
            tensor_parallel_size=tensor_parallel_size,
            enforce_eager=enforce_eager,
            max_num_seqs=max_num_seqs,
            kv_cache_dtype=kv_cache_dtype,
            disable_async_output_proc=disable_async_output_proc,
            **extra_kwargs,
    ) as aphrodite_model:
        chunked_prefill_outputs = aphrodite_model.generate_greedy_logprobs(
            example_prompts, max_tokens, NUM_LOG_PROBS)

    check_logprobs_close(
        outputs_0_lst=no_chunked_prefill_outputs,
        outputs_1_lst=chunked_prefill_outputs,
        name_0="no_chunked_prefill",
        name_1="chunked_prefill",
    )


@pytest.mark.parametrize("max_tokens", [16])
@pytest.mark.parametrize("enforce_eager", [False])
@pytest.mark.parametrize("chunk_size", [30, 32])
@pytest.mark.parametrize("use_v2_block_manager", [False, True])
# NOTE: Increasing this in this suite will fail CI because we currently cannot
# reset the distributed env properly. Use a value > 1 only when testing
# locally.
@pytest.mark.parametrize("tensor_parallel_size", [1])
def test_with_prefix_caching(
    aphrodite_runner,
    max_tokens: int,
    enforce_eager: bool,
    chunk_size: int,
    use_v2_block_manager: bool,
    tensor_parallel_size: int,
) -> None:
    """
    Checks exact-match decoding with and without prefix caching
    while chunked prefill is enabled.
    """
    model = "meta-llama/Llama-2-7b-chat-hf"
    # The common prompt has 142 tokens with the Llama-2 tokenizer.
    common_prompt = "You are a helpful AI assistant " * 20
    unique_prompts = [
        "Question",  # Warmup
        "Question",  # Fully cached
        "Another question",  # Partially cached
    ]
    full_prompts = [f"{common_prompt}\n{p}" for p in unique_prompts]

    max_num_batched_tokens = max_num_seqs = chunk_size
    outputs = {}  # type: ignore
    check_result = True
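    # Run twice with otherwise identical configurations: once with prefix
    # caching enabled and once without.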
    for enable in (True, False):
        with aphrodite_runner(
                model,
                dtype="half",
                max_num_batched_tokens=max_num_batched_tokens,
                enable_chunked_prefill=True,
                enable_prefix_caching=enable,
                tensor_parallel_size=tensor_parallel_size,
                use_v2_block_manager=use_v2_block_manager,
                enforce_eager=enforce_eager,
                max_num_seqs=max_num_seqs,
        ) as aphrodite_model:
            # It should fail when prefix caching is enabled and the chunk
            # size is not a multiple of the block size (16).
            should_fail = chunk_size % 16 != 0 and enable
            check_result &= not should_fail
            outputs[enable] = []
            # Send the requests one-by-one to ensure the cache is populated.
            with pytest.raises(ValueError) if should_fail else nullcontext():
                for prompt in full_prompts:
                    outputs[enable] += aphrodite_model.generate_greedy(
                        [prompt], max_tokens)

    # Check results only if we did not expect a failure.
    if check_result:
        check_outputs_equal(
            outputs_0_lst=outputs[False],
            outputs_1_lst=outputs[True],
            name_0="w/o prefix caching",
            name_1="with prefix caching",
        )