test_chunked_prefill_distributed.py 2.4 KB

"""Compare the outputs of HF and distributed Aphrodite when using greedy
sampling.

Run:
```sh
pytest test_chunked_prefill_distributed.py
```
"""
import os

import pytest

from aphrodite.common.utils import cuda_device_count_stateless

from ..models.utils import check_outputs_equal
from ..utils import fork_new_process_for_each_test

@pytest.mark.skipif(cuda_device_count_stateless() < 2,
                    reason="Need at least 2 GPUs to run the test.")
@pytest.mark.parametrize("model, distributed_executor_backend", [
    ("facebook/opt-125m", "ray"),
    ("meta-llama/Llama-2-7b-hf", "ray"),
    ("facebook/opt-125m", "mp"),
    ("meta-llama/Llama-2-7b-hf", "mp"),
])
@fork_new_process_for_each_test
def test_models(
    hf_runner,
    aphrodite_runner,
    example_prompts,
    model: str,
    distributed_executor_backend: str,
) -> None:
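    # For the Llama + "ray" case below, the env vars additionally exercise
    # Ray's SPMD worker and compiled-DAG execution path.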
    if model == "meta-llama/Llama-2-7b-hf" and distributed_executor_backend == "ray":  # noqa
        assert distributed_executor_backend == "ray"
        os.environ["APHRODITE_USE_RAY_SPMD_WORKER"] = "1"
        os.environ["APHRODITE_USE_RAY_COMPILED_DAG"] = "1"

    dtype = "half"
    max_tokens = 5
    chunked_prefill_token_size = 16

    # Add a chunked prefill config.
    max_num_seqs = min(chunked_prefill_token_size, 256)
    assert chunked_prefill_token_size != -1
    enable_chunked_prefill = True
    max_num_batched_tokens = chunked_prefill_token_size
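    # With max_num_batched_tokens capped at 16, prompts longer than 16 tokens
    # have to be prefilled over several scheduler steps, which is what
    # exercises the chunked-prefill path.
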
    # NOTE: Take care of the order: run Aphrodite first, then HF.
    # Aphrodite needs a fresh process in which CUDA has not yet been
    # initialized; if HF runs first, its CUDA initialization hurts the
    # multiprocessing backend when it forks workers (fork is the default
    # start method).
    with aphrodite_runner(
        model,
        dtype=dtype,
        tensor_parallel_size=2,
        max_num_seqs=max_num_seqs,
        enable_chunked_prefill=enable_chunked_prefill,
        max_num_batched_tokens=max_num_batched_tokens,
        distributed_executor_backend=distributed_executor_backend,
    ) as aphrodite_model:
        aphrodite_outputs = aphrodite_model.generate_greedy(
            example_prompts, max_tokens)

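    # Leaving the context manager shuts the distributed engine down, so its
    # GPU memory can be freed before the HF model is loaded.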
    with hf_runner(model, dtype=dtype) as hf_model:
        hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens)

    check_outputs_equal(
        outputs_0_lst=hf_outputs,
        outputs_1_lst=aphrodite_outputs,
        name_0="hf",
        name_1="aphrodite",
    )