  1. """Tests which cover integration of the speculative decoding framework with
  2. tensor parallelism.
  3. """
  4. import pytest
  5. import torch
  6. from .conftest import run_greedy_equality_correctness_test
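
# NOTE: these tests need at least 4 visible CUDA devices. A typical
# invocation (the directory prefix is illustrative; adjust to this file's
# actual location in the repo) is:
#
#   pytest -s tests/spec_decode/e2e/test_integration_dist_tp4.py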


@pytest.mark.skipif(torch.cuda.device_count() < 4,
                    reason="Need at least 4 GPUs to run the test.")
@pytest.mark.parametrize(
    "common_llm_kwargs",
    [{
        # Use a small model for a fast test.
        # Note this is repeated in the test body to initialize a tokenizer.
        "model": "JackFram/llama-68m",

        # Skip cuda graph recording for fast test.
        "enforce_eager": True,

        # Required for spec decode.
        "use_v2_block_manager": True,
        "tensor_parallel_size": 4,

        # Use AsyncLLM engine, so that the engine runs in its own process.
        # Otherwise, since aphrodite does not follow true SPMD, the test runner
        # process will have both the engine and the rank0 worker. NCCL is not
        # cleaned up properly, and its server host thread leaks, causing the
        # second run of the test to fail with an internal NCCL error.
        "use_async": True,
    }])
@pytest.mark.parametrize("per_test_common_llm_kwargs", [
    {
        "speculative_model": "JackFram/llama-68m",
        "num_speculative_tokens": 5,
    },
])
@pytest.mark.parametrize("baseline_llm_kwargs", [{}])
@pytest.mark.parametrize(
    "test_llm_kwargs",
    [
        # TODO(wooyeon): add spec_draft_dp=2 case
        {
            "speculative_draft_tensor_parallel_size": 1,
        },
    ])
@pytest.mark.parametrize("batch_size", [2])
@pytest.mark.parametrize("seed", [1])
def test_draft_model_tp_lt_target_model_tp4(test_llm_generator,
                                            baseline_llm_generator,
                                            batch_size: int):
    """Verify spec decode produces correct output when the draft model uses
    a smaller tensor parallel degree than the target model.
    """
    run_greedy_equality_correctness_test(baseline_llm_generator,
                                         test_llm_generator,
                                         batch_size,
                                         max_output_len=32,
                                         force_output_len=True)
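

# NOTE: in the test above, the target model runs with tensor_parallel_size=4
# while the draft model is restricted to
# speculative_draft_tensor_parallel_size=1, i.e. the draft model runs
# unsharded on a single rank while the target model is sharded across all
# four GPUs.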


@pytest.mark.skipif(torch.cuda.device_count() < 4,
                    reason="Need at least 4 GPUs to run the test.")
@pytest.mark.parametrize(
    "common_llm_kwargs",
    [{
        "model": "JackFram/llama-160m",

        # Skip cuda graph recording for fast test.
        "enforce_eager": True,

        # Required for spec decode.
        "use_v2_block_manager": True,
        "tensor_parallel_size": 4,

        # Use AsyncLLM engine, so that the engine runs in its own process.
        # Otherwise, since aphrodite does not follow true SPMD, the test runner
        # process will have both the engine and the rank0 worker. NCCL is not
        # cleaned up properly, and its server host thread leaks, causing the
        # second run of the test to fail with an internal NCCL error.
        "use_async": True,
    }])
@pytest.mark.parametrize("per_test_common_llm_kwargs", [{}])
@pytest.mark.parametrize("baseline_llm_kwargs", [{}])
@pytest.mark.parametrize(
    "test_llm_kwargs",
    [
        {
            "speculative_model": "JackFram/llama-68m",
            "num_speculative_tokens": 5,

            # Artificially limit the draft model max model len; this forces
            # aphrodite to skip speculation once the sequences grow beyond
            # 32 tokens.
            "speculative_max_model_len": 32,
        },
    ])
@pytest.mark.parametrize("batch_size", [8])
@pytest.mark.parametrize(
    "output_len",
    [
        # This must be a good bit larger than speculative_max_model_len so
        # that we can test the case where all seqs are skipped, but still
        # small enough to keep the test fast.
        64,
    ])
@pytest.mark.parametrize("seed", [1])
def test_skip_speculation(baseline_llm_generator, test_llm_generator,
                          batch_size: int, output_len: int):
    """Verify that the job fails with a RuntimeError when all sequences skip
    speculation. We do this by setting the max model len of the draft model
    to an artificially low value, such that when the sequences grow beyond
    it, they are skipped in speculative decoding.

    TODO: fix it to pass without raising an error. (#5814)
    """
    with pytest.raises(RuntimeError):
        run_greedy_equality_correctness_test(baseline_llm_generator,
                                             test_llm_generator,
                                             batch_size,
                                             max_output_len=output_len,
                                             force_output_len=True)
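

# A minimal sketch of the skip condition exercised by test_skip_speculation
# above. This helper is illustrative only (a hypothetical stand-in, not
# Aphrodite's actual implementation): speculation is skipped once a sequence
# plus the proposal lookahead would exceed the draft model's max model len.
def _should_skip_speculation_sketch(seq_len: int, num_speculative_tokens: int,
                                    speculative_max_model_len: int) -> bool:
    # With speculative_max_model_len=32 and num_speculative_tokens=5, any
    # sequence longer than 27 tokens stops receiving draft proposals.
    return seq_len + num_speculative_tokens > speculative_max_model_len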