# test_basic_correctness.py
  1. """Compare the short outputs of HF and Aphrodite when using greedy sampling.
  2. Run `pytest tests/basic_correctness/test_basic_correctness.py`.
  3. """
  4. import os
  5. import pickle
  6. import re
  7. import weakref
  8. from unittest.mock import patch
  9. import pytest
  10. from aphrodite import LLM
  11. from aphrodite.common.utils import is_hip
  12. from aphrodite.worker.model_runner import ModelInputForGPUWithSamplingMetadata
  13. from ..models.utils import check_outputs_equal
  14. from ..utils import multi_gpu_test
  15. MODELS = [
  16. "facebook/opt-125m",
  17. "meta-llama/Llama-2-7b-hf",
  18. ]
  19. TARGET_TEST_SUITE = os.environ.get("TARGET_TEST_SUITE", "L4")
  20. def test_aphrodite_gc_ed():
  21. """Verify aphrodite instance is GC'ed when it is deleted"""
  22. llm = LLM("facebook/opt-125m")
  23. weak_llm = weakref.ref(llm)
  24. del llm
  25. # If there's any circular reference to aphrodite, this fails
  26. # because llm instance is not GC'ed.
  27. assert weak_llm() is None
  28. @pytest.mark.parametrize("model", MODELS)
  29. @pytest.mark.parametrize("backend", ["FLASH_ATTN", "XFORMERS", "FLASHINFER"])
  30. @pytest.mark.parametrize("dtype", ["half"])
  31. @pytest.mark.parametrize("max_tokens", [5])
  32. @pytest.mark.parametrize("enforce_eager", [False, True])
  33. def test_models(
  34. hf_runner,
  35. aphrodite_runner,
  36. example_prompts,
  37. model: str,
  38. backend: str,
  39. dtype: str,
  40. max_tokens: int,
  41. enforce_eager: bool,
  42. ) -> None:
  43. if backend == "FLASHINFER" and is_hip():
  44. pytest.skip("Flashinfer does not support ROCm/HIP.")
  45. os.environ["APHRODITE_ATTENTION_BACKEND"] = backend
  46. with hf_runner(model, dtype=dtype) as hf_model:
  47. hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens)
  48. with aphrodite_runner(model,
  49. dtype=dtype,
  50. enforce_eager=enforce_eager,
  51. gpu_memory_utilization=0.7) as aphrodite_model:
  52. aphrodite_outputs = aphrodite_model.generate_greedy(
  53. example_prompts, max_tokens)
  54. check_outputs_equal(
  55. outputs_0_lst=hf_outputs,
  56. outputs_1_lst=aphrodite_outputs,
  57. name_0="hf",
  58. name_1="aphrodite",
  59. )
  60. @multi_gpu_test(num_gpus=2)
  61. @pytest.mark.parametrize(
  62. "model, distributed_executor_backend, attention_backend, "
  63. "test_suite", [
  64. ("facebook/opt-125m", "ray", "", "L4"),
  65. ("facebook/opt-125m", "mp", "", "L4"),
  66. ("meta-llama/Llama-2-7b-hf", "ray", "", "L4"),
  67. ("meta-llama/Llama-2-7b-hf", "mp", "", "L4"),
  68. ("facebook/opt-125m", "ray", "", "A100"),
  69. ("facebook/opt-125m", "mp", "", "A100"),
  70. ("facebook/opt-125m", "mp", "FLASHINFER", "A100"),
  71. ("meta-llama/Meta-Llama-3-8B", "ray", "FLASHINFER", "A100"),
  72. ])
  73. def test_models_distributed(
  74. hf_runner,
  75. aphrodite_runner,
  76. example_prompts,
  77. model: str,
  78. distributed_executor_backend: str,
  79. attention_backend: str,
  80. test_suite: str,
  81. ) -> None:
  82. if test_suite != TARGET_TEST_SUITE:
  83. pytest.skip(f"Skip test for {test_suite}")
  84. if model == "meta-llama/Llama-2-7b-hf" and distributed_executor_backend == "ray" and attention_backend == "" and test_suite == "L4": # noqa
  85. # test ray adag
  86. os.environ['APHRODITE_USE_RAY_SPMD_WORKER'] = "1"
  87. os.environ['APHRODITE_USE_RAY_COMPILED_DAG'] = "1"
  88. if attention_backend:
  89. os.environ["APHRODITE_ATTENTION_BACKEND"] = attention_backend
  90. dtype = "half"
  91. max_tokens = 5
  92. # NOTE: take care of the order. run Aphrodite first, and then run HF.
  93. # Aphrodite needs a fresh new process without cuda initialization.
  94. # if we run HF first, the cuda initialization will be done and it
  95. # will hurt multiprocessing backend with fork method (the default method).
  96. with aphrodite_runner(model,
  97. dtype=dtype,
  98. tensor_parallel_size=2,
  99. distributed_executor_backend=distributed_executor_backend
  100. ) as aphrodite_model:
  101. aphrodite_outputs = aphrodite_model.generate_greedy(
  102. example_prompts, max_tokens)
  103. with hf_runner(model, dtype=dtype) as hf_model:
  104. hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens)
  105. check_outputs_equal(
  106. outputs_0_lst=hf_outputs,
  107. outputs_1_lst=aphrodite_outputs,
  108. name_0="hf",
  109. name_1="aphrodite",
  110. )
  111. def test_model_with_failure(aphrodite_runner) -> None:
  112. try:
  113. with patch("aphrodite.modeling.models.opt.OPTForCausalLM.forward",
  114. side_effect=ValueError()):
  115. with pytest.raises(ValueError) as exc_info:
  116. aphrodite_runner("facebook/opt-125m",
  117. dtype="half",
  118. enforce_eager=False,
  119. gpu_memory_utilization=0.7)
  120. matches = re.search(r"input dumped to (.+).pkl",
  121. str(exc_info.value))
  122. assert matches is not None
  123. filename = f"{matches.group(1)}.pkl"
  124. with open(filename, "rb") as filep:
  125. inputs = pickle.load(filep)
  126. if any(key not in inputs for key in ("arg_1", "arg_2", "arg_3")):
  127. raise AssertionError("Missing keys in dumped inputs. Dumped keys: "
  128. f"{list(inputs.keys())}")
  129. assert isinstance(inputs["arg_1"],
  130. ModelInputForGPUWithSamplingMetadata)
  131. finally:
  132. os.remove(filename)