# tests/models/test_danube3_4b.py
"""Compare the outputs of HF and Aphrodite when using greedy sampling.

This tests danube3 separately because its head size isn't supported on CPU yet.

Run `pytest tests/models/test_danube3_4b.py`.
"""
import pytest

from ...utils import check_outputs_equal

# Single model under test; danube3 gets its own file because of the
# CPU head-size limitation noted in the module docstring.
MODELS = ["h2oai/h2o-danube3-4b-base"]

# dtype used for both the HF reference run and the Aphrodite run.
target_dtype = "half"
  9. @pytest.mark.parametrize("model", MODELS)
  10. @pytest.mark.parametrize("dtype", [target_dtype])
  11. @pytest.mark.parametrize("max_tokens", [32])
  12. def test_models(
  13. hf_runner,
  14. aphrodite_runner,
  15. example_prompts,
  16. model: str,
  17. dtype: str,
  18. max_tokens: int,
  19. ) -> None:
  20. with hf_runner(model, dtype=dtype) as hf_model:
  21. hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens)
  22. with aphrodite_runner(model, dtype=dtype) as aphrodite_model:
  23. aphrodite_outputs = aphrodite_model.generate_greedy(example_prompts, max_tokens)
  24. check_outputs_equal(
  25. outputs_0_lst=hf_outputs,
  26. outputs_1_lst=aphrodite_outputs,
  27. name_0="hf",
  28. name_1="aphrodite",
  29. )
  30. @pytest.mark.parametrize("model", MODELS)
  31. @pytest.mark.parametrize("dtype", [target_dtype])
  32. def test_model_print(
  33. aphrodite_runner,
  34. model: str,
  35. dtype: str,
  36. ) -> None:
  37. with aphrodite_runner(model, dtype=dtype) as aphrodite_model:
  38. # This test is for verifying whether the model's extra_repr
  39. # can be printed correctly.
  40. print(aphrodite_model.model.llm_engine.model_executor.driver_worker.
  41. model_runner.model)