test_danube3_4b.py

  1. """Compare the outputs of HF and Aphrodite when using greedy sampling.
  2. This tests danube3 separately because its head size isn't supported on CPU yet.
  3. Run `pytest tests/models/test_danube3_4b.py`.
  4. """
  5. import pytest
  6. from .utils import check_outputs_equal
  7. MODELS = ["h2oai/h2o-danube3-4b-base"]
  8. target_dtype = "half"
  9. @pytest.mark.parametrize("model", MODELS)
  10. @pytest.mark.parametrize("dtype", [target_dtype])
  11. @pytest.mark.parametrize("max_tokens", [32])
  12. def test_models(
  13. hf_runner,
  14. aphrodite_runner,
  15. example_prompts,
  16. model: str,
  17. dtype: str,
  18. max_tokens: int,
  19. ) -> None:
  20. with hf_runner(model, dtype=dtype) as hf_model:
  21. hf_outputs = hf_model.generate_greedy(example_prompts, max_tokens)
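
    # The hf_runner context has already exited at this point, so the HF model
    # can be released before the Aphrodite model is loaded.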
    with aphrodite_runner(model, dtype=dtype) as aphrodite_model:
        aphrodite_outputs = aphrodite_model.generate_greedy(
            example_prompts, max_tokens)

    check_outputs_equal(
        outputs_0_lst=hf_outputs,
        outputs_1_lst=aphrodite_outputs,
        name_0="hf",
        name_1="aphrodite",
    )


@pytest.mark.parametrize("model", MODELS)
@pytest.mark.parametrize("dtype", [target_dtype])
def test_model_print(
    aphrodite_runner,
    model: str,
    dtype: str,
) -> None:
    with aphrodite_runner(model, dtype=dtype) as aphrodite_model:
        # This test is for verifying whether the model's extra_repr
        # can be printed correctly.
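        # The attribute chain below reaches through the engine internals to
        # the underlying nn.Module so that its repr (including extra_repr)
        # gets printed.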
        print(aphrodite_model.model.llm_engine.model_executor.driver_worker.
              model_runner.model)