# test_chameleon.py

from typing import List, Optional, Type

import pytest
from transformers import BatchEncoding

from aphrodite.common.utils import STR_DTYPE_TO_TORCH_DTYPE
from aphrodite.multimodal.utils import rescale_image_size

from ..conftest import IMAGE_ASSETS, AphroditeRunner, HfRunner, _ImageAssets
from .utils import check_outputs_equal

pytestmark = pytest.mark.vlm
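
# Prompts paired with each image asset; the "<image>" placeholder marks where
# the image is injected into the prompt.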
HF_IMAGE_PROMPTS = IMAGE_ASSETS.prompts({
    "stop_sign":
    "USER: <image>\nWhat's the content of the image?\nASSISTANT:",
    "cherry_blossom":
    "USER: <image>\nWhat is the season?\nASSISTANT:",
})

models = ["facebook/chameleon-7b"]


def run_test(
    hf_runner: Type[HfRunner],
    aphrodite_runner: Type[AphroditeRunner],
    image_assets: _ImageAssets,
    model: str,
    *,
    size_factors: List[float],
    dtype: str,
    max_tokens: int,
    num_logprobs: int,
    tensor_parallel_size: int,
    distributed_executor_backend: Optional[str] = None,
):
  29. """Inference result should be the same between hf and aphrodite.
  30. All the image fixtures for the test is under tests/images.
  31. For huggingface runner, we provide the PIL images as input.
  32. For aphrodite runner, we provide MultiModalDataDict objects
  33. and corresponding vision language config as input.
  34. Note, the text input is also adjusted to abide by aphrodite contract.
  35. The text output is sanitized to be able to compare with hf.
  36. """
    torch_dtype = STR_DTYPE_TO_TORCH_DTYPE[dtype]
    images = [asset.pil_image for asset in image_assets]
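
    # For every image asset, repeat its prompt once per size factor and pair
    # it with the image rescaled by that factor, so prompts and images stay
    # aligned within each batch.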
    inputs_per_image = [(
        [prompt for _ in size_factors],
        [rescale_image_size(image, factor) for factor in size_factors],
    ) for image, prompt in zip(images, HF_IMAGE_PROMPTS)]
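
    # Run the aphrodite engine first and collect greedy completions with
    # top-k logprobs for every batch of rescaled images.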
    with aphrodite_runner(model,
                          max_model_len=4096,
                          dtype=dtype,
                          tensor_parallel_size=tensor_parallel_size,
                          distributed_executor_backend=distributed_executor_backend,
                          enforce_eager=True) as aphrodite_model:
        aphrodite_outputs_per_image = [
            aphrodite_model.generate_greedy_logprobs(prompts,
                                                     max_tokens,
                                                     num_logprobs=num_logprobs,
                                                     images=images)
            for prompts, images in inputs_per_image
        ]
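
    # The HF image processor returns pixel_values in float32 by default, so
    # cast them to the dtype under test before the HF forward pass.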
    def process(hf_inputs: BatchEncoding):
        hf_inputs["pixel_values"] = hf_inputs["pixel_values"] \
            .to(torch_dtype)  # type: ignore
        return hf_inputs
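
    # Re-run the same prompts through the HuggingFace reference implementation.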
    with hf_runner(model,
                   dtype=dtype,
                   postprocess_inputs=process,
                   is_vision_model=True) as hf_model:
        hf_outputs_per_image = [
            hf_model.generate_greedy_logprobs_limit(prompts,
                                                    max_tokens,
                                                    num_logprobs=num_logprobs,
                                                    images=images)
            for prompts, images in inputs_per_image
        ]

    for hf_outputs, aphrodite_outputs in zip(hf_outputs_per_image,
                                             aphrodite_outputs_per_image):
        # HF logprobs include image tokens, unlike Aphrodite, so we don't
        # compare them directly; only the token ids and decoded text are
        # checked.
        check_outputs_equal(
            outputs_0_lst=[outputs[:2] for outputs in hf_outputs],
            outputs_1_lst=[outputs[:2] for outputs in aphrodite_outputs],
            name_0="hf",
            name_1="aphrodite",
        )


@pytest.mark.parametrize("model", models)
@pytest.mark.parametrize(
    "size_factors",
    [
        # No image
        [],
        # Single-scale
        [1.0],
        # Single-scale, batched
        [1.0, 1.0, 1.0],
        # Multi-scale
        [0.25, 0.5, 1.0],
    ],
)
@pytest.mark.parametrize("dtype", ["bfloat16"])
@pytest.mark.parametrize("max_tokens", [8])
@pytest.mark.parametrize("num_logprobs", [5])
def test_models(hf_runner, aphrodite_runner, image_assets, model, size_factors,
                dtype, max_tokens, num_logprobs) -> None:
    run_test(
        hf_runner,
        aphrodite_runner,
        image_assets,
        model,
        size_factors=size_factors,
        dtype=dtype,
        max_tokens=max_tokens,
        num_logprobs=num_logprobs,
        tensor_parallel_size=1,
    )
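

# A minimal example of how one might run this test locally. The path below is
# an assumption about where the file sits in the test tree; adjust it to match
# your checkout:
#
#     pytest tests/models/test_chameleon.py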