# test_llava_image_embeds.py

from typing import List, Optional, Tuple, Type

import pytest
from transformers import AutoConfig, AutoTokenizer

from aphrodite.common.sequence import SampleLogprobs

from ..conftest import IMAGE_ASSETS, AphroditeRunner, HfRunner, _ImageAssets
from .utils import check_logprobs_close

pytestmark = pytest.mark.vlm
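
# The prompts below follow the LLaVA-1.5 USER/ASSISTANT chat template;
# "<image>" marks where the image features are spliced into the sequence.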
HF_IMAGE_PROMPTS = IMAGE_ASSETS.prompts({
    "stop_sign":
    "USER: <image>\nWhat's the content of the image?\nASSISTANT:",
    "cherry_blossom":
    "USER: <image>\nWhat is the season?\nASSISTANT:",
})

models = [
    "llava-hf/llava-1.5-7b-hf",
]


def aphrodite_to_hf_output(aphrodite_output: Tuple[List[int], str,
                                                   Optional[SampleLogprobs]],
                           model: str):
    """Sanitize aphrodite output to be comparable with hf output."""
    output_ids, output_str, out_logprobs = aphrodite_output

    config = AutoConfig.from_pretrained(model)
    image_token_id = config.image_token_index

    tokenizer = AutoTokenizer.from_pretrained(model)
    eos_token_id = tokenizer.eos_token_id
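
    # Aphrodite expands the single "<image>" placeholder into a run of
    # repeated image tokens; collapse each run back to one token so the
    # ids line up with HF's single-placeholder output.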
    hf_output_ids = [
        token_id for idx, token_id in enumerate(output_ids)
        if token_id != image_token_id or output_ids[idx - 1] != image_token_id
    ]
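
    # Aphrodite's decoded string carries a leading space that HF omits;
    # strip it. If generation ended on EOS, append its decoded form, since
    # HF keeps the EOS text in its output string.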
    assert output_str[0] == " "
    hf_output_str = output_str[1:]
    if hf_output_ids[-1] == eos_token_id:
        hf_output_str = hf_output_str + tokenizer.decode(eos_token_id)

    return hf_output_ids, hf_output_str, out_logprobs


def run_test(
    hf_runner: Type[HfRunner],
    aphrodite_runner: Type[AphroditeRunner],
    image_assets: _ImageAssets,
    model: str,
    *,
    size_factors: List[float],
    dtype: str,
    max_tokens: int,
    num_logprobs: int,
    tensor_parallel_size: int,
    distributed_executor_backend: Optional[str] = None,
):
  48. """Inference result should be the same between hf and aphrodite.
  49. All the image fixtures for the test is under tests/images.
  50. For huggingface runner, we provide the PIL images as input.
  51. For aphrodite runner, we provide MultiModalDataDict objects
  52. and corresponding vision language config as input.
  53. Note, the text input is also adjusted to abide by aphrodite contract.
  54. The text output is sanitized to be able to compare with hf.
  55. """
    # Aphrodite to load from image embeddings
    aphrodite_images = [asset.image_embeds for asset in image_assets]

    # transformers to load from PIL images
    hf_images = [asset.pil_image for asset in image_assets]

    aphrodite_inputs_per_image = [(
        [prompt for _ in size_factors],
        [image for _ in size_factors],
    ) for image, prompt in zip(aphrodite_images, HF_IMAGE_PROMPTS)]
    hf_inputs_per_image = [(
        [prompt for _ in size_factors],
        [image for _ in size_factors],
    ) for image, prompt in zip(hf_images, HF_IMAGE_PROMPTS)]
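
    # NOTE: only the length of size_factors matters here. The factor
    # values are never used: embeddings cannot be rescaled, so each entry
    # just adds one more copy of the same (prompt, image) pair to the batch.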

    # NOTE: take care of the order: run Aphrodite first, then HF.
    # Aphrodite needs a fresh process in which CUDA has not yet been
    # initialized; if HF runs first, CUDA gets initialized and breaks the
    # multiprocessing backend with the fork start method (the default).

    # max_model_len should be greater than image_feature_size
    with aphrodite_runner(
            model,
            dtype=dtype,
            tensor_parallel_size=tensor_parallel_size,
            distributed_executor_backend=distributed_executor_backend,
            enforce_eager=True) as aphrodite_model:
        aphrodite_outputs_per_image = [
            aphrodite_model.generate_greedy_logprobs(prompts,
                                                     max_tokens,
                                                     num_logprobs=num_logprobs,
                                                     images=images)
            for prompts, images in aphrodite_inputs_per_image
        ]

    with hf_runner(model, dtype=dtype, is_vision_model=True) as hf_model:
        hf_outputs_per_image = [
            hf_model.generate_greedy_logprobs_limit(prompts,
                                                    max_tokens,
                                                    num_logprobs=num_logprobs,
                                                    images=images)
            for prompts, images in hf_inputs_per_image
        ]

    for hf_outputs, aphrodite_outputs in zip(hf_outputs_per_image,
                                             aphrodite_outputs_per_image):
        # TODO: Check whether using original CLIPVisionModel can improve
        # consistency against HF
        check_logprobs_close(
            outputs_0_lst=hf_outputs,
            outputs_1_lst=[
                aphrodite_to_hf_output(aphrodite_output, model)
                for aphrodite_output in aphrodite_outputs
            ],
            name_0="hf",
            name_1="aphrodite",
        )


@pytest.mark.parametrize("model", models)
@pytest.mark.parametrize(
    "size_factors",
    [
        # No image
        [],
        # Single-scale
        [1.0],
        # Single-scale, batched
        [1.0, 1.0, 1.0],
    ],
)
@pytest.mark.parametrize("dtype", ["half"])
@pytest.mark.parametrize("max_tokens", [128])
@pytest.mark.parametrize("num_logprobs", [5])
def test_models(hf_runner, aphrodite_runner, image_assets, model, size_factors,
                dtype: str, max_tokens: int, num_logprobs: int) -> None:
    run_test(
        hf_runner,
        aphrodite_runner,
        image_assets,
        model,
        size_factors=size_factors,
        dtype=dtype,
        max_tokens=max_tokens,
        num_logprobs=num_logprobs,
        tensor_parallel_size=1,
    )
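

# Usage sketch: every test in this module carries the "vlm" marker via
# pytestmark, so either of the following (run from the repo's tests tree)
# selects them:
#     pytest test_llava_image_embeds.py
#     pytest -m vlm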