# test_llava.py
  1. from typing import List, Optional, Tuple, Type
  2. import pytest
  3. from transformers import AutoConfig, AutoTokenizer, BatchEncoding
  4. from aphrodite.common.sequence import SampleLogprobs
  5. from aphrodite.common.utils import STR_DTYPE_TO_TORCH_DTYPE
  6. from aphrodite.multimodal.utils import rescale_image_size
  7. from ..conftest import IMAGE_ASSETS, AphroditeRunner, HfRunner, _ImageAssets
  8. from .utils import check_logprobs_close
# Mark every test in this module as a vision-language-model test so the
# suite can be selected/deselected as a group.
pytestmark = pytest.mark.vlm

# Per-asset chat prompts; "<image>" is the placeholder the processor
# expands into image tokens.
HF_IMAGE_PROMPTS = IMAGE_ASSETS.prompts({
    "stop_sign":
    "USER: <image>\nWhat's the content of the image?\nASSISTANT:",
    "cherry_blossom":
    "USER: <image>\nWhat is the season?\nASSISTANT:",
})

# Model checkpoints exercised by test_models below.
models = [
    "llava-hf/llava-1.5-7b-hf",
    # TODO: Get this model to produce meaningful output in Aphrodite
    # "TIGER-Lab/Mantis-8B-siglip-llama3",
]
  21. def aphrodite_to_hf_output(aphrodite_output: Tuple[List[int], str,
  22. Optional[SampleLogprobs]],
  23. model: str):
  24. """Sanitize aphrodite output to be comparable with hf output."""
  25. output_ids, output_str, out_logprobs = aphrodite_output
  26. config = AutoConfig.from_pretrained(model)
  27. image_token_id = config.image_token_index
  28. tokenizer = AutoTokenizer.from_pretrained(model)
  29. eos_token_id = tokenizer.eos_token_id
  30. hf_output_ids = [
  31. token_id for idx, token_id in enumerate(output_ids)
  32. if token_id != image_token_id or output_ids[idx - 1] != image_token_id
  33. ]
  34. assert output_str[0] == " "
  35. hf_output_str = output_str[1:]
  36. if hf_output_ids[-1] == eos_token_id:
  37. hf_output_str = hf_output_str + tokenizer.decode(eos_token_id)
  38. return hf_output_ids, hf_output_str, out_logprobs
  39. def run_test(
  40. hf_runner: Type[HfRunner],
  41. aphrodite_runner: Type[AphroditeRunner],
  42. image_assets: _ImageAssets,
  43. model: str,
  44. *,
  45. size_factors: List[float],
  46. dtype: str,
  47. max_tokens: int,
  48. num_logprobs: int,
  49. tensor_parallel_size: int,
  50. distributed_executor_backend: Optional[str] = None,
  51. ):
  52. """Inference result should be the same between hf and aphrodite.
  53. All the image fixtures for the test is under tests/images.
  54. For huggingface runner, we provide the PIL images as input.
  55. For aphrodite runner, we provide MultiModalDataDict objects
  56. and corresponding MultiModalConfig as input.
  57. Note, the text input is also adjusted to abide by aphrodite contract.
  58. The text output is sanitized to be able to compare with hf.
  59. """
  60. # NOTE: For local use; this isn't tested in CI yet (see TODO above)
  61. if model.startswith("TIGER-Lab/Mantis"):
  62. from mantis.models.mllava import MLlavaProcessor
  63. torch_dtype = STR_DTYPE_TO_TORCH_DTYPE[dtype]
  64. mantis_processor = MLlavaProcessor.from_pretrained(
  65. model, torch_dtype=torch_dtype)
  66. assert isinstance(mantis_processor, MLlavaProcessor)
  67. else:
  68. mantis_processor = None
  69. images = [asset.pil_image for asset in image_assets]
  70. inputs_per_image = [(
  71. [prompt for _ in size_factors],
  72. [rescale_image_size(image, factor) for factor in size_factors],
  73. ) for image, prompt in zip(images, HF_IMAGE_PROMPTS)]
  74. # NOTE: take care of the order. run Aphrodite first, and then run HF.
  75. # Aphrodite needs a fresh new process without cuda initialization.
  76. # if we run HF first, the cuda initialization will be done and it
  77. # will hurt multiprocessing backend with fork method (the default method).
  78. # max_model_len should be greater than image_feature_size
  79. with aphrodite_runner(model,
  80. dtype=dtype,
  81. tensor_parallel_size=tensor_parallel_size,
  82. distributed_executor_backend=distributed_executor_backend,
  83. enforce_eager=True) as aphrodite_model:
  84. aphrodite_outputs_per_image = [
  85. aphrodite_model.generate_greedy_logprobs(prompts,
  86. max_tokens,
  87. num_logprobs=num_logprobs,
  88. images=images)
  89. for prompts, images in inputs_per_image
  90. ]
  91. if mantis_processor is not None:
  92. def process(hf_inputs: BatchEncoding):
  93. hf_inputs["pixel_values"] = hf_inputs["pixel_values"] \
  94. .to(torch_dtype) # type: ignore
  95. return hf_inputs
  96. else:
  97. def process(hf_inputs: BatchEncoding):
  98. return hf_inputs
  99. with hf_runner(model,
  100. dtype=dtype,
  101. postprocess_inputs=process,
  102. is_vision_model=True) as hf_model:
  103. hf_outputs_per_image = [
  104. hf_model.generate_greedy_logprobs_limit(prompts,
  105. max_tokens,
  106. num_logprobs=num_logprobs,
  107. images=images)
  108. for prompts, images in inputs_per_image
  109. ]
  110. for hf_outputs, aphrodite_outputs in zip(hf_outputs_per_image,
  111. aphrodite_outputs_per_image):
  112. # TODO: Check whether using original CLIPVisionModel can improve
  113. # consistency against HF
  114. check_logprobs_close(
  115. outputs_0_lst=hf_outputs,
  116. outputs_1_lst=[
  117. aphrodite_to_hf_output(aphrodite_output, model)
  118. for aphrodite_output in aphrodite_outputs
  119. ],
  120. name_0="hf",
  121. name_1="aphrodite",
  122. )
@pytest.mark.parametrize("model", models)
@pytest.mark.parametrize(
    "size_factors",
    [
        # No image
        [],
        # Single-scale
        [1.0],
        # Single-scale, batched
        [1.0, 1.0, 1.0],
        # Multi-scale
        [0.25, 0.5, 1.0],
    ],
)
@pytest.mark.parametrize("dtype", ["half"])
@pytest.mark.parametrize("max_tokens", [128])
@pytest.mark.parametrize("num_logprobs", [5])
def test_models(hf_runner, aphrodite_runner, image_assets, model, size_factors,
                dtype: str, max_tokens: int, num_logprobs: int) -> None:
    """Compare greedy logprobs from HF and Aphrodite over the image fixtures.

    Thin parametrized wrapper around run_test; always single-GPU
    (tensor_parallel_size=1) here.
    """
    run_test(
        hf_runner,
        aphrodite_runner,
        image_assets,
        model,
        size_factors=size_factors,
        dtype=dtype,
        max_tokens=max_tokens,
        num_logprobs=num_logprobs,
        tensor_parallel_size=1,
    )