# test_llava.py

from typing import List, Optional, Tuple, Type, overload

import pytest
from transformers import (AutoConfig, AutoModelForVision2Seq, AutoTokenizer,
                          BatchEncoding)

from aphrodite.common.sequence import SampleLogprobs
from aphrodite.common.utils import STR_DTYPE_TO_TORCH_DTYPE
from aphrodite.multimodal.utils import rescale_image_size

from ..conftest import (IMAGE_ASSETS, AphroditeRunner, HfRunner,
                        PromptImageInput, _ImageAssets)
from .utils import check_logprobs_close

pytestmark = pytest.mark.vlm

_LIMIT_IMAGE_PER_PROMPT = 4

HF_IMAGE_PROMPTS = IMAGE_ASSETS.prompts({
    "stop_sign":
    "USER: <image>\nWhat's the content of the image?\nASSISTANT:",
    "cherry_blossom":
    "USER: <image>\nWhat is the season?\nASSISTANT:",
})

models = [
    "llava-hf/llava-1.5-7b-hf",
    # TODO: Get this model to produce meaningful output in Aphrodite
    # "TIGER-Lab/Mantis-8B-siglip-llama3",
]


def aphrodite_to_hf_output(aphrodite_output: Tuple[List[int], str,
                                                   Optional[SampleLogprobs]],
                           model: str):
    """Sanitize aphrodite output to be comparable with hf output."""
    output_ids, output_str, out_logprobs = aphrodite_output

    config = AutoConfig.from_pretrained(model)
    image_token_id = config.image_token_index

    tokenizer = AutoTokenizer.from_pretrained(model)
    eos_token_id = tokenizer.eos_token_id

    hf_output_ids = [
        token_id for idx, token_id in enumerate(output_ids)
        if token_id != image_token_id or output_ids[idx - 1] != image_token_id
    ]

    assert output_str[0] == " "
    hf_output_str = output_str[1:]
    if hf_output_ids[-1] == eos_token_id:
        hf_output_str = hf_output_str + tokenizer.decode(eos_token_id)

    return hf_output_ids, hf_output_str, out_logprobs
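

# Illustrative sketch of the sanitization above (the token ids below are
# made up for illustration, not taken from the real tokenizer): Aphrodite
# expands the image placeholder into one token per image feature, so a
# sequence like
#     [1, 32000, 32000, 32000, 1724, ...]
# collapses to
#     [1, 32000, 1724, ...]
# matching the single <image> placeholder that HF keeps in its output.

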
@overload
def run_test(
    hf_runner: Type[HfRunner],
    aphrodite_runner: Type[AphroditeRunner],
    image_assets: _ImageAssets,
    model: str,
    *,
    size_factors: List[float],
    dtype: str,
    max_tokens: int,
    num_logprobs: int,
    tensor_parallel_size: int,
    distributed_executor_backend: Optional[str] = None,
):
    ...


@overload
def run_test(
    hf_runner: Type[HfRunner],
    aphrodite_runner: Type[AphroditeRunner],
    image_assets: _ImageAssets,
    model: str,
    *,
    sizes: List[Tuple[int, int]],
    dtype: str,
    max_tokens: int,
    num_logprobs: int,
    tensor_parallel_size: int,
    distributed_executor_backend: Optional[str] = None,
):
    ...
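

# The overloads above exist for type checkers only: callers must pass exactly
# one of `size_factors` or `sizes` as a keyword argument. The implementation
# below accepts both as Optional and enforces that choice at runtime.

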
def run_test(
    hf_runner: Type[HfRunner],
    aphrodite_runner: Type[AphroditeRunner],
    image_assets: _ImageAssets,
    model: str,
    *,
    size_factors: Optional[List[float]] = None,
    sizes: Optional[List[Tuple[int, int]]] = None,
    dtype: str,
    max_tokens: int,
    num_logprobs: int,
    tensor_parallel_size: int,
    distributed_executor_backend: Optional[str] = None,
):
    images = [asset.pil_image for asset in image_assets]

    if size_factors is not None:
        inputs_per_image = [(
            [prompt for _ in size_factors],
            [rescale_image_size(image, factor) for factor in size_factors],
        ) for image, prompt in zip(images, HF_IMAGE_PROMPTS)]
    elif sizes is not None:
        inputs_per_image = [(
            [prompt for _ in sizes],
            [image.resize(size) for size in sizes],
        ) for image, prompt in zip(images, HF_IMAGE_PROMPTS)]
    else:
        raise ValueError("You must provide either `size_factors` or `sizes`")

    _run_test(hf_runner,
              aphrodite_runner,
              inputs_per_image,
              model,
              dtype=dtype,
              max_tokens=max_tokens,
              num_logprobs=num_logprobs,
              tensor_parallel_size=tensor_parallel_size,
              distributed_executor_backend=distributed_executor_backend)
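

# A minimal sketch of driving `run_test` with explicit target sizes instead
# of scale factors (the sizes here are hypothetical; any (width, height)
# pairs accepted by PIL.Image.resize would do):
#
#     run_test(hf_runner, aphrodite_runner, image_assets, model,
#              sizes=[(336, 336), (672, 448)],
#              dtype="half", max_tokens=128, num_logprobs=5,
#              tensor_parallel_size=1)

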
def _run_test(
    hf_runner: Type[HfRunner],
    aphrodite_runner: Type[AphroditeRunner],
    inputs: List[Tuple[List[str], PromptImageInput]],
    model: str,
    *,
    dtype: str,
    max_tokens: int,
    num_logprobs: int,
    tensor_parallel_size: int,
    distributed_executor_backend: Optional[str] = None,
):
    """Inference result should be the same between hf and aphrodite.

    All the image fixtures for the test are under tests/images.
    For the huggingface runner, we provide the PIL images as input.
    For the aphrodite runner, we provide MultiModalDataDict objects
    and the corresponding MultiModalConfig as input.
    Note that the text input is also adjusted to abide by the aphrodite
    contract. The text output is sanitized so it can be compared with hf.
    """
    # NOTE: For local use; this isn't tested in CI yet (see TODO above)
    if model.startswith("TIGER-Lab/Mantis"):
        from mantis.models.mllava import MLlavaProcessor
        torch_dtype = STR_DTYPE_TO_TORCH_DTYPE[dtype]
        mantis_processor = MLlavaProcessor.from_pretrained(
            model, torch_dtype=torch_dtype)
        assert isinstance(mantis_processor, MLlavaProcessor)
    else:
        mantis_processor = None
    # NOTE: Take care of the order: run Aphrodite first, then HF.
    # Aphrodite needs a fresh process in which CUDA has not yet been
    # initialized; if HF ran first, CUDA would already be initialized here,
    # which breaks the multiprocessing backend with the fork start method
    # (the default).
    # max_model_len should be greater than image_feature_size.
    with aphrodite_runner(model,
                          dtype=dtype,
                          max_model_len=4096,
                          tensor_parallel_size=tensor_parallel_size,
                          distributed_executor_backend=distributed_executor_backend,
                          enforce_eager=True,
                          limit_mm_per_prompt={"image": _LIMIT_IMAGE_PER_PROMPT
                                               }) as aphrodite_model:
        aphrodite_outputs_per_image = [
            aphrodite_model.generate_greedy_logprobs(prompts,
                                                     max_tokens,
                                                     num_logprobs=num_logprobs,
                                                     images=images)
            for prompts, images in inputs
        ]
    if mantis_processor is not None:

        def process(hf_inputs: BatchEncoding):
            hf_inputs["pixel_values"] = hf_inputs["pixel_values"] \
                .to(torch_dtype)  # type: ignore
            return hf_inputs
    else:

        def process(hf_inputs: BatchEncoding):
            return hf_inputs
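
    # `process` is handed to the HF runner via `postprocess_inputs` below so
    # that Mantis pixel values are cast to the torch dtype the model was
    # loaded with; for plain LLaVA the inputs pass through unchanged.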
    with hf_runner(model,
                   dtype=dtype,
                   postprocess_inputs=process,
                   auto_cls=AutoModelForVision2Seq) as hf_model:
        hf_outputs_per_image = [
            hf_model.generate_greedy_logprobs_limit(prompts,
                                                    max_tokens,
                                                    num_logprobs=num_logprobs,
                                                    images=images)
            for prompts, images in inputs
        ]

    for hf_outputs, aphrodite_outputs in zip(hf_outputs_per_image,
                                             aphrodite_outputs_per_image):
        # TODO: Check whether using original CLIPVisionModel can improve
        # consistency against HF
        check_logprobs_close(
            outputs_0_lst=hf_outputs,
            outputs_1_lst=[
                aphrodite_to_hf_output(aphrodite_output, model)
                for aphrodite_output in aphrodite_outputs
            ],
            name_0="hf",
            name_1="aphrodite",
        )
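

# NOTE: `check_logprobs_close` (from .utils) is assumed here to follow the
# usual contract of such helpers: two outputs count as close when each token
# picked by one runner appears among the other runner's top-`num_logprobs`
# candidates, tolerating small numerical differences between backends.

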
@pytest.mark.parametrize("model", models)
@pytest.mark.parametrize(
    "size_factors",
    [
        # No image
        [],
        # Single-scale
        [1.0],
        # Single-scale, batched
        [1.0, 1.0, 1.0],
        # Multi-scale
        [0.25, 0.5, 1.0],
    ],
)
@pytest.mark.parametrize("dtype", ["half"])
@pytest.mark.parametrize("max_tokens", [128])
@pytest.mark.parametrize("num_logprobs", [5])
def test_models(hf_runner, aphrodite_runner, image_assets, model, size_factors,
                dtype: str, max_tokens: int, num_logprobs: int) -> None:
    run_test(
        hf_runner,
        aphrodite_runner,
        image_assets,
        model,
        size_factors=size_factors,
        dtype=dtype,
        max_tokens=max_tokens,
        num_logprobs=num_logprobs,
        tensor_parallel_size=1,
    )


@pytest.mark.parametrize("model", models)
@pytest.mark.parametrize("dtype", ["half"])
@pytest.mark.parametrize("max_tokens", [128])
@pytest.mark.parametrize("num_logprobs", [5])
def test_models_multiple_image_inputs(hf_runner, aphrodite_runner, image_assets,
                                      model, dtype, max_tokens,
                                      num_logprobs) -> None:
    stop_sign = image_assets[0].pil_image
    cherry_blossom = image_assets[1].pil_image

    inputs = [(
        [
            "USER: <image><image>\nDescribe 2 images.\nASSISTANT:",
            "USER: <image><image>\nDescribe 2 images.\nASSISTANT:",
            "USER: <image><image><image><image>\nDescribe 4 images.\nASSISTANT:",  # noqa: E501
            "USER: <image>\nWhat is the season?\nASSISTANT:",
        ],
        [
            [stop_sign, cherry_blossom],
            # Images with different sizes and aspect-ratios
            [
                rescale_image_size(stop_sign, 0.1),
                stop_sign,
            ],
            [
                stop_sign,
                rescale_image_size(stop_sign, 0.25),
                cherry_blossom.resize((183, 488)),
                cherry_blossom.resize((488, 183)),
            ],
            cherry_blossom,
        ])]

    _run_test(
        hf_runner,
        aphrodite_runner,
        inputs,
        model,
        dtype=dtype,
        max_tokens=max_tokens,
        num_logprobs=num_logprobs,
        tensor_parallel_size=1,
    )


@pytest.mark.parametrize("model", models)
def test_context_length_too_short(aphrodite_runner, image_assets, model):
    images = [asset.pil_image for asset in image_assets]

    with pytest.raises(ValueError, match="too long to fit into the model"):
        aphrodite_model = aphrodite_runner(
            model,
            max_model_len=128,  # LLaVA has a feature size of 576
            enforce_eager=True,
        )

        with aphrodite_model:
            aphrodite_model.generate_greedy([HF_IMAGE_PROMPTS[0]],
                                            max_tokens=1,
                                            images=[images[0]])
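

# These tests are assumed to live under the repository's tests tree (the
# relative imports above suggest something like tests/models/). A typical
# local invocation would then be:
#
#     pytest tests/models/test_llava.py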