import pathlib
from typing import Dict, List, Optional, Tuple, Type, Union

import pytest
import torch
from PIL.Image import Image

from aphrodite.common.config import ModelConfig
from aphrodite.inputs import InputContext, LLMInputs
from aphrodite.multimodal.base import MultiModalInputs
from aphrodite.multimodal.utils import cached_get_tokenizer, rescale_image_size

from ....conftest import (IMAGE_ASSETS, HfRunner, ImageAsset, PromptImageInput,
                          AphroditeRunner, _ImageAssets)
from ...utils import check_logprobs_close

text_only_models = [
    "Qwen/Qwen-7B-Chat"  # Has no visual component
]

multimodal_models = ["Qwen/Qwen-VL"]

HF_IMAGE_PROMPTS = IMAGE_ASSETS.prompts({
    "stop_sign":
    "Picture 1: <img></img>\nWhat's the content of the image?: ",
    "cherry_blossom":
    "Picture 1: <img></img>\nWhat is the season?: ",
})

HF_MULTIIMAGE_IMAGE_PROMPT = "Picture 1: <img></img>\nPicture 2: <img></img>\nDescribe the two images in detail.\n"  # noqa: E501

### Multimodal preprocessing tests
SAMPLE_IMAGE = IMAGE_ASSETS[0].pil_image
# These values are specific to Qwen-VL/Chat; we can get these from the model
# config also, but they are hardcoded here to keep the parameterize/fixtures
# easy to read.
IMG_START_ID = 151857
IMG_END_ID = 151858
IMG_PAD_ID = 151859
TOKS_PER_IMG = 256
VIS_ENC_DIM = 4096
IMG_SIZE = 448
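
# For reference (inferred from the assertions in the tests below, not read
# from the model config): each "<img></img>" placeholder in a processed
# prompt expands to
#     [IMG_START_ID] + [IMG_PAD_ID] * TOKS_PER_IMG + [IMG_END_ID]
# i.e. 1 + 256 + 1 = 258 tokens per image for Qwen-VL.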


def build_model_context(model_name: str,
                        tokenizer_name: Optional[str] = None,
                        trust_remote_code: bool = False):
    """Creates an InputContext for a given model.

    Args:
        model_name: Name of the model being considered.
        tokenizer_name: Name of the tokenizer being considered.
        trust_remote_code: Whether or not to allow loading remote code.

    Returns:
        InputContext for the model being considered.
    """
    if tokenizer_name is None:
        tokenizer_name = model_name
    model_config = ModelConfig(
        model_name,
        tokenizer_name,
        tokenizer_mode="auto",
        trust_remote_code=trust_remote_code,
        dtype="float32",
        seed=0,
    )
    return InputContext(model_config)


@pytest.fixture()
def input_mapper_for_qwen():
    # Lazy import to avoid initializing CUDA during test collection
    from aphrodite.modeling.models.qwen import input_mapper_for_qwen
    return input_mapper_for_qwen


@pytest.fixture()
def input_processor_for_qwen():
    # Lazy import to avoid initializing CUDA during test collection
    from aphrodite.modeling.models.qwen import input_processor_for_qwen
    return input_processor_for_qwen


@pytest.fixture()
def qwen_vl_context() -> InputContext:
    """Get an InputContext for Qwen-VL."""
    return build_model_context(model_name="Qwen/Qwen-VL",
                               trust_remote_code=True)


# Happy path tests for single/multi-image scenarios for the multimodal
# input processor and mapper, respectively
@pytest.mark.parametrize("num_images", [1, 2])
def test_input_processor_valid_mm_data(input_processor_for_qwen,
                                       qwen_vl_context: InputContext,
                                       num_images: int):
    """Happy cases for image inputs to Qwen's multimodal input processor."""
    prompt = "".join(
        [f"Picture {num}: <img></img>\n" for num in range(1, num_images + 1)])
    inputs = LLMInputs(
        prompt=prompt,
        # When processing multimodal data for a multimodal model, the qwen
        # input processor will overwrite the provided prompt_token_ids with
        # the image prompts
        prompt_token_ids=None,
        multi_modal_data={"image": torch.rand(num_images, TOKS_PER_IMG, 4096)},
    )
    proc_inputs = input_processor_for_qwen(qwen_vl_context, inputs)
    assert isinstance(proc_inputs, dict)

    # Each image should have one start / stop and a fixed context of 256
    proc_tokens = proc_inputs["prompt_token_ids"]
    assert proc_tokens.count(IMG_START_ID) == num_images
    assert proc_tokens.count(IMG_END_ID) == num_images
    assert proc_tokens.count(IMG_PAD_ID) == num_images * TOKS_PER_IMG


@pytest.mark.parametrize(
    "img_data,expected_shape",
    [
        # single / multi-image
        (SAMPLE_IMAGE, (1, 3, IMG_SIZE, IMG_SIZE)),
        (2 * [SAMPLE_IMAGE], (2, 3, IMG_SIZE, IMG_SIZE)),
        # single / multi-image embeddings
        (torch.rand(
            (TOKS_PER_IMG, VIS_ENC_DIM)), (1, TOKS_PER_IMG, VIS_ENC_DIM)),
        (torch.rand(
            (1, TOKS_PER_IMG, VIS_ENC_DIM)), (1, TOKS_PER_IMG, VIS_ENC_DIM)),
        (torch.rand(
            (2, TOKS_PER_IMG, VIS_ENC_DIM)), (2, TOKS_PER_IMG, VIS_ENC_DIM)),
    ])
def test_input_mapper_valid_mm_data(input_mapper_for_qwen,
                                    qwen_vl_context: InputContext,
                                    img_data: Union[torch.Tensor, List[Image],
                                                    Image],
                                    expected_shape: List[int]):
    """Happy cases for image inputs to Qwen's multimodal input mapper."""
    mapped_img_data = input_mapper_for_qwen(qwen_vl_context, img_data)
    # Ensure that we get the appropriately shaped pixel_values
    # for images and image embeddings, respectively.
    assert isinstance(mapped_img_data, MultiModalInputs)
    assert "pixel_values" in mapped_img_data
    assert mapped_img_data["pixel_values"].shape == expected_shape


# Sad path tests for the multimodal input processor and mapper, respectively
@pytest.mark.parametrize("mm_data", [
    {
        "image": torch.rand((5))
    },
    {
        "image": torch.rand((5, 5, 5, 5, 5))
    },
])
def test_input_processor_invalid_mm_data(input_processor_for_qwen,
                                         qwen_vl_context: InputContext,
                                         mm_data: Dict[str, torch.Tensor]):
    """Test sad cases validated in Qwen's multimodal input processor."""
    tokenizer = cached_get_tokenizer(qwen_vl_context.model_config.tokenizer,
                                     trust_remote_code=True)
    prompt = "Picture 1: <img></img>\n"
    prompt_token_ids = tokenizer.encode(prompt)
    inputs = LLMInputs(prompt=prompt,
                       prompt_token_ids=prompt_token_ids,
                       multi_modal_data=mm_data)
    # Should fail since we have too many or too few dimensions for embeddings
    with pytest.raises(ValueError):
        input_processor_for_qwen(qwen_vl_context, inputs)


@pytest.mark.parametrize(
    "img_data",
    [
        # Wrong context length
        torch.rand((1, TOKS_PER_IMG + 10, VIS_ENC_DIM)),
        # Wrong visual encoder output size
        torch.rand((1, TOKS_PER_IMG, VIS_ENC_DIM + 10)),
    ])
def test_input_mapper_invalid_mm_data(
    input_mapper_for_qwen,
    qwen_vl_context: InputContext,
    img_data: Union[torch.Tensor, List[Image], Image],
):
    """Sad cases validated in Qwen VL's multimodal input mapper."""
    with pytest.raises(ValueError):
        input_mapper_for_qwen(qwen_vl_context, img_data)


### End-to-end generation tests
def get_prompt_with_path(tmp_path: pathlib.PosixPath, prompt: str,
                         assets: Union[_ImageAssets, List[ImageAsset]]) -> str:
    """Given a temporary dir path, export one or more image assets into the
    tempdir & replace the prompt's image placeholders with the local paths so
    that the HF version of Qwen-VL can resolve the paths and load the images
    in its forward() call.

    Args:
        tmp_path: Tempdir for the test under consideration.
        prompt: Prompt with image placeholders.
        assets: List of image assets whose len equals the num placeholders.
    """
    # Ensure that the number of placeholders matches the number of assets;
    # if this is not true, the test is probably written incorrectly.
    assert prompt.count("<img></img>") == len(assets)

    # Replace the placeholders with local paths to the exported assets
    for asset in assets:
        image_tmp_path = tmp_path / f"{asset.name}.jpg"
        asset.pil_image.save(image_tmp_path)
        prompt = prompt.replace(
            "<img></img>",
            f"<img>{image_tmp_path}</img>",
            1,
        )
    return prompt
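
# Illustrative sketch of what get_prompt_with_path produces (the tmp dir is
# pytest-generated, so the exact path below is hypothetical):
#     "Picture 1: <img></img>\n"
# becomes something like
#     "Picture 1: <img>/tmp/pytest-0/.../stop_sign.jpg</img>\n"
# which the HF Qwen-VL code can resolve and load from disk.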


def run_test(
    hf_runner: Type[HfRunner],
    aphrodite_runner: Type[AphroditeRunner],
    inputs: List[Tuple[List[str], PromptImageInput]],
    model: str,
    *,
    dtype: str,
    max_tokens: int,
    num_logprobs: int,
    mm_limit: int,
    tensor_parallel_size: int,
    distributed_executor_backend: Optional[str] = None,
):
    """Inference result should be the same between hf and aphrodite.

    All the image fixtures for the test are under tests/images.
    For the huggingface runner, we provide the PIL images as input.
    For the aphrodite runner, we provide MultiModalDataDict objects
    and the corresponding MultiModalConfig as input.
    Note, the text input is also adjusted to abide by the aphrodite contract.
    The text output is sanitized to be able to compare with hf.
    """

    # NOTE: take care of the order. run Aphrodite first, and then run HF.
    # Aphrodite needs a fresh new process without cuda initialization.
    # if we run HF first, the cuda initialization will be done and it
    # will hurt multiprocessing backend with fork method (the default method).

    # max_model_len should be greater than image_feature_size
    # Qwen encodes each image into a fixed content size of 256
    with aphrodite_runner(model,
                          max_model_len=1024,
                          max_num_seqs=1,
                          dtype=dtype,
                          limit_mm_per_prompt={"image": mm_limit},
                          tensor_parallel_size=tensor_parallel_size,
                          distributed_executor_backend=distributed_executor_backend,
                          enforce_eager=True) as aphrodite_model:
        aphrodite_outputs_per_image = [
            aphrodite_model.generate_greedy_logprobs(prompts,
                                                     max_tokens,
                                                     num_logprobs=num_logprobs,
                                                     images=images)
            for prompts, images in inputs
        ]

    with hf_runner(model, dtype=dtype) as hf_model:
        hf_outputs_per_image = [
            hf_model.generate_greedy_logprobs_limit(prompts,
                                                    max_tokens,
                                                    num_logprobs=num_logprobs,
                                                    images=images)
            for prompts, images in inputs
        ]

    for hf_outputs, aphrodite_outputs in zip(hf_outputs_per_image,
                                             aphrodite_outputs_per_image):
        check_logprobs_close(
            outputs_0_lst=hf_outputs,
            outputs_1_lst=aphrodite_outputs,
            name_0="hf",
            name_1="aphrodite",
        )


@pytest.mark.parametrize("model", multimodal_models)
@pytest.mark.parametrize(
    "size_factors",
    [
        # No image
        [],
        # Single-scale
        [1.0],
        # Single-scale, batched
        [1.0, 1.0, 1.0],
        # Multi-scale
        [0.25, 0.5, 1.0],
    ],
)
@pytest.mark.parametrize("dtype", ["bfloat16"])
@pytest.mark.parametrize("max_tokens", [8])
@pytest.mark.parametrize("num_logprobs", [5])
def test_multimodal_models_single_image(tmp_path: pathlib.PosixPath,
                                        hf_runner: Type[HfRunner],
                                        aphrodite_runner: Type[AphroditeRunner],
                                        image_assets: _ImageAssets, model: str,
                                        size_factors: List[float], dtype: str,
                                        max_tokens: int,
                                        num_logprobs: int) -> None:
    """Tests multimodal models with single image prompts."""
    images = [asset.pil_image for asset in image_assets]

    prompts = [
        get_prompt_with_path(tmp_path, prompt, [asset])
        for prompt, asset in zip(HF_IMAGE_PROMPTS, image_assets)
    ]

    inputs = [(
        [prompt for _ in size_factors],
        [rescale_image_size(image, factor) for factor in size_factors],
    ) for image, prompt in zip(images, prompts)]

    run_test(
        hf_runner,
        aphrodite_runner,
        inputs,
        model,
        dtype=dtype,
        max_tokens=max_tokens,
        num_logprobs=num_logprobs,
        mm_limit=1,
        tensor_parallel_size=1,
    )


@pytest.mark.parametrize("model", multimodal_models)
@pytest.mark.parametrize(
    "size_factors",
    [
        # No image
        [],
        # Single-scale
        [1.0],
        # Single-scale, batched
        [1.0, 1.0, 1.0],
        # Multi-scale
        [0.25, 0.5, 1.0],
    ],
)
@pytest.mark.parametrize("dtype", ["bfloat16"])
@pytest.mark.parametrize("max_tokens", [128])
@pytest.mark.parametrize("num_logprobs", [5])
def test_multimodal_models_multi_image(tmp_path: pathlib.PosixPath,
                                       hf_runner: Type[HfRunner],
                                       aphrodite_runner: Type[AphroditeRunner],
                                       image_assets: _ImageAssets, model: str,
                                       size_factors: List[float], dtype: str,
                                       max_tokens: int,
                                       num_logprobs: int) -> None:
    """Tests multimodal models with multi-image prompts."""
    images = [asset.pil_image for asset in image_assets]
    # Put all of the images into one prompt.
    prompt = get_prompt_with_path(tmp_path, HF_MULTIIMAGE_IMAGE_PROMPT,
                                  image_assets)
    inputs = [([prompt for _ in size_factors],
               [[rescale_image_size(image, factor) for image in images]
                for factor in size_factors])]

    run_test(
        hf_runner,
        aphrodite_runner,
        inputs,
        model,
        dtype=dtype,
        max_tokens=max_tokens,
        num_logprobs=num_logprobs,
        mm_limit=2,
        tensor_parallel_size=1,
    )


# Ensure that a text-only Qwen model can still be loaded and
# used for inference in Aphrodite without throwing.
@pytest.mark.parametrize("model", text_only_models)
@pytest.mark.parametrize("dtype", ["bfloat16"])
@pytest.mark.parametrize("max_tokens", [32])
@pytest.mark.parametrize("num_logprobs", [5])
def test_text_only_qwen_model_can_be_loaded_and_run(
    aphrodite_runner: Type[AphroditeRunner],
    example_prompts: List[str],
    model: str,
    *,
    dtype: str,
    max_tokens: int,
    num_logprobs: int,
):
    with aphrodite_runner(model, dtype=dtype) as aphrodite_model:
        aphrodite_model.generate_greedy_logprobs(
            example_prompts,
            max_tokens,
            num_logprobs=num_logprobs,
        )