llava_next.py

from typing import (Iterable, List, Literal, Mapping, Optional, Tuple,
                    TypedDict, Union)

import torch
import torch.nn as nn
from PIL import Image
from transformers import CLIPVisionConfig, LlavaNextConfig, SiglipVisionConfig
from transformers.models.llava_next.modeling_llava_next import (
    get_anyres_image_grid_shape, unpad_image)
from typing_extensions import NotRequired

from aphrodite.attention import AttentionMetadata
from aphrodite.common.config import CacheConfig, MultiModalConfig
from aphrodite.common.sequence import IntermediateTensors
from aphrodite.common.utils import is_list_of
from aphrodite.inputs import INPUT_REGISTRY, InputContext, LLMInputs
from aphrodite.modeling.layers.sampler import SamplerOutput
from aphrodite.modeling.model_loader.weight_utils import default_weight_loader
from aphrodite.modeling.sampling_metadata import SamplingMetadata
from aphrodite.multimodal import MULTIMODAL_REGISTRY
from aphrodite.quantization.base_config import QuantizationConfig

from .clip import (CLIPVisionModel, dummy_image_for_clip,
                   dummy_seq_data_for_clip, get_clip_image_feature_size,
                   get_clip_patch_grid_length, input_processor_for_clip)
from .interfaces import SupportsMultiModal
from .llava import LlavaMultiModalProjector
from .siglip import (SiglipVisionModel, dummy_image_for_siglip,
                     dummy_seq_data_for_siglip, get_siglip_image_feature_size,
                     get_siglip_patch_grid_length, input_processor_for_siglip)
from .utils import (flatten_bn, group_weights_with_prefix,
                    init_aphrodite_registered_model,
                    merge_multimodal_embeddings)

# Results in the max possible feature size (2x2 grid of 336x336px tiles)
MAX_IMAGE_FEATURE_SIZE_HEIGHT = MAX_IMAGE_FEATURE_SIZE_WIDTH = 448


class LlavaNextImagePixelInputs(TypedDict):
    type: Literal["pixel_values"]
    data: Union[torch.Tensor, List[torch.Tensor]]
    """
    Shape:
    `(batch_size * num_images, 1 + num_patches, num_channels, height, width)`

    Note that `num_patches` may be different per batch and image,
    in which case the data is passed as a list instead of a batched tensor.
    """

    image_sizes: NotRequired[torch.Tensor]
    """
    Shape: `(batch_size * num_images, 2)`

    This should be in `(height, width)` format.
    """


class LlavaNextImageEmbeddingInputs(TypedDict):
    type: Literal["image_embeds"]
    data: torch.Tensor
    """Shape: `(batch_size * num_images, image_feature_size, hidden_size)`

    `hidden_size` must match the hidden size of language model backbone.
    """


LlavaNextImageInputs = Union[LlavaNextImagePixelInputs,
                             LlavaNextImageEmbeddingInputs]
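
# Shape example (hypothetical numbers): for a single image that the anyres
# preprocessing splits into a 2x2 grid of 336px tiles, `pixel_values` holds
# the base image plus the 4 tiles, i.e. a tensor of shape (1, 5, 3, 336, 336),
# while `image_sizes` has shape (1, 2) and stores the image's original
# (height, width).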


# Based on: https://github.com/huggingface/text-generation-inference/blob/v2.2.0/server/text_generation_server/models/vlm_causal_lm.py#L79
def _get_llava_next_num_unpadded_features(
    original_height: int,
    original_width: int,
    npatches: int,
    num_patch_height: int,
    num_patch_width: int,
) -> Tuple[int, int]:
    current_height = npatches * num_patch_height
    current_width = npatches * num_patch_width

    original_aspect_ratio = original_width / original_height
    current_aspect_ratio = current_width / current_height

    if original_aspect_ratio > current_aspect_ratio:
        scale_factor = current_width / original_width
        new_height = int(original_height * scale_factor)
        padding = (current_height - new_height) // 2
        current_height -= 2 * padding
    else:
        scale_factor = current_height / original_height
        new_width = int(original_width * scale_factor)
        padding = (current_width - new_width) // 2
        current_width -= 2 * padding

    unpadded_features = current_height * current_width
    newline_features = current_height
    return (unpadded_features, newline_features)
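
# Worked example (hypothetical numbers): with npatches=24 (a 336px tower with
# 14px patches) and a 2x2 anyres grid, current_height = current_width = 48.
# For an original 600x800 (height x width) image, the original aspect ratio
# (1.33) exceeds the grid's (1.0), so padded rows are trimmed:
# scale_factor = 48 / 800 = 0.06, new_height = 36, padding = 6, and the
# function returns (36 * 48, 36) = (1728, 36).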


# Based on: https://github.com/huggingface/text-generation-inference/blob/v2.2.0/server/text_generation_server/models/vlm_causal_lm.py#L106
def get_llava_next_image_feature_size(
    hf_config: LlavaNextConfig,
    *,
    input_height: int,
    input_width: int,
) -> int:
    vision_config = hf_config.vision_config

    if isinstance(vision_config, CLIPVisionConfig):
        num_patches = get_clip_patch_grid_length(
            image_size=vision_config.image_size,
            patch_size=vision_config.patch_size,
        )
        base_feature_size = get_clip_image_feature_size(vision_config)
    elif isinstance(vision_config, SiglipVisionConfig):
        num_patches = get_siglip_patch_grid_length(
            image_size=vision_config.image_size,
            patch_size=vision_config.patch_size,
        )
        base_feature_size = get_siglip_image_feature_size(vision_config)
    else:
        msg = f"Unsupported vision config: {type(vision_config)}"
        raise NotImplementedError(msg)

    strategy = hf_config.vision_feature_select_strategy
    if strategy == "default":
        base_feature_size -= 1
    elif strategy == "full":
        pass
    else:
        raise ValueError(f"Unexpected select feature strategy: {strategy}")

    num_patch_height, num_patch_width = get_anyres_image_grid_shape(
        image_size=(input_height, input_width),
        grid_pinpoints=hf_config.image_grid_pinpoints,
        patch_size=vision_config.image_size,
    )

    (
        unpadded_feature_size,
        newline_feature_size,
    ) = _get_llava_next_num_unpadded_features(input_height, input_width,
                                              num_patches, num_patch_height,
                                              num_patch_width)

    return unpadded_feature_size + newline_feature_size + base_feature_size
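
# Continuing the worked example above (hypothetical numbers, assuming a
# CLIP-ViT-L/14-336 tower and the llava-v1.6 default grid pinpoints): a
# 600x800 image maps to a 2x2 grid, the base image contributes
# 24 * 24 = 576 features under the "default" strategy (CLS token dropped),
# and the total is 1728 + 36 + 576 = 2340 image tokens.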


def get_max_llava_next_image_tokens(ctx: InputContext):
    return get_llava_next_image_feature_size(
        ctx.get_hf_config(LlavaNextConfig),
        input_height=MAX_IMAGE_FEATURE_SIZE_HEIGHT,
        input_width=MAX_IMAGE_FEATURE_SIZE_WIDTH,
    )


def dummy_data_for_llava_next(ctx: InputContext, seq_len: int,
                              mm_counts: Mapping[str, int]):
    hf_config = ctx.get_hf_config(LlavaNextConfig)
    vision_config = hf_config.vision_config
    num_images = mm_counts["image"]

    image_feature_size = get_max_llava_next_image_tokens(ctx)

    if isinstance(vision_config, CLIPVisionConfig):
        seq_data = dummy_seq_data_for_clip(
            vision_config,
            seq_len,
            num_images,
            image_token_id=hf_config.image_token_index,
            image_feature_size_override=image_feature_size,
        )

        mm_data = dummy_image_for_clip(
            vision_config,
            num_images,
            image_width_override=MAX_IMAGE_FEATURE_SIZE_WIDTH,
            image_height_override=MAX_IMAGE_FEATURE_SIZE_HEIGHT,
        )

        return seq_data, mm_data
    elif isinstance(vision_config, SiglipVisionConfig):
        seq_data = dummy_seq_data_for_siglip(
            vision_config,
            seq_len,
            num_images,
            image_token_id=hf_config.image_token_index,
            image_feature_size_override=image_feature_size,
        )

        mm_data = dummy_image_for_siglip(
            vision_config,
            num_images,
            image_width_override=MAX_IMAGE_FEATURE_SIZE_WIDTH,
            image_height_override=MAX_IMAGE_FEATURE_SIZE_HEIGHT,
        )

        return seq_data, mm_data

    msg = f"Unsupported vision config: {type(vision_config)}"
    raise NotImplementedError(msg)


def input_processor_for_llava_next(ctx: InputContext, llm_inputs: LLMInputs):
    multi_modal_data = llm_inputs.get("multi_modal_data")
    if multi_modal_data is None or "image" not in multi_modal_data:
        return llm_inputs

    model_config = ctx.model_config
    hf_config = ctx.get_hf_config(LlavaNextConfig)
    vision_config = hf_config.vision_config

    image_data = multi_modal_data["image"]
    if isinstance(image_data, Image.Image):
        width, height = image_data.size

        image_feature_size = get_llava_next_image_feature_size(
            hf_config,
            input_height=height,
            input_width=width,
        )
    elif is_list_of(image_data, Image.Image):
        image_feature_size = [
            get_llava_next_image_feature_size(hf_config,
                                              input_height=img.height,
                                              input_width=img.width)
            for img in image_data
        ]
    elif isinstance(image_data, torch.Tensor):
        num_images, image_feature_size, hidden_size = image_data.shape
    elif is_list_of(image_data, torch.Tensor):
        image_feature_size = [item.shape[1] for item in image_data]
    else:
        raise TypeError(f"Invalid image type: {type(image_data)}")

    vision_config = hf_config.vision_config

    if isinstance(vision_config, CLIPVisionConfig):
        return input_processor_for_clip(
            model_config,
            vision_config,
            llm_inputs,
            image_token_id=hf_config.image_token_index,
            image_feature_size_override=image_feature_size,
        )
    elif isinstance(vision_config, SiglipVisionConfig):
        return input_processor_for_siglip(
            model_config,
            vision_config,
            llm_inputs,
            image_token_id=hf_config.image_token_index,
            image_feature_size_override=image_feature_size,
        )

    msg = f"Unsupported vision config: {type(vision_config)}"
    raise NotImplementedError(msg)


def _init_vision_tower(hf_config: LlavaNextConfig):
    vision_config = hf_config.vision_config

    # Initialize the vision tower only up to the required feature layer
    vision_feature_layer = hf_config.vision_feature_layer
    if vision_feature_layer < 0:
        num_hidden_layers = hf_config.vision_config.num_hidden_layers \
            + vision_feature_layer + 1
    else:
        num_hidden_layers = vision_feature_layer + 1

    if isinstance(vision_config, CLIPVisionConfig):
        return CLIPVisionModel(
            vision_config,
            num_hidden_layers_override=num_hidden_layers,
        )
    elif isinstance(vision_config, SiglipVisionConfig):
        return SiglipVisionModel(
            vision_config,
            num_hidden_layers_override=num_hidden_layers,
        )

    msg = f"Unsupported vision config: {type(vision_config)}"
    raise NotImplementedError(msg)
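
# Example (hypothetical numbers): with a 24-layer CLIP tower and the common
# LLaVA setting vision_feature_layer = -2 (second-to-last layer), only
# 24 + (-2) + 1 = 23 encoder layers need to be instantiated.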


@MULTIMODAL_REGISTRY.register_image_input_mapper()
@MULTIMODAL_REGISTRY.register_max_image_tokens(get_max_llava_next_image_tokens)
@INPUT_REGISTRY.register_dummy_data(dummy_data_for_llava_next)
@INPUT_REGISTRY.register_input_processor(input_processor_for_llava_next)
class LlavaNextForConditionalGeneration(nn.Module, SupportsMultiModal):

    def __init__(self,
                 config: LlavaNextConfig,
                 multimodal_config: MultiModalConfig,
                 cache_config: Optional[CacheConfig] = None,
                 quant_config: Optional[QuantizationConfig] = None) -> None:
        super().__init__()

        self.config = config
        self.multimodal_config = multimodal_config

        # TODO: Optionally initialize this for supporting embeddings.
        self.vision_tower = _init_vision_tower(config)
        self.multi_modal_projector = LlavaMultiModalProjector(
            vision_hidden_size=config.vision_config.hidden_size,
            text_hidden_size=config.text_config.hidden_size,
            projector_hidden_act=config.projector_hidden_act)
        self.language_model = init_aphrodite_registered_model(
            config.text_config, cache_config, quant_config)

        self.image_newline = nn.Parameter(
            torch.empty(config.text_config.hidden_size))

    def _validate_image_sizes(self, data: torch.Tensor) -> torch.Tensor:
        expected_dims = (2, )

        def _validate_shape(d: torch.Tensor):
            actual_dims = tuple(d.shape)

            if actual_dims != expected_dims:
                expected_expr = str(expected_dims)
                raise ValueError(
                    f"The expected shape of image sizes per image per batch "
                    f"is {expected_expr}. You supplied {tuple(d.shape)}.")

        for d in data:
            _validate_shape(d)

        return data

    def _validate_pixel_values(
        self, data: Union[torch.Tensor, List[torch.Tensor]]
    ) -> Union[torch.Tensor, List[torch.Tensor]]:

        h = w = self.config.vision_config.image_size
        expected_dims = (3, h, w)

        def _validate_shape(d: torch.Tensor):
            actual_dims = tuple(d.shape[1:])

            if actual_dims != expected_dims:
                expected_expr = ("num_patches", *map(str, expected_dims))
                raise ValueError(
                    "The expected shape of pixel values per image per batch "
                    f"is {expected_expr}. You supplied {tuple(d.shape)}.")

        for d in data:
            _validate_shape(d)

        return data

    def _parse_and_validate_image_input(
            self, **kwargs: object) -> Optional[LlavaNextImageInputs]:
        pixel_values = kwargs.pop("pixel_values", None)
        image_sizes = kwargs.pop("image_sizes", None)
        image_embeds = kwargs.pop("image_embeds", None)

        if pixel_values is None and image_embeds is None:
            return None

        if pixel_values is not None:
            if not isinstance(pixel_values, (torch.Tensor, list)):
                raise ValueError("Incorrect type of pixel values. "
                                 f"Got type: {type(pixel_values)}")

            if not isinstance(image_sizes, (torch.Tensor, list)):
                raise ValueError("Incorrect type of image sizes. "
                                 f"Got type: {type(image_sizes)}")

            return LlavaNextImagePixelInputs(
                type="pixel_values",
                data=self._validate_pixel_values(flatten_bn(pixel_values)),
                image_sizes=self._validate_image_sizes(
                    flatten_bn(image_sizes, concat=True)),
            )

        if image_embeds is not None:
            if not isinstance(image_embeds, torch.Tensor):
                raise ValueError("Incorrect type of image embeds. "
                                 f"Got type: {type(image_embeds)}")

            return LlavaNextImageEmbeddingInputs(
                type="image_embeds",
                data=flatten_bn(image_embeds),
            )

        raise AssertionError("This line should be unreachable.")
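
    # The multimodal kwargs arrive via the registered image input mapper:
    # either `pixel_values` (plus `image_sizes`) for raw images, or
    # `image_embeds` for precomputed embeddings; both are flattened across
    # the batch and image dimensions before validation.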

    def _select_image_features(self, image_features: torch.Tensor, *,
                               strategy: str) -> torch.Tensor:
        # Copied from https://github.com/huggingface/transformers/blob/39c3c0a72af6fbda5614dde02ff236069bb79827/src/transformers/models/llava/modeling_llava.py#L421  # noqa
        if strategy == "default":
            return image_features[:, 1:]
        elif strategy == "full":
            return image_features

        raise ValueError(f"Unexpected select feature strategy: {strategy}")

    def _image_pixels_to_features(
        self,
        vision_tower: Union[CLIPVisionModel, SiglipVisionModel],
        pixel_values: torch.Tensor,
    ) -> torch.Tensor:

        # NOTE: we skip the step to select the vision feature layer since
        # this is already done inside the vision tower
        image_features = vision_tower(pixel_values)

        return self._select_image_features(
            image_features,
            strategy=self.config.vision_feature_select_strategy,
        )

    # Based on: https://github.com/haotian-liu/LLaVA/blob/main/llava/model/llava_arch.py
    def _merge_image_patch_embeddings(self, image_size: torch.Tensor,
                                      patch_embeddings: torch.Tensor, *,
                                      strategy: str) -> torch.Tensor:
        if strategy == "flat":
            return patch_embeddings.flatten(0, 1)

        if strategy.startswith("spatial"):
            height = width = self.config.vision_config.image_size \
                // self.config.vision_config.patch_size

            base_patch_embeds = patch_embeddings[0]
            if height * width != base_patch_embeds.shape[0]:
                raise ValueError(
                    "The number of patches is not consistent with the "
                    "image size.")

            if patch_embeddings.shape[0] > 1:
                other_patch_embeds = patch_embeddings[1:]

                # Move to CPU to avoid floating-point errors
                orig_height, orig_width = image_size.tolist()

                # image_aspect_ratio == "anyres"
                num_patch_height, num_patch_width = get_anyres_image_grid_shape(
                    (orig_height, orig_width),
                    self.config.image_grid_pinpoints,
                    self.config.vision_config.image_size,
                )
                num_patches = num_patch_height * num_patch_width

                # Image patches might be padded for batch processing
                other_patch_embeds = other_patch_embeds[:num_patches] \
                    .view(num_patch_height, num_patch_width, height, width, -1)

                if "unpad" in strategy:
                    other_patch_embeds = other_patch_embeds \
                        .permute(4, 0, 2, 1, 3).contiguous() \
                        .flatten(1, 2).flatten(2, 3)
                    other_patch_embeds = unpad_image(other_patch_embeds,
                                                     (orig_height, orig_width))
                    other_patch_embeds = torch.cat((
                        other_patch_embeds,
                        self.image_newline[:, None, None] \
                            .expand(*other_patch_embeds.shape[:-1], 1) \
                            .to(other_patch_embeds.device),
                    ), dim=-1)
                    other_patch_embeds = other_patch_embeds \
                        .flatten(1, 2).transpose(0, 1)
                else:
                    other_patch_embeds = other_patch_embeds \
                        .permute(0, 2, 1, 3, 4).contiguous() \
                        .flatten(0, 3)

                merged_patch_embeddings = torch.cat(
                    (base_patch_embeds, other_patch_embeds), dim=0)
            else:
                if "unpad" in strategy:
                    merged_patch_embeddings = torch.cat(
                        (base_patch_embeds,
                         self.image_newline[None] \
                             .to(base_patch_embeds.device)
                        ), dim=0)
                else:
                    merged_patch_embeddings = base_patch_embeds

            return merged_patch_embeddings

        raise ValueError(f"Unexpected patch merge strategy: {strategy}")
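
    # Shape sketch for "spatial_unpad" (hypothetical numbers, assuming a 336px
    # tower with 14px patches, so height = width = 24): the tile embeddings of
    # shape (num_patches, 576, hidden) are rearranged into a spatial grid,
    # unpadded back to the original aspect ratio, and an `image_newline`
    # column is appended to every row, giving H' * (W' + 1) tokens that are
    # concatenated after the 576 base-image tokens.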

    def _process_image_pixels(
        self,
        inputs: LlavaNextImagePixelInputs,
    ) -> Union[torch.Tensor, List[torch.Tensor]]:
        assert self.vision_tower is not None

        pixel_values = inputs["data"]

        if isinstance(pixel_values, torch.Tensor):
            b, num_patches, c, h, w = pixel_values.shape
            stacked_pixel_values = pixel_values.view(b * num_patches, c, h, w)
            stacked_image_features = self._image_pixels_to_features(
                self.vision_tower, stacked_pixel_values)
            stacked_patch_embeddings = self.multi_modal_projector(
                stacked_image_features)

            return stacked_patch_embeddings.view(
                b, num_patches, *stacked_patch_embeddings.shape[1:])

        num_patches_per_batch = [v.shape[0] for v in pixel_values]
        stacked_pixel_values = torch.cat(pixel_values)
        stacked_image_features = self._image_pixels_to_features(
            self.vision_tower, stacked_pixel_values)

        return [
            self.multi_modal_projector(image_features) for image_features in
            torch.split(stacked_image_features, num_patches_per_batch)
        ]
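
    # Both paths run the vision tower over all tiles in a single call; the
    # list path only differs in that images with different `num_patches`
    # cannot be stacked into one 5-D tensor, so the projected features are
    # split back per image afterwards.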

    def _process_image_input(
        self,
        image_input: LlavaNextImageInputs,
    ) -> Union[torch.Tensor, List[torch.Tensor]]:
        if image_input["type"] == "image_embeds":
            return [image_input["data"]]

        patch_embeddings = self._process_image_pixels(image_input)

        image_sizes = image_input.get("image_sizes")
        if image_sizes is None:
            batch_size = len(image_input["data"])
            vision_config = self.config.vision_config
            default_height = default_width = vision_config.image_size
            image_sizes = torch.as_tensor([[default_height, default_width]
                                           for _ in range(batch_size)])

        return [
            self._merge_image_patch_embeddings(image_sizes[i],
                                               patch_features_batch,
                                               strategy="spatial_unpad")
            for i, patch_features_batch in enumerate(patch_embeddings)
        ]

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
        intermediate_tensors: Optional[IntermediateTensors] = None,
        **kwargs: object,
    ) -> SamplerOutput:
        """Run forward pass for LLaVA-NeXT.

        One key thing to understand is the `input_ids` already accounts for the
        positions of the to-be-inserted image embeddings.

        Concretely, consider a text prompt:
        `"A chat between a curious human and an artificial intelligence
        assistant. The assistant gives helpful, detailed, and polite answers to
        the human's questions.
        USER: <image>\\nWhat is shown in this image? ASSISTANT:"`.

        Tokenizer outputs:
        `[1, 319, 13563, 1546, 263, 12758, 5199, 322, 385, 23116, 21082, 20255,
        29889, 450, 20255, 4076, 8444, 29892, 13173, 29892, 322, 1248, 568,
        6089, 304, 278, 5199, 29915, 29879, 5155, 29889, 3148, 1001, 29901,
        29871, 32000, 13, 5618, 338, 4318, 297, 445, 1967, 29973, 319, 1799,
        9047, 13566, 29901]`.

        To reserve space in the KV cache, we have to insert placeholder tokens
        before they are passed to the model, so the input processor prepends
        additional image tokens (denoted as `32000`), resulting in:
        `[1, 319, 13563, 1546, 263, 12758, 5199, 322, 385, 23116, 21082, 20255,
        29889, 450, 20255, 4076, 8444, 29892, 13173, 29892, 322, 1248, 568,
        6089, 304, 278, 5199, 29915, 29879, 5155, 29889, 3148, 1001, 29901,
        29871, 32000, ..., 32000, 13, 5618, 338, 4318, 297, 445, 1967, 29973,
        319, 1799, 9047, 13566, 29901]`.

        Unlike in LLaVA-1.5, the number of image tokens inputted to the language
        model depends on the original size of the input image. Including the
        original image token in the input, the required number of image tokens
        is given by :func:`get_llava_next_image_feature_size`.

        This way, the `positions` and `attn_metadata` are consistent
        with the `input_ids`.

        Args:
            input_ids: Flattened (concatenated) input_ids corresponding to a
                batch.
            pixel_values: The pixels in each grid patch for each input image.
            image_sizes: The original `(height, width)` for each input image.

        See also:
            :class:`LlavaNextImageInputs`
        """
        image_input = self._parse_and_validate_image_input(**kwargs)

        if image_input is not None:
            vision_embeddings = self._process_image_input(image_input)
            inputs_embeds = self.language_model.model.get_input_embeddings(
                input_ids)

            inputs_embeds = merge_multimodal_embeddings(
                input_ids, inputs_embeds, vision_embeddings,
                self.config.image_token_index)

            input_ids = None
        else:
            inputs_embeds = None

        hidden_states = self.language_model.model(input_ids,
                                                  positions,
                                                  kv_caches,
                                                  attn_metadata,
                                                  None,
                                                  inputs_embeds=inputs_embeds)

        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[torch.Tensor]:
        return self.language_model.compute_logits(hidden_states,
                                                  sampling_metadata)

    def sample(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        return self.language_model.sample(logits, sampling_metadata)

    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
        # prepare weight iterators for components
        weights_group = group_weights_with_prefix(weights)

        # load vision encoder
        self.vision_tower.load_weights(weights_group["vision_tower"])

        # load mlp projector
        mlp_params_dict = dict(self.multi_modal_projector.named_parameters())
        for name, loaded_weight in weights_group["multi_modal_projector"]:
            param = mlp_params_dict[name]
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)

        # load newline
        for name, loaded_weight in weights_group["image_newline"]:
            assert name == ""
            param = self.image_newline
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)

        # load llm backbone
        self.language_model.load_weights(weights_group["language_model"])
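
    # Checkpoint weights are dispatched by their top-level prefix
    # ("vision_tower", "multi_modal_projector", "image_newline",
    # "language_model"), so each sub-module only sees its own parameters with
    # the prefix stripped (hence the `assert name == ""` for the bare
    # `image_newline` parameter).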