# idefics2_vision_model.py
# coding=utf-8
# adapted from https://github.com/huggingface/transformers/blob/v4.43.2/src/transformers/models/idefics2/modeling_idefics2.py
# Copyright 2024 The PygmalionAI team.
# Copyright 2024 The vLLM team.
# Copyright 2024 the HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Idefics2 model."""

from typing import Optional

import torch
from torch import nn
from transformers.models.idefics2.configuration_idefics2 import (
    Idefics2Config, Idefics2VisionConfig)
from xformers import ops as xops

from aphrodite.distributed import divide, get_tensor_model_parallel_world_size
from aphrodite.modeling.layers.activation import get_act_fn
from aphrodite.modeling.layers.linear import (ColumnParallelLinear,
                                              QKVParallelLinear,
                                              RowParallelLinear)
from aphrodite.quantization import QuantizationConfig


class Idefics2VisionEmbeddings(nn.Module):
    """
    This is a modified version of
    `siglip.modeling_siglip.SiglipVisionEmbeddings` to enable images of
    variable resolution.

    The modifications are adapted from [Patch n' Pack: NaViT, a Vision
    Transformer for any Aspect Ratio and Resolution](https://arxiv.org/abs/2307.06304),
    which allows treating images in their native aspect ratio without the
    need to resize them to the same fixed size. In particular, we start from
    the original pre-trained SigLIP model (which uses fixed-size square
    images) and adapt it by training on images of variable resolutions.
    """

    def __init__(self, config: Idefics2VisionConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size
        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            padding="valid",
        )
        self.num_patches_per_side = self.image_size // self.patch_size
        self.num_patches = self.num_patches_per_side**2
        self.num_positions = self.num_patches
        self.position_embedding = nn.Embedding(self.num_positions,
                                               self.embed_dim)

    def forward(
        self,
        pixel_values: torch.FloatTensor,
        patch_attention_mask: torch.BoolTensor,
    ) -> torch.Tensor:
        batch_size, _, max_im_h, max_im_w = pixel_values.shape
        patch_embeds = self.patch_embedding(pixel_values)
        embeddings = patch_embeds.flatten(2).transpose(1, 2)
        max_nb_patches_h, max_nb_patches_w = (
            max_im_h // self.patch_size,
            max_im_w // self.patch_size,
        )
        boundaries = torch.arange(1 / self.num_patches_per_side, 1.0,
                                  1 / self.num_patches_per_side)
        position_ids = torch.full(size=(batch_size,
                                        max_nb_patches_h * max_nb_patches_w),
                                  fill_value=0)
        for batch_idx, p_attn_mask in enumerate(patch_attention_mask):
            nb_patches_h = p_attn_mask[:, 0].sum()
            nb_patches_w = p_attn_mask[0].sum()
            fractional_coords_h = torch.arange(0, 1 - 1e-6, 1 / nb_patches_h)
            fractional_coords_w = torch.arange(0, 1 - 1e-6, 1 / nb_patches_w)
            bucket_coords_h = torch.bucketize(fractional_coords_h,
                                              boundaries,
                                              right=True)
            bucket_coords_w = torch.bucketize(fractional_coords_w,
                                              boundaries,
                                              right=True)
            pos_ids = (bucket_coords_h[:, None] * self.num_patches_per_side +
                       bucket_coords_w).flatten()
            position_ids[batch_idx][p_attn_mask.view(-1).cpu()] = pos_ids
        position_ids = position_ids.to(self.position_embedding.weight.device)
        embeddings = embeddings + self.position_embedding(position_ids)
        return embeddings
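

# Illustrative sketch (not part of the original module): how the embedding
# class above maps the fractional patch coordinates of a variable-resolution
# image onto the fixed grid of pretrained position embeddings with
# `torch.bucketize`. The helper name and its default values are hypothetical
# and exist only for demonstration.
def _demo_bucketized_position_ids(nb_patches_h: int = 3,
                                  nb_patches_w: int = 5,
                                  num_patches_per_side: int = 27
                                  ) -> torch.Tensor:
    # Bucket boundaries split [0, 1) into `num_patches_per_side` bins.
    boundaries = torch.arange(1 / num_patches_per_side, 1.0,
                              1 / num_patches_per_side)
    # Fractional coordinates of the real (unpadded) patches along each axis.
    fractional_coords_h = torch.arange(0, 1 - 1e-6, 1 / nb_patches_h)
    fractional_coords_w = torch.arange(0, 1 - 1e-6, 1 / nb_patches_w)
    bucket_coords_h = torch.bucketize(fractional_coords_h, boundaries,
                                      right=True)
    bucket_coords_w = torch.bucketize(fractional_coords_w, boundaries,
                                      right=True)
    # Row-major flattening into indices of the position-embedding table.
    return (bucket_coords_h[:, None] * num_patches_per_side +
            bucket_coords_w).flatten()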


class Idefics2VisionAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        config: Idefics2Config,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"  # noqa: E501
                f" {self.num_heads}).")
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout
        self.qkv_proj = QKVParallelLinear(
            self.embed_dim,
            self.head_dim,
            self.num_heads,
            quant_config=quant_config,
        )
        self.out_proj = RowParallelLinear(
            self.embed_dim,
            self.embed_dim,
            bias=True,
            quant_config=quant_config,
        )
        self.tp_size = get_tensor_model_parallel_world_size()
        self.num_heads_per_partition = divide(self.num_heads, self.tp_size)
        self.is_causal = False

    def forward(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor:
        batch_size, q_len, _ = hidden_states.size()
        qkv, _ = self.qkv_proj(
            hidden_states
        )  # batch_size, q_len, 3 * num_heads_per_partition * head_dim
        query_states, key_states, value_states = qkv.chunk(3, dim=-1)
        query_states = query_states.view(batch_size, q_len,
                                         self.num_heads_per_partition,
                                         self.head_dim)
        key_states = key_states.view(batch_size, q_len,
                                     self.num_heads_per_partition,
                                     self.head_dim)
        value_states = value_states.view(batch_size, q_len,
                                         self.num_heads_per_partition,
                                         self.head_dim)
        # see: https://facebookresearch.github.io/xformers/components/ops.html
        out = xops.memory_efficient_attention_forward(
            query_states,
            key_states,
            value_states,
            p=self.dropout,
            scale=self.scale,
        )
        out = out.view(batch_size, q_len, -1)
        attn_output, _ = self.out_proj(out)
        return attn_output
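

# Reference sketch (illustrative only): the xformers call above computes
# plain scaled-dot-product attention over tensors laid out as
# (batch, seq_len, num_heads, head_dim). An equivalent eager computation
# using torch.nn.functional.scaled_dot_product_attention, which expects
# (batch, num_heads, seq_len, head_dim), would look roughly like this
# (dropout omitted); the helper name is hypothetical and the `scale`
# keyword assumes a recent PyTorch version.
def _reference_attention(query_states: torch.Tensor,
                         key_states: torch.Tensor,
                         value_states: torch.Tensor,
                         scale: float) -> torch.Tensor:
    q = query_states.transpose(1, 2)
    k = key_states.transpose(1, 2)
    v = value_states.transpose(1, 2)
    out = nn.functional.scaled_dot_product_attention(q, k, v, scale=scale)
    # Back to (batch, seq_len, num_heads, head_dim) to match the xformers
    # output layout used by the module above.
    return out.transpose(1, 2)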


class Idefics2VisionMLP(nn.Module):

    def __init__(
        self,
        config: Idefics2Config,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.config = config
        self.activation_fn = get_act_fn(config.hidden_act)
        self.fc1 = ColumnParallelLinear(
            config.hidden_size,
            config.intermediate_size,
            bias=True,
            quant_config=quant_config,
        )
        self.fc2 = RowParallelLinear(
            config.intermediate_size,
            config.hidden_size,
            bias=True,
            quant_config=quant_config,
        )

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states, _ = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states, _ = self.fc2(hidden_states)
        return hidden_states


class Idefics2EncoderLayer(nn.Module):

    def __init__(self, config: Idefics2Config):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.self_attn = Idefics2VisionAttention(config)
        self.layer_norm1 = nn.LayerNorm(self.embed_dim,
                                        eps=config.layer_norm_eps)
        self.mlp = Idefics2VisionMLP(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim,
                                        eps=config.layer_norm_eps)

    def forward(
        self,
        hidden_states: torch.Tensor,
    ) -> torch.Tensor:
        """
        Args:
            hidden_states (`torch.FloatTensor`):
                Input to the layer of shape `(batch, seq_len, embed_dim)`.
        """
        residual = hidden_states
        hidden_states = self.layer_norm1(hidden_states)
        hidden_states = self.self_attn(hidden_states)
        hidden_states = residual + hidden_states
        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states


class Idefics2Encoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers`
    self-attention layers. Each layer is an [`Idefics2EncoderLayer`].

    Args:
        config: Idefics2Config
    """

    def __init__(self, config: Idefics2Config):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([
            Idefics2EncoderLayer(config)
            for _ in range(config.num_hidden_layers)
        ])

    def forward(
        self,
        inputs_embeds: torch.Tensor,
    ) -> torch.Tensor:
        r"""
        Args:
            inputs_embeds (torch.Tensor):
                Optionally, instead of passing `input_ids` you can choose to
                directly pass an embedded representation. This is useful if
                you want more control over how to convert `input_ids` indices
                into associated vectors than the model's internal embedding
                lookup matrix.
        """
        hidden_states = inputs_embeds
        for encoder_layer in self.layers:
            layer_outputs = encoder_layer(hidden_states)
            hidden_states = layer_outputs
        return hidden_states


class Idefics2VisionTransformer(nn.Module):

    def __init__(self, config: Idefics2VisionConfig):
        super().__init__()
        embed_dim = config.hidden_size
        self.config = config
        self.embeddings = Idefics2VisionEmbeddings(config)
        self.encoder = Idefics2Encoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim,
                                           eps=config.layer_norm_eps)

    def get_input_embeddings(self):
        return self.embeddings

    def forward(
        self,
        pixel_values,
        patch_attention_mask: Optional[torch.BoolTensor] = None,
    ) -> torch.Tensor:
        hidden_states = self.embeddings(
            pixel_values=pixel_values,
            patch_attention_mask=patch_attention_mask)
        encoder_outputs = self.encoder(hidden_states)
        last_hidden_state = self.post_layernorm(encoder_outputs)
        return last_hidden_state
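

# Illustrative sketch (not part of the original module): constructing a
# `patch_attention_mask` for an image that only fills part of a padded
# canvas, in the (batch, nb_patches_h, nb_patches_w) boolean layout consumed
# by `Idefics2VisionTransformer.forward`. The helper name and example sizes
# are hypothetical.
def _demo_patch_attention_mask(real_h: int,
                               real_w: int,
                               padded_h: int,
                               padded_w: int,
                               patch_size: int) -> torch.Tensor:
    nb_patches_h = padded_h // patch_size
    nb_patches_w = padded_w // patch_size
    mask = torch.zeros(1, nb_patches_h, nb_patches_w, dtype=torch.bool)
    # Only the patches covered by the real (unpadded) image are marked valid;
    # padded patches stay False and receive no position ids in the embeddings.
    mask[:, :real_h // patch_size, :real_w // patch_size] = True
    return mask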