  1. """Minimal implementation of CLIPVisionModel intended to be only used
  2. within a vision language model."""
  3. from typing import Optional
  4. import torch
  5. import torch.nn as nn
  6. from PIL import Image
  7. from transformers import CLIPVisionConfig
  8. from transformers.models.clip.modeling_clip import CLIPAttention
  9. from aphrodite.common.config import ModelConfig
  10. from aphrodite.common.sequence import SequenceData
  11. from aphrodite.inputs import LLMInputs
  12. from aphrodite.modeling.layers.activation import get_act_fn
  13. from aphrodite.modeling.layers.linear import (ColumnParallelLinear,
  14. RowParallelLinear)
  15. from aphrodite.multimodal.image import (cached_get_tokenizer,
  16. repeat_and_pad_image_tokens)
  17. from aphrodite.quantization import QuantizationConfig


def get_clip_patch_grid_length(*, image_size: int, patch_size: int) -> int:
    assert image_size % patch_size == 0
    return image_size // patch_size


def get_clip_num_patches(*, image_size: int, patch_size: int) -> int:
    grid_length = get_clip_patch_grid_length(image_size=image_size,
                                             patch_size=patch_size)
    return grid_length * grid_length


def get_clip_image_feature_size(hf_config: CLIPVisionConfig) -> int:
    return get_clip_num_patches(image_size=hf_config.image_size,
                                patch_size=hf_config.patch_size)


def get_max_clip_image_tokens(hf_config: CLIPVisionConfig) -> int:
    return get_clip_image_feature_size(hf_config)
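

# Worked example: OpenAI's ViT-L/14 CLIP checkpoints use image_size=224 and
# patch_size=14, so the grid is 224 // 14 = 16 patches per side and a single
# image contributes 16 * 16 = 256 feature tokens; the class token is not
# counted here, as get_clip_image_feature_size covers only patch positions.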


def dummy_seq_data_for_clip(
    hf_config: CLIPVisionConfig,
    seq_len: int,
    *,
    image_token_id: int,
    image_feature_size_override: Optional[int] = None,
):
    if image_feature_size_override is None:
        image_feature_size = get_clip_image_feature_size(hf_config)
    else:
        image_feature_size = image_feature_size_override

    token_ids = [image_token_id] * image_feature_size
    token_ids += [0] * (seq_len - image_feature_size)
    return SequenceData(token_ids)
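

# For instance, with seq_len=2048 and the default 224/32 config (a 7x7 grid,
# i.e. 49 patches), the dummy sequence is 49 image placeholder tokens
# followed by 1999 zeros. If image_feature_size exceeds seq_len, the padding
# term is negative and simply contributes no tokens.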


def dummy_image_for_clip(
    hf_config: CLIPVisionConfig,
    *,
    image_width_override: Optional[int] = None,
    image_height_override: Optional[int] = None,
):
    width = height = hf_config.image_size
    if image_width_override is not None:
        width = image_width_override
    if image_height_override is not None:
        height = image_height_override

    image = Image.new("RGB", (width, height), color=0)
    return {"image": image}


def input_processor_for_clip(
    model_config: ModelConfig,
    hf_config: CLIPVisionConfig,
    llm_inputs: LLMInputs,
    *,
    image_token_id: int,
    image_feature_size_override: Optional[int] = None,
):
    multi_modal_data = llm_inputs.get("multi_modal_data")
    if multi_modal_data is None or "image" not in multi_modal_data:
        return llm_inputs

    tokenizer = cached_get_tokenizer(model_config.tokenizer)

    if image_feature_size_override is None:
        image_data = multi_modal_data["image"]
        if isinstance(image_data, Image.Image):
            image_feature_size = get_clip_image_feature_size(hf_config)
        elif isinstance(image_data, torch.Tensor):
            image_feature_size = image_data.shape[0]
        else:
            raise TypeError(f"Invalid image type: {type(image_data)}")
    else:
        image_feature_size = image_feature_size_override

    new_prompt, new_token_ids = repeat_and_pad_image_tokens(
        tokenizer,
        llm_inputs.get("prompt"),
        llm_inputs["prompt_token_ids"],
        image_token_id=image_token_id,
        repeat_count=image_feature_size,
    )

    # NOTE: Create a defensive copy of the original inputs
    return LLMInputs(prompt_token_ids=new_token_ids,
                     prompt=new_prompt,
                     multi_modal_data=multi_modal_data)
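

# For example, with a feature size of 3, a prompt like "<image>\nWhat is
# this?" is expanded so the language model sees one position per visual
# feature, i.e. roughly "<image><image><image>\nWhat is this?"; the exact
# splicing is delegated to repeat_and_pad_image_tokens.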


# Adapted from https://github.com/huggingface/transformers/blob/v4.39.0/src/transformers/models/clip/modeling_clip.py#L164 # noqa
class CLIPVisionEmbeddings(nn.Module):

    def __init__(self, config: CLIPVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))

        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            bias=False,
        )

        self.num_patches = get_clip_num_patches(image_size=self.image_size,
                                                patch_size=self.patch_size)
        self.num_positions = self.num_patches + 1
        self.position_embedding = nn.Embedding(self.num_positions,
                                               self.embed_dim)
        self.register_buffer("position_ids",
                             torch.arange(self.num_positions).expand((1, -1)),
                             persistent=False)

    def forward(self, pixel_values: torch.Tensor) -> torch.Tensor:
        batch_size = pixel_values.shape[0]
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(
            dtype=target_dtype))  # shape = [*, width, grid, grid]
        patch_embeds = patch_embeds.flatten(2).transpose(1, 2)

        class_embeds = self.class_embedding.expand(batch_size, 1, -1)
        embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
        embeddings = embeddings + self.position_embedding(self.position_ids)

        return embeddings
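

# Shape walkthrough for the default config (image_size=224, patch_size=32,
# hidden_size=768): pixel_values [B, 3, 224, 224] -> patch_embedding
# [B, 768, 7, 7] -> flatten/transpose [B, 49, 768] -> prepend class token
# [B, 50, 768], matching num_positions = 49 + 1.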


class CLIPMLP(nn.Module):

    def __init__(self,
                 config: CLIPVisionConfig,
                 quant_config: Optional[QuantizationConfig] = None):
        super().__init__()
        self.config = config
        self.activation_fn = get_act_fn(config.hidden_act)
        self.fc1 = ColumnParallelLinear(config.hidden_size,
                                        config.intermediate_size,
                                        bias=True,
                                        quant_config=quant_config)
        self.fc2 = RowParallelLinear(config.intermediate_size,
                                     config.hidden_size,
                                     bias=True,
                                     quant_config=quant_config)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states, _ = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states, _ = self.fc2(hidden_states)
        return hidden_states


class CLIPEncoderLayer(nn.Module):

    def __init__(self,
                 config: CLIPVisionConfig,
                 quant_config: Optional[QuantizationConfig] = None):
        super().__init__()
        self.self_attn = CLIPAttention(config)
        self.layer_norm1 = nn.LayerNorm(config.hidden_size,
                                        eps=config.layer_norm_eps)
        self.mlp = CLIPMLP(config, quant_config=quant_config)
        self.layer_norm2 = nn.LayerNorm(config.hidden_size,
                                        eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, _ = self.self_attn(hidden_states=hidden_states)
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        return hidden_states


class CLIPEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self
    attention layers. Each layer is a [`CLIPEncoderLayer`].

    Args:
        config: CLIPVisionConfig
    """

    def __init__(self,
                 config: CLIPVisionConfig,
                 quant_config: Optional[QuantizationConfig] = None,
                 num_hidden_layers_override: Optional[int] = None):
        super().__init__()
        self.config = config

        if num_hidden_layers_override is None:
            num_hidden_layers = config.num_hidden_layers
        else:
            num_hidden_layers = num_hidden_layers_override
        self.layers = nn.ModuleList([
            CLIPEncoderLayer(config=config, quant_config=quant_config)
            for _ in range(num_hidden_layers)
        ])

    def forward(self, inputs_embeds: torch.Tensor):
        hidden_states = inputs_embeds
        for encoder_layer in self.layers:
            hidden_states = encoder_layer(hidden_states)
        return hidden_states


class CLIPVisionTransformer(nn.Module):

    def __init__(self,
                 config: CLIPVisionConfig,
                 quant_config: Optional[QuantizationConfig] = None,
                 num_hidden_layers_override: Optional[int] = None):
        super().__init__()
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = CLIPVisionEmbeddings(config)

        # NOTE: This typo of "layrnorm" is not fixed on purpose to match
        # the original transformers code and name of the model weights.
        self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

        self.encoder = CLIPEncoder(
            config=config,
            quant_config=quant_config,
            num_hidden_layers_override=num_hidden_layers_override)

    def forward(
        self,
        pixel_values: torch.Tensor,
    ) -> torch.Tensor:
        hidden_states = self.embeddings(pixel_values)
        hidden_states = self.pre_layrnorm(hidden_states)
        hidden_states = self.encoder(inputs_embeds=hidden_states)

        return hidden_states


class CLIPVisionModel(nn.Module):

    config_class = CLIPVisionConfig
    main_input_name = "pixel_values"

    def __init__(self,
                 config: CLIPVisionConfig,
                 quant_config: Optional[QuantizationConfig] = None,
                 num_hidden_layers_override: Optional[int] = None):
        super().__init__()
        self.vision_model = CLIPVisionTransformer(
            config=config,
            quant_config=quant_config,
            num_hidden_layers_override=num_hidden_layers_override)

    def forward(self, pixel_values: Optional[torch.Tensor] = None):
        return self.vision_model(pixel_values=pixel_values)

    @property
    def device(self):
        return next(self.parameters()).device
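

# A minimal usage sketch (assumes the default CLIPVisionConfig values of
# image_size=224, patch_size=32, hidden_size=768; note that the
# Column/RowParallelLinear layers expect Aphrodite's distributed state to be
# initialized before the model is constructed):
#
#     config = CLIPVisionConfig()
#     model = CLIPVisionModel(config)
#     pixel_values = torch.randn(1, 3, 224, 224)
#     features = model(pixel_values)  # [1, 50, 768]: class token + 49 patches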