# coding=utf-8
# Adapted from
# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/gpt2/modeling_gpt2.py
# Copyright 2023 The vLLM team.
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only GPT-2 model compatible with HuggingFace weights."""
from typing import Iterable, List, Optional, Tuple

import torch
from torch import nn
from transformers import GPT2Config

from aphrodite.attention import Attention, AttentionMetadata
from aphrodite.common.config import CacheConfig
from aphrodite.common.sequence import IntermediateTensors, SamplerOutput
from aphrodite.common.utils import progress_bar
from aphrodite.distributed import get_tensor_model_parallel_world_size
from aphrodite.modeling.layers.activation import get_act_fn
from aphrodite.modeling.layers.linear import (ColumnParallelLinear,
                                              QKVParallelLinear,
                                              RowParallelLinear)
from aphrodite.modeling.layers.logits_processor import LogitsProcessor
from aphrodite.modeling.layers.sampler import Sampler
from aphrodite.modeling.layers.vocab_parallel_embedding import (
    VocabParallelEmbedding)
from aphrodite.modeling.model_loader.weight_utils import default_weight_loader
from aphrodite.modeling.sampling_metadata import SamplingMetadata
from aphrodite.quantization.base_config import QuantizationConfig


class GPT2Attention(nn.Module):
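    """Tensor-parallel GPT-2 self-attention.

    The query/key/value projections are fused into a single QKVParallelLinear
    (the counterpart of HF's Conv1D ``c_attn``), the output projection is a
    RowParallelLinear, and the attention computation itself, including
    KV-cache handling, is delegated to Aphrodite's ``Attention`` layer.
    Attention heads are split evenly across the tensor-parallel ranks.
    """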
    def __init__(
        self,
        config: GPT2Config,
        cache_config: Optional[CacheConfig] = None,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.hidden_size = config.hidden_size
        total_num_heads = config.num_attention_heads
        tensor_model_parallel_world_size = (
            get_tensor_model_parallel_world_size())
        assert total_num_heads % tensor_model_parallel_world_size == 0
        self.num_heads = total_num_heads // tensor_model_parallel_world_size
        self.head_dim = self.hidden_size // total_num_heads
        self.scale = self.head_dim**-0.5
        self.c_attn = QKVParallelLinear(
            self.hidden_size,
            self.head_dim,
            total_num_heads,
            bias=True,
            quant_config=quant_config,
        )
        self.c_proj = RowParallelLinear(
            self.hidden_size,
            self.hidden_size,
            bias=True,
            quant_config=quant_config,
        )
        self.attn = Attention(self.num_heads,
                              self.head_dim,
                              scale=self.scale,
                              cache_config=cache_config,
                              quant_config=quant_config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        qkv, _ = self.c_attn(hidden_states)
        q, k, v = qkv.chunk(chunks=3, dim=-1)
        attn_output = self.attn(q, k, v, kv_cache, attn_metadata)
        attn_output, _ = self.c_proj(attn_output)
        return attn_output


class GPT2MLP(nn.Module):
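    """GPT-2 feed-forward block: ``c_fc`` expands the hidden size to
    ``intermediate_size`` (ColumnParallelLinear), the configured activation
    (typically ``gelu_new`` for GPT-2 checkpoints) is applied, and ``c_proj``
    projects back to the hidden size (RowParallelLinear)."""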
    def __init__(
        self,
        intermediate_size: int,
        config: GPT2Config,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        hidden_size = config.hidden_size
        self.c_fc = ColumnParallelLinear(
            hidden_size,
            intermediate_size,
            bias=True,
            quant_config=quant_config,
        )
        self.c_proj = RowParallelLinear(
            intermediate_size,
            hidden_size,
            bias=True,
            quant_config=quant_config,
        )
        self.act = get_act_fn(config.activation_function, quant_config,
                              intermediate_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states, _ = self.c_fc(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states, _ = self.c_proj(hidden_states)
        return hidden_states


class GPT2Block(nn.Module):
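    """A single pre-LayerNorm transformer block: ``ln_1`` -> attention ->
    residual add, then ``ln_2`` -> MLP -> residual add."""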
    def __init__(
        self,
        config: GPT2Config,
        cache_config: Optional[CacheConfig] = None,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        hidden_size = config.hidden_size
        inner_dim = (config.n_inner if config.n_inner is not None else 4 *
                     hidden_size)
        self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.attn = GPT2Attention(config, cache_config, quant_config)
        self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.mlp = GPT2MLP(inner_dim, config, quant_config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        residual = hidden_states
        hidden_states = self.ln_1(hidden_states)
        attn_output = self.attn(
            hidden_states=hidden_states,
            kv_cache=kv_cache,
            attn_metadata=attn_metadata,
        )
        # residual connection
        hidden_states = attn_output + residual

        residual = hidden_states
        hidden_states = self.ln_2(hidden_states)
        feed_forward_hidden_states = self.mlp(hidden_states)
        # residual connection
        hidden_states = residual + feed_forward_hidden_states
        return hidden_states


class GPT2Model(nn.Module):
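    """The GPT-2 backbone: token embeddings (``wte``) plus learned absolute
    position embeddings (``wpe``), a stack of GPT2Blocks, and a final
    LayerNorm. Returns hidden states; the LM head lives in GPT2LMHeadModel."""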
    def __init__(
        self,
        config: GPT2Config,
        cache_config: Optional[CacheConfig] = None,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.config = config
        assert not config.add_cross_attention
        assert not config.scale_attn_by_inverse_layer_idx
        assert not config.reorder_and_upcast_attn
        self.embed_dim = config.hidden_size
        self.wte = VocabParallelEmbedding(config.vocab_size, self.embed_dim)
        self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)
        self.h = nn.ModuleList([
            GPT2Block(config, cache_config, quant_config)
            for _ in range(config.num_hidden_layers)
        ])
        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)

    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        hidden_states = inputs_embeds + position_embeds
        for i in range(len(self.h)):
            layer = self.h[i]
            hidden_states = layer(hidden_states, kv_caches[i], attn_metadata)
        hidden_states = self.ln_f(hidden_states)
        return hidden_states


class GPT2LMHeadModel(nn.Module):
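    """GPT-2 with a language-modeling head, for Aphrodite inference.

    The LM head shares its weight with the token embedding (``wte``), so no
    separate projection matrix is allocated. ``forward`` returns hidden
    states; the engine calls ``compute_logits`` and ``sample`` separately.
    """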
    def __init__(
        self,
        config: GPT2Config,
        cache_config: Optional[CacheConfig] = None,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.config = config
        self.quant_config = quant_config
        self.transformer = GPT2Model(config, cache_config, quant_config)
        self.lm_head = self.transformer.wte
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.sampler = Sampler()

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
        intermediate_tensors: Optional[IntermediateTensors] = None,
    ) -> torch.Tensor:
        hidden_states = self.transformer(input_ids, positions, kv_caches,
                                         attn_metadata)
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[torch.Tensor]:
        logits = self.logits_processor(self.lm_head, hidden_states,
                                       sampling_metadata)
        return logits

    def sample(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        next_tokens = self.sampler(logits, sampling_metadata)
        return next_tokens

    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
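        """Load HuggingFace GPT-2 weights into this module.

        The tied ``lm_head.weight`` and the causal-mask buffers are skipped,
        and Conv1D weights are transposed to match the Linear layout used
        here.
        """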
        params_dict = dict(self.named_parameters(remove_duplicate=False))
        weights_list = list(weights)
        for name, loaded_weight in progress_bar(weights_list,
                                                desc="Loading modules..."):
            if "lm_head.weight" in name:
                # GPT-2 ties the weights of the embedding layer and the final
                # linear layer.
                continue
            if ".attn.bias" in name or ".attn.masked_bias" in name:
                # Skip attention mask.
                # NOTE: "c_attn.bias" should not be skipped.
                continue
            if not name.startswith("transformer."):
                name = "transformer." + name
            param = params_dict[name]
            # HF's GPT-2 implementation uses Conv1D instead of Linear.
            # Because of this, we need to transpose the weights.
            # Note(zhuohan): the logic below might break quantized models.
            for conv1d_weight_name in ["c_attn", "c_proj", "c_fc"]:
                if conv1d_weight_name not in name:
                    continue
                if not name.endswith(".weight"):
                    continue
                loaded_weight = loaded_weight.t()
            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)
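

# Usage sketch (illustrative, not part of the original module): in Aphrodite the
# engine's model loader normally constructs this class and supplies kv_caches
# and attn_metadata, but loading HuggingFace weights by hand would look roughly
# like the following, assuming the tensor-parallel/distributed state has already
# been initialized:
#
#     import transformers
#     hf_model = transformers.GPT2LMHeadModel.from_pretrained("gpt2")
#     model = GPT2LMHeadModel(hf_model.config)
#     model.load_weights(hf_model.state_dict().items())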