gpt_neox.py

# coding=utf-8
# Adapted from
# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/gpt_neox/modeling_gpt_neox.py
# Copyright 2023 The PygmalionAI team.
# Copyright 2023 The vLLM team.
# Copyright 2022 EleutherAI The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only GPT-NeoX model compatible with HuggingFace weights."""
from typing import List, Optional, Tuple

import torch
from torch import nn
from transformers import GPTNeoXConfig

from aphrodite.modeling.metadata import InputMetadata
from aphrodite.modeling.layers.activation import get_act_fn
from aphrodite.modeling.layers.attention import PagedAttention
from aphrodite.modeling.layers.linear import (ColumnParallelLinear,
                                              LinearMethodBase,
                                              QKVParallelLinear,
                                              RowParallelLinear)
from aphrodite.modeling.layers.rotary_embedding import get_rope
from aphrodite.modeling.layers.sampler import Sampler
from aphrodite.modeling.layers.vocab_parallel_embedding import (
    VocabParallelEmbedding, ParallelLMHead)
from aphrodite.modeling.megatron.parallel_state import (
    get_tensor_model_parallel_world_size)
from aphrodite.modeling.sampling_metadata import SamplingMetadata
from aphrodite.modeling.hf_downloader import (default_weight_loader,
                                              hf_model_weights_iterator)
from aphrodite.common.sequence import SamplerOutput

KVCache = Tuple[torch.Tensor, torch.Tensor]
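# NOTE: each KVCache is the (key_cache, value_cache) tensor pair that
# PagedAttention reads from and writes to for a single layer;
# GPTNeoXModel.forward receives a list with one entry per transformer layer.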


class GPTNeoXAttention(nn.Module):

    def __init__(
        self,
        config: GPTNeoXConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.total_num_heads = config.num_attention_heads
        self.hidden_size = config.hidden_size
        self.head_size = self.hidden_size // self.total_num_heads
        self.bias = getattr(config, "attention_bias", True)

        tensor_model_parallel_world_size = (
            get_tensor_model_parallel_world_size())
        assert self.total_num_heads % tensor_model_parallel_world_size == 0
        self.num_heads = (self.total_num_heads //
                          tensor_model_parallel_world_size)

        self.query_key_value = QKVParallelLinear(
            config.hidden_size,
            self.head_size,
            self.total_num_heads,
            bias=self.bias,
            linear_method=linear_method,
        )
        self.dense = RowParallelLinear(
            config.hidden_size,
            config.hidden_size,
            bias=self.bias,
            linear_method=linear_method,
        )

        scaling = self.head_size**-0.5
        rotary_dim = int(self.head_size * config.rotary_pct)
        assert rotary_dim % 2 == 0
        rope_theta = getattr(config, "rope_theta", 10000)
        max_position_embeddings = getattr(config, "max_position_embeddings",
                                          8192)
        if (linear_method is None
                or linear_method.quant_config.rope_style() is None):
            is_neox_style = True
        else:
            is_neox_style = linear_method.quant_config.rope_style()
        self.rotary_emb = get_rope(
            self.head_size,
            rotary_dim=rotary_dim,
            max_position=max_position_embeddings,
            base=rope_theta,
            is_neox_style=is_neox_style,
        )
        self.attn = PagedAttention(self.num_heads, self.head_size, scaling)

    def forward(
        self,
        position_ids: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
    ) -> torch.Tensor:
        qkv, _ = self.query_key_value(hidden_states)
        q, k, v = qkv.chunk(chunks=3, dim=-1)
        q, k = self.rotary_emb(position_ids, q, k)
        k_cache, v_cache = kv_cache
        attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata)
        output, _ = self.dense(attn_output)
        return output
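

# NOTE: QKVParallelLinear lays the fused projection out as all query heads,
# then all key heads, then all value heads along the output dimension, which
# is why ``qkv.chunk(chunks=3, dim=-1)`` above yields contiguous q, k and v
# tensors (GPT-NeoX has no grouped-query attention, so the three chunks are
# the same size). HuggingFace GPT-NeoX checkpoints instead interleave q/k/v
# per head; ``GPTNeoXForCausalLM.load_weights`` below permutes the checkpoint
# weights into this layout before loading them.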


class GPTNeoXMLP(nn.Module):

    def __init__(
        self,
        config: GPTNeoXConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.dense_h_to_4h = ColumnParallelLinear(
            config.hidden_size,
            config.intermediate_size,
            linear_method=linear_method,
        )
        self.dense_4h_to_h = RowParallelLinear(
            config.intermediate_size,
            config.hidden_size,
            linear_method=linear_method,
        )
        quant_config = getattr(linear_method, "quant_config", None)
        self.act = get_act_fn(config.hidden_act, quant_config,
                              config.intermediate_size)

    def forward(self, hidden_states):
        hidden_states, _ = self.dense_h_to_4h(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states, _ = self.dense_4h_to_h(hidden_states)
        return hidden_states


class GPTNeoXLayer(nn.Module):

    def __init__(
        self,
        config: GPTNeoXConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.use_parallel_residual = config.use_parallel_residual
        self.input_layernorm = nn.LayerNorm(config.hidden_size,
                                            eps=config.layer_norm_eps)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size,
                                                     eps=config.layer_norm_eps)
        self.attention = GPTNeoXAttention(config, linear_method)
        self.mlp = GPTNeoXMLP(config, linear_method)

    def forward(
        self,
        position_ids: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
    ) -> torch.Tensor:
        attn_input = self.input_layernorm(hidden_states)
        attn_output = self.attention(
            position_ids=position_ids,
            hidden_states=attn_input,
            kv_cache=kv_cache,
            input_metadata=input_metadata,
        )

        if self.use_parallel_residual:
            # pseudocode:
            # x = x + attn(ln1(x)) + mlp(ln2(x))
            mlp_input = self.post_attention_layernorm(hidden_states)
            mlp_output = self.mlp(mlp_input)
            hidden_states = mlp_output + attn_output + hidden_states
        else:
            # pseudocode:
            # x = x + attn(ln1(x))
            # x = x + mlp(ln2(x))
            attn_output = attn_output + hidden_states
            mlp_input = self.post_attention_layernorm(attn_output)
            mlp_output = self.mlp(mlp_input)
            hidden_states = mlp_output + attn_output
        return hidden_states
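

# NOTE: ``use_parallel_residual`` selects between the two residual forms in
# the pseudocode above. GPT-NeoX style checkpoints (e.g. Pythia) typically
# set it to True, so the attention and MLP branches are both computed from
# the layer input and summed into the residual stream together; when it is
# False the MLP runs on the attention output, as in a standard pre-norm
# transformer block.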


class GPTNeoXModel(nn.Module):

    def __init__(
        self,
        config: GPTNeoXConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.config = config

        self.embed_in = VocabParallelEmbedding(
            config.vocab_size,
            config.hidden_size,
            linear_method=linear_method,
        )
        self.layers = nn.ModuleList([
            GPTNeoXLayer(config, linear_method)
            for _ in range(config.num_hidden_layers)
        ])
        self.final_layer_norm = nn.LayerNorm(config.hidden_size,
                                             eps=config.layer_norm_eps)

    def forward(
        self,
        input_ids: torch.Tensor,
        position_ids: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
    ) -> torch.Tensor:
        hidden_states = self.embed_in(input_ids)
        for i in range(len(self.layers)):
            layer = self.layers[i]
            hidden_states = layer(
                position_ids,
                hidden_states,
                kv_caches[i],
                input_metadata,
            )
        hidden_states = self.final_layer_norm(hidden_states)
        return hidden_states


class GPTNeoXForCausalLM(nn.Module):

    def __init__(
        self,
        config,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.config = config
        self.linear_method = linear_method
        self.gpt_neox = GPTNeoXModel(config, linear_method)
        self.embed_out = ParallelLMHead(
            config.vocab_size,
            config.hidden_size,
            linear_method=linear_method,
        )
        self.sampler = Sampler(config.vocab_size)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
    ) -> torch.Tensor:
        hidden_states = self.gpt_neox(input_ids, positions, kv_caches,
                                      input_metadata)
        return hidden_states

    def sample(
        self,
        hidden_states: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        # ParallelLMHead is not called directly; the sampler consumes its
        # weight to compute the logits.
        next_tokens = self.sampler(self.embed_out.weight, hidden_states,
                                   sampling_metadata)
        return next_tokens

    def load_weights(self,
                     model_name_or_path: str,
                     cache_dir: Optional[str] = None,
                     load_format: str = "auto",
                     revision: Optional[str] = None):
        params_dict = dict(self.named_parameters())
        for name, loaded_weight in hf_model_weights_iterator(
                model_name_or_path, cache_dir, load_format, revision):
            if ("attention.bias" in name or "attention.masked_bias" in name
                    or "rotary_emb.inv_freq" in name):
                continue
            param = params_dict[name]

            if "query_key_value" in name:
                # NOTE: In the HF checkpoint the fused QKV projection's output
                # dimension is laid out as (num_heads * 3 * head_size), while
                # QKVParallelLinear expects (3 * num_heads * head_size), so the
                # loaded weight has to be permuted before it is copied in.
                output_dim = getattr(param, "output_dim", None)
                num_heads = self.config.num_attention_heads
                if output_dim is not None:
                    loaded_weight_shape = loaded_weight.shape
                    loaded_weight = loaded_weight.view(
                        loaded_weight_shape[:output_dim] + (num_heads, 3, -1) +
                        loaded_weight_shape[output_dim + 1:])
                    loaded_weight = loaded_weight.transpose(
                        output_dim, output_dim + 1)
                    loaded_weight = loaded_weight.reshape(loaded_weight_shape)

            weight_loader = getattr(param, "weight_loader",
                                    default_weight_loader)
            weight_loader(param, loaded_weight)
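

# The block below is an illustrative sketch, not part of the model: it uses
# toy (assumed) sizes to show the fused-QKV layout conversion that
# load_weights applies to HuggingFace checkpoints. Run this file directly to
# execute it; it is never run on import.
if __name__ == "__main__":
    num_heads, head_size = 2, 4
    hidden_size = num_heads * head_size
    # An HF GPT-NeoX checkpoint groups the fused projection's rows per head,
    # i.e. (num_heads, 3, head_size) along the output dimension.
    hf_weight = torch.arange(3 * hidden_size * hidden_size,
                             dtype=torch.float32).view(3 * hidden_size,
                                                       hidden_size)
    output_dim = 0  # the output dimension of a weight matrix
    shape = hf_weight.shape
    converted = hf_weight.view(shape[:output_dim] + (num_heads, 3, -1) +
                               shape[output_dim + 1:])
    converted = converted.transpose(output_dim, output_dim + 1)
    converted = converted.reshape(shape)
    # After the conversion the rows are grouped as (3, num_heads, head_size):
    # all query heads first, then all key heads, then all value heads, which
    # is the layout QKVParallelLinear expects.
    print(hf_weight.shape, converted.shape)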