# coding=utf-8
# Adapted from
# https://huggingface.co/xverse/XVERSE-7B/blob/main/modeling_xverse.py
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only Xverse model compatible with HuggingFace weights."""

from typing import Any, Dict, Iterable, List, Optional, Tuple

import torch
from torch import nn
from transformers import PretrainedConfig

from aphrodite.attention import Attention, AttentionMetadata
from aphrodite.common.config import CacheConfig, LoRAConfig
from aphrodite.common.sequence import SamplerOutput
from aphrodite.distributed import get_tensor_model_parallel_world_size
from aphrodite.modeling.layers.activation import SiluAndMul
from aphrodite.modeling.layers.layernorm import RMSNorm
from aphrodite.modeling.layers.linear import (MergedColumnParallelLinear,
                                              QKVParallelLinear,
                                              RowParallelLinear)
from aphrodite.modeling.layers.logits_processor import LogitsProcessor
from aphrodite.modeling.layers.rotary_embedding import get_rope
from aphrodite.modeling.layers.sampler import Sampler
from aphrodite.modeling.layers.vocab_parallel_embedding import (
    ParallelLMHead, VocabParallelEmbedding)
from aphrodite.modeling.model_loader.weight_utils import default_weight_loader
from aphrodite.modeling.sampling_metadata import SamplingMetadata
from aphrodite.quantization.base_config import QuantizationConfig


class XverseMLP(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
        quant_config: Optional[QuantizationConfig] = None,
    ) -> None:
        super().__init__()
        self.gate_up_proj = MergedColumnParallelLinear(
            hidden_size, [intermediate_size] * 2,
            bias=False,
            quant_config=quant_config)
        self.down_proj = RowParallelLinear(intermediate_size,
                                           hidden_size,
                                           bias=False,
                                           quant_config=quant_config)
        if hidden_act != "silu":
            raise ValueError(f"Unsupported activation: {hidden_act}. "
                             "Only silu is supported for now.")
        self.act_fn = SiluAndMul()

    def forward(self, x):
        # gate_up holds the concatenated gate and up projections.
        gate_up, _ = self.gate_up_proj(x)
        x = self.act_fn(gate_up)
        x, _ = self.down_proj(x)
        return x
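
# NOTE: ``gate_up_proj`` packs the gate and up projections into a single
# matmul, so its output has shape [..., 2 * intermediate_size // tp_size];
# ``SiluAndMul`` then splits that tensor in half and computes
# ``silu(gate) * up`` before the row-parallel ``down_proj`` maps the result
# back to ``hidden_size``.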


class XverseAttention(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        num_kv_heads: int,
        rope_theta: float = 10000,
        rope_scaling: Optional[Dict[str, Any]] = None,
        max_position_embeddings: int = 8192,
        quant_config: Optional[QuantizationConfig] = None,
        bias: bool = False,
        sliding_window: Optional[int] = None,
        cache_config: Optional[CacheConfig] = None,
    ) -> None:
        super().__init__()
        self.hidden_size = hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = num_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.total_num_kv_heads = num_kv_heads
        # Partition the KV heads across the tensor-parallel GPUs.
        assert self.total_num_kv_heads % tp_size == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        self.head_dim = hidden_size // self.total_num_heads
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.scaling = self.head_dim**-0.5
        self.rope_theta = rope_theta
        self.max_position_embeddings = max_position_embeddings

        self.qkv_proj = QKVParallelLinear(
            hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=bias,
            quant_config=quant_config,
        )
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            hidden_size,
            bias=bias,
            quant_config=quant_config,
        )
        self.rotary_emb = get_rope(
            self.head_dim,
            rotary_dim=self.head_dim,
            max_position=max_position_embeddings,
            base=rope_theta,
            rope_scaling=rope_scaling,
        )
        self.attn = Attention(self.num_heads,
                              self.head_dim,
                              self.scaling,
                              num_kv_heads=self.num_kv_heads,
                              sliding_window=sliding_window,
                              cache_config=cache_config,
                              quant_config=quant_config)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        q, k = self.rotary_emb(positions, q, k)
        attn_output = self.attn(q, k, v, kv_cache, attn_metadata)
        output, _ = self.o_proj(attn_output)
        return output
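
# NOTE: under tensor parallelism each rank holds num_heads = total_num_heads /
# tp_size query heads and num_kv_heads = total_num_kv_heads / tp_size KV heads.
# A worked example with hypothetical sizes (hidden_size=4096, 32 query heads,
# 8 KV heads, tp_size=2):
#     head_dim = 4096 // 32 = 128
#     q_size   = 16 * 128   = 2048
#     kv_size  =  4 * 128   =  512
# so ``qkv.split([2048, 512, 512], dim=-1)`` yields the per-rank q, k and v.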


class XverseDecoderLayer(nn.Module):

    def __init__(
        self,
        config: PretrainedConfig,
        cache_config: Optional[CacheConfig] = None,
        quant_config: Optional[QuantizationConfig] = None,
    ) -> None:
        super().__init__()
        self.hidden_size = config.hidden_size
        rope_theta = getattr(config, "rope_theta", 10000)
        rope_scaling = getattr(config, "rope_scaling", None)
        max_position_embeddings = getattr(config, "max_position_embeddings",
                                          8192)
        sliding_window = getattr(config, "sliding_window", None)
        self.self_attn = XverseAttention(
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
            num_kv_heads=getattr(config, "num_key_value_heads",
                                 config.num_attention_heads),
            rope_theta=rope_theta,
            rope_scaling=rope_scaling,
            max_position_embeddings=max_position_embeddings,
            quant_config=quant_config,
            bias=getattr(config, "bias", False),
            sliding_window=sliding_window,
            cache_config=cache_config,
        )
        self.mlp = XverseMLP(
            hidden_size=self.hidden_size,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            quant_config=quant_config,
        )
        self.input_layernorm = RMSNorm(config.hidden_size,
                                       eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(config.hidden_size,
                                                eps=config.rms_norm_eps)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
        residual: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Self Attention
        if residual is None:
            residual = hidden_states
            hidden_states = self.input_layernorm(hidden_states)
        else:
            hidden_states, residual = self.input_layernorm(
                hidden_states, residual)
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
            kv_cache=kv_cache,
            attn_metadata=attn_metadata,
        )

        # Fully Connected
        hidden_states, residual = self.post_attention_layernorm(
            hidden_states, residual)
        hidden_states = self.mlp(hidden_states)
        return hidden_states, residual
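
# NOTE: the layer uses a pre-norm, fused-residual scheme: when RMSNorm is
# called with a residual tensor it returns both the normalized sum and the
# updated residual, so the explicit ``hidden_states + residual`` additions are
# folded into the norm kernels rather than written out in ``forward``.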


class XverseModel(nn.Module):

    def __init__(
        self,
        config: PretrainedConfig,
        cache_config: Optional[CacheConfig] = None,
        quant_config: Optional[QuantizationConfig] = None,
        lora_config: Optional[LoRAConfig] = None,
    ) -> None:
        super().__init__()
        self.config = config
        self.padding_idx = config.pad_token_id
        lora_vocab = (lora_config.lora_extra_vocab_size *
                      (lora_config.max_loras or 1)) if lora_config else 0
        self.vocab_size = config.vocab_size + lora_vocab
        self.org_vocab_size = config.vocab_size
        self.embed_tokens = VocabParallelEmbedding(
            self.vocab_size,
            config.hidden_size,
            org_num_embeddings=config.vocab_size,
        )
        self.layers = nn.ModuleList([
            XverseDecoderLayer(config, cache_config, quant_config)
            for _ in range(config.num_hidden_layers)
        ])
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        hidden_states = self.embed_tokens(input_ids)
        residual = None
        for i in range(len(self.layers)):
            layer = self.layers[i]
            hidden_states, residual = layer(
                positions,
                hidden_states,
                kv_caches[i],
                attn_metadata,
                residual,
            )
        hidden_states, _ = self.norm(hidden_states, residual)
        return hidden_states
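
# NOTE: ``kv_caches`` holds one cache tensor per decoder layer, indexed in the
# same order as ``self.layers``; the final ``self.norm`` call folds in the
# last layer's residual before the hidden states are returned.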


class XverseForCausalLM(nn.Module):
    packed_modules_mapping = {
        "qkv_proj": [
            "q_proj",
            "k_proj",
            "v_proj",
        ],
        "gate_up_proj": [
            "gate_proj",
            "up_proj",
        ],
    }

    # LoRA specific attributes
    supported_lora_modules = [
        "qkv_proj",
        "o_proj",
        "gate_up_proj",
        "down_proj",
        "embed_tokens",
        "lm_head",
    ]
    embedding_modules = {
        "embed_tokens": "input_embeddings",
        "lm_head": "output_embeddings",
    }
    embedding_padding_modules = ["lm_head"]

    def __init__(
        self,
        config: PretrainedConfig,
        cache_config: Optional[CacheConfig] = None,
        quant_config: Optional[QuantizationConfig] = None,
        lora_config: Optional[LoRAConfig] = None,
    ) -> None:
        super().__init__()
        self.config = config
        self.quant_config = quant_config
        self.model = XverseModel(config, cache_config, quant_config)
        self.lm_head = ParallelLMHead(config.vocab_size, config.hidden_size)
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.sampler = Sampler()

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        hidden_states = self.model(input_ids, positions, kv_caches,
                                   attn_metadata)
        return hidden_states

    def compute_logits(self, hidden_states: torch.Tensor,
                       sampling_metadata: SamplingMetadata) -> torch.Tensor:
        logits = self.logits_processor(self.lm_head.weight, hidden_states,
                                       sampling_metadata)
        return logits

    def sample(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        next_tokens = self.sampler(logits, sampling_metadata)
        return next_tokens
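
    # Weight loading: checkpoint tensors arrive under their HuggingFace names.
    # For example, a hypothetical "model.layers.0.self_attn.q_proj.weight" is
    # renamed to "model.layers.0.self_attn.qkv_proj.weight" and loaded into
    # the fused QKV parameter with shard_id "q"; "gate_proj"/"up_proj"
    # likewise map to shards 0 and 1 of "gate_up_proj". Names that match no
    # stacked mapping fall through to ``default_weight_loader``.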
    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        params_dict = dict(self.named_parameters())
        for name, loaded_weight in weights:
            if ("rotary_emb.inv_freq" in name
                    or "rotary_emb.cos_cached" in name
                    or "rotary_emb.sin_cached" in name):
                continue
            for (param_name, weight_name, shard_id) in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader(param, loaded_weight)
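
# A rough summary of the inference entry points above (the exact call sequence
# is decided by the serving engine, not by this file): ``forward`` produces
# hidden states for a batch of token ids, ``compute_logits`` projects them
# through the parallel LM head, and ``sample`` draws next tokens from those
# logits.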