# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only BaiChuan model compatible with HuggingFace weights."""
import math
from typing import Iterable, List, Optional, Tuple

import torch
from torch import nn
from transformers import PretrainedConfig

from aphrodite.attention import Attention, AttentionMetadata
from aphrodite.common.config import CacheConfig, LoRAConfig
from aphrodite.common.sequence import IntermediateTensors
from aphrodite.distributed import (get_tensor_model_parallel_rank,
                                   get_tensor_model_parallel_world_size)
from aphrodite.modeling.layers.activation import SiluAndMul
from aphrodite.modeling.layers.layernorm import RMSNorm
from aphrodite.modeling.layers.linear import (MergedColumnParallelLinear,
                                              QKVParallelLinear,
                                              RowParallelLinear)
from aphrodite.modeling.layers.logits_processor import LogitsProcessor
from aphrodite.modeling.layers.rotary_embedding import get_rope
from aphrodite.modeling.layers.sampler import Sampler, SamplerOutput
from aphrodite.modeling.layers.vocab_parallel_embedding import (
    ParallelLMHead, VocabParallelEmbedding)
from aphrodite.modeling.model_loader.weight_utils import default_weight_loader
from aphrodite.modeling.models.interfaces import SupportsLoRA
from aphrodite.modeling.sampling_metadata import SamplingMetadata
from aphrodite.quantization.base_config import QuantizationConfig


def _get_alibi_slopes(total_num_heads: int) -> torch.Tensor:
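    """Return the ALiBi slope assigned to each attention head.

    The slopes form a geometric sequence derived from the head count (for
    example, 4 heads yield roughly [0.25, 0.0625, 0.0156, 0.0039]); when the
    head count is not a power of two, the remaining slopes are taken from the
    sequence for the next power of two, as in the original ALiBi construction.
    """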
    closest_power_of_2 = 2**math.floor(math.log2(total_num_heads))
    base = torch.tensor(
        2**(-(2**-(math.log2(closest_power_of_2) - 3))),
        dtype=torch.float32,
    )
    powers = torch.arange(1, 1 + closest_power_of_2, dtype=torch.int32)
    slopes = torch.pow(base, powers)

    if closest_power_of_2 != total_num_heads:
        extra_base = torch.tensor(
            2**(-(2**-(math.log2(2 * closest_power_of_2) - 3))),
            dtype=torch.float32,
        )
        num_remaining_heads = min(closest_power_of_2,
                                  total_num_heads - closest_power_of_2)
        extra_powers = torch.arange(start=1,
                                    end=1 + 2 * num_remaining_heads,
                                    step=2,
                                    dtype=torch.int32)
        slopes = torch.cat(
            [slopes, torch.pow(extra_base, extra_powers)], dim=0)
    return slopes


class BaiChuanMLP(nn.Module):
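    """Feed-forward block: a fused gate/up column-parallel projection followed
    by SiLU-and-multiply and a row-parallel down projection."""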

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.gate_up_proj = MergedColumnParallelLinear(
            hidden_size, [intermediate_size] * 2,
            bias=False,
            quant_config=quant_config)
        self.down_proj = RowParallelLinear(intermediate_size,
                                           hidden_size,
                                           bias=False,
                                           quant_config=quant_config)
        if hidden_act != "silu":
            raise ValueError(f"Unsupported activation: {hidden_act}. "
                             "Only silu is supported for now.")
        self.act_fn = SiluAndMul()

    def forward(self, x):
        gate_up, _ = self.gate_up_proj(x)
        x = self.act_fn(gate_up)
        x, _ = self.down_proj(x)
        return x


class BaiChuanAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        position_embedding: str,
        rope_theta: float = 10000,
        max_position_embeddings: int = 8192,
        cache_config: Optional[CacheConfig] = None,
        quant_config: Optional[QuantizationConfig] = None,
    ):
        super().__init__()
        self.hidden_size = hidden_size
        tensor_model_parallel_world_size = get_tensor_model_parallel_world_size(
        )
        self.total_num_heads = num_heads
        assert self.total_num_heads % tensor_model_parallel_world_size == 0
        self.num_heads = (self.total_num_heads //
                          tensor_model_parallel_world_size)
        self.head_dim = hidden_size // self.total_num_heads
        self.postion_embedding = position_embedding
        self.rope_theta = rope_theta
        self.max_position_embeddings = max_position_embeddings
        # pylint: disable=invalid-name
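        # W_pack is the fused QKV projection; the name mirrors the single
        # packed weight tensor used by the HuggingFace Baichuan checkpoints.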
        self.W_pack = QKVParallelLinear(
            hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_heads,
            bias=False,
            quant_config=quant_config,
        )
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            hidden_size,
            bias=False,
            quant_config=quant_config,
        )
        # Create the alibi slopes and slice them.
        if self.postion_embedding == "ALIBI":
            tp_rank = get_tensor_model_parallel_rank()
            head_start = tp_rank * self.num_heads
            head_end = (tp_rank + 1) * self.num_heads
            alibi_slopes = _get_alibi_slopes(self.total_num_heads)
            alibi_slopes = alibi_slopes[head_start:head_end].tolist()

            scaling = self.head_dim**-0.5
            self.attn = Attention(self.num_heads,
                                  self.head_dim,
                                  scaling,
                                  alibi_slopes=alibi_slopes,
                                  quant_config=quant_config)
        else:
            self.rotary_emb = get_rope(
                self.head_dim,
                rotary_dim=self.head_dim,
                max_position=self.max_position_embeddings,
                base=self.rope_theta,
            )
            self.scaling = self.head_dim**-0.5
            self.attn = Attention(self.num_heads,
                                  self.head_dim,
                                  self.scaling,
                                  cache_config=cache_config,
                                  quant_config=quant_config)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        qkv, _ = self.W_pack(hidden_states)
        q, k, v = qkv.chunk(chunks=3, dim=-1)
        if self.postion_embedding != "ALIBI":
            q, k = self.rotary_emb(positions, q, k)
        attn_output = self.attn(q, k, v, kv_cache, attn_metadata)
        output, _ = self.o_proj(attn_output)
        return output


class BaiChuanDecoderLayer(nn.Module):
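    """A single Baichuan decoder layer: RMSNorm, self-attention, RMSNorm, and
    the gated MLP, with the residual stream carried through the fused
    add-and-norm path of RMSNorm."""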

    def __init__(self,
                 config: PretrainedConfig,
                 position_embedding: str,
                 cache_config: Optional[CacheConfig] = None,
                 quant_config: Optional[QuantizationConfig] = None):
        super().__init__()
        self.hidden_size = config.hidden_size
        rope_theta = getattr(config, "rope_theta", 10000)
        max_position_embeddings = getattr(config, "max_position_embeddings",
                                          8192)
        self.self_attn = BaiChuanAttention(
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
            position_embedding=position_embedding,
            rope_theta=rope_theta,
            max_position_embeddings=max_position_embeddings,
            cache_config=cache_config,
            quant_config=quant_config,
        )
        self.mlp = BaiChuanMLP(
            hidden_size=self.hidden_size,
            intermediate_size=config.intermediate_size,
            hidden_act=config.hidden_act,
            quant_config=quant_config,
        )
        self.input_layernorm = RMSNorm(config.hidden_size,
                                       eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(config.hidden_size,
                                                eps=config.rms_norm_eps)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
        residual: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        # Self Attention
        if residual is None:
            residual = hidden_states
            hidden_states = self.input_layernorm(hidden_states)
        else:
            hidden_states, residual = self.input_layernorm(
                hidden_states, residual)
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
            kv_cache=kv_cache,
            attn_metadata=attn_metadata,
        )

        # Fully Connected
        hidden_states, residual = self.post_attention_layernorm(
            hidden_states, residual)
        hidden_states = self.mlp(hidden_states)
        return hidden_states, residual


class BaiChuanModel(nn.Module):
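    """The bare Baichuan transformer backbone: token embeddings, a stack of
    decoder layers, and a final RMSNorm."""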

    def __init__(self,
                 config: PretrainedConfig,
                 position_embedding: str,
                 cache_config: Optional[CacheConfig] = None,
                 quant_config: Optional[QuantizationConfig] = None):
        super().__init__()
        self.config = config
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size
        self.embed_tokens = VocabParallelEmbedding(
            config.vocab_size,
            config.hidden_size,
        )
        self.layers = nn.ModuleList([
            BaiChuanDecoderLayer(config, position_embedding, cache_config,
                                 quant_config)
            for _ in range(config.num_hidden_layers)
        ])
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
        hidden_states = self.embed_tokens(input_ids)
        residual = None
        for i in range(len(self.layers)):
            layer = self.layers[i]
            hidden_states, residual = layer(
                positions,
                hidden_states,
                kv_caches[i],
                attn_metadata,
                residual,
            )
        hidden_states, _ = self.norm(hidden_states, residual)
        return hidden_states


class BaiChuanBaseForCausalLM(nn.Module, SupportsLoRA):
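    """Common Baichuan causal-LM wrapper: backbone, LM head, logits processor,
    and sampler.  Subclasses choose the position-embedding scheme ("ROPE" or
    "ALIBI") that is passed down to the attention layers."""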

    packed_modules_mapping = {
        "W_pack": ["W_pack"],
        "gate_up_proj": [
            "gate_proj",
            "up_proj",
        ],
    }
    # LoRA specific attributes
    supported_lora_modules = [
        "W_pack",
        "o_proj",
        "gate_up_proj",
        "down_proj",
    ]
    embedding_modules = {}
    embedding_padding_modules = []

    def __init__(
        self,
        config: PretrainedConfig,
        position_embedding: str,
        cache_config: Optional[CacheConfig] = None,
        quant_config: Optional[QuantizationConfig] = None,
        lora_config: Optional[LoRAConfig] = None,
    ):
        super().__init__()
        self.config = config
        self.lora_config: Optional[LoRAConfig] = lora_config
        self.quant_config = quant_config
        self.model = BaiChuanModel(config, position_embedding, cache_config,
                                   quant_config)
        self.lm_head = ParallelLMHead(config.vocab_size,
                                      config.hidden_size,
                                      quant_config=quant_config)
        self.logits_processor = LogitsProcessor(config.vocab_size)
        self.sampler = Sampler()

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
        intermediate_tensors: Optional[IntermediateTensors] = None,
    ) -> torch.Tensor:
        hidden_states = self.model(input_ids, positions, kv_caches,
                                   attn_metadata)
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[torch.Tensor]:
        logits = self.logits_processor(self.lm_head, hidden_states,
                                       sampling_metadata)
        return logits

    def sample(
        self,
        logits: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        next_tokens = self.sampler(logits, sampling_metadata)
        return next_tokens

    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
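        # Checkpoint tensors named "gate_proj"/"up_proj" are loaded into shard
        # 0/1 of the fused "gate_up_proj" parameter; all other tensors load
        # directly into the parameter of the same name.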

        params_dict = dict(self.named_parameters())
        for name, loaded_weight in weights:
            if "rotary_emb.inv_freq" in name:
                continue
            if name == "lm_head.weight":
                # Unlike Baichuan, Baichuan2 normalizes the head weights.
                # Refer to:
                # https://huggingface.co/baichuan-inc/Baichuan2-7B-Chat/blob/84603cde5ebffb6084e476cfaeceaf0b8b91fe54/modeling_baichuan.py#L508
                # Distinguish between Baichuan and Baichuan2 by checking the
                # vocab size. This is suggested by
                # https://github.com/vllm-project/vllm/pull/1022#discussion_r1325652704
                is_baichuan2 = self.config.vocab_size == 125696
                if is_baichuan2:
                    loaded_weight = torch.nn.functional.normalize(
                        loaded_weight)

            for (param_name, weight_name, shard_id) in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader(param, loaded_weight)


class BaichuanForCausalLM(BaiChuanBaseForCausalLM):
    """Baichuan 13B and Baichuan2 7B/13B."""

    def __init__(
        self,
        config,
        cache_config: Optional[CacheConfig] = None,
        quant_config: Optional[QuantizationConfig] = None,
        lora_config: Optional[LoRAConfig] = None,
    ):
        if config.hidden_size == 4096:  # baichuan2 7b
            super().__init__(config, "ROPE", cache_config, quant_config,
                             lora_config)
        else:  # baichuan 13b, baichuan2 13b
            super().__init__(config, "ALIBI", cache_config, quant_config,
                             lora_config)


class BaiChuanForCausalLM(BaiChuanBaseForCausalLM):
    """Baichuan 7B."""

    def __init__(
        self,
        config,
        cache_config: Optional[CacheConfig] = None,
        quant_config: Optional[QuantizationConfig] = None,
        lora_config: Optional[LoRAConfig] = None,
    ):
        super().__init__(config, "ROPE", cache_config, quant_config,
                         lora_config)