# coding=utf-8
# Adapted from
# https://github.com/huggingface/transformers/blob/v4.28.0/src/transformers/models/llama/modeling_llama.py
# Copyright 2023 The PygmalionAI team.
# Copyright 2023 The vLLM team.
# Copyright 2023 DeepSeek-AI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Inference-only Deepseek model."""
from typing import Any, Dict, List, Optional, Tuple

import torch
import torch.nn.functional as F
from torch import nn
from transformers import PretrainedConfig

from aphrodite.modeling.metadata import InputMetadata
from aphrodite.modeling.layers.activation import SiluAndMul
from aphrodite.modeling.layers.attention import PagedAttention
from aphrodite.modeling.layers.triton_kernel.fused_moe import fused_moe
from aphrodite.modeling.layers.layernorm import RMSNorm
from aphrodite.modeling.layers.linear import (LinearMethodBase,
                                              MergedColumnParallelLinear,
                                              ReplicatedLinear,
                                              QKVParallelLinear,
                                              RowParallelLinear,
                                              ColumnParallelLinear)
from aphrodite.modeling.layers.rotary_embedding import get_rope
from aphrodite.modeling.layers.sampler import Sampler
from aphrodite.modeling.layers.vocab_parallel_embedding import (
    VocabParallelEmbedding, ParallelLMHead)
from aphrodite.modeling.megatron.communication_op import (
    tensor_model_parallel_all_reduce)
from aphrodite.modeling.megatron.parallel_state import (
    get_tensor_model_parallel_rank, get_tensor_model_parallel_world_size)
from aphrodite.modeling.sampling_metadata import SamplingMetadata
from aphrodite.modeling.hf_downloader import (default_weight_loader,
                                              hf_model_weights_iterator)
from aphrodite.common.sequence import SamplerOutput

KVCache = Tuple[torch.Tensor, torch.Tensor]


class DeepseekMLP(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        intermediate_size: int,
        hidden_act: str,
        linear_method: Optional[LinearMethodBase] = None,
        reduce_results: bool = True,
    ) -> None:
        super().__init__()
        if (linear_method is not None
                and not linear_method.quant_config.merge_weight()):
            self.merge_weight = False
            self.gate_proj = ColumnParallelLinear(hidden_size,
                                                  intermediate_size,
                                                  bias=False,
                                                  linear_method=linear_method)
            self.up_proj = ColumnParallelLinear(hidden_size,
                                                intermediate_size,
                                                bias=False,
                                                linear_method=linear_method)
        else:
            self.merge_weight = True
            self.gate_up_proj = MergedColumnParallelLinear(
                hidden_size, [intermediate_size] * 2,
                bias=False,
                linear_method=linear_method)
        self.down_proj = RowParallelLinear(intermediate_size,
                                           hidden_size,
                                           bias=False,
                                           linear_method=linear_method,
                                           reduce_results=reduce_results)
        if hidden_act != "silu":
            raise ValueError(f"Unsupported activation: {hidden_act}. "
                             "Only silu is supported for now.")
        self.act_fn = SiluAndMul()

    def forward(self, x):
        if self.merge_weight:
            gate_up, _ = self.gate_up_proj(x)
        else:
            up, _ = self.up_proj(x)
            gate, _ = self.gate_proj(x)
            gate_up = torch.cat([gate, up], dim=-1)
        x = self.act_fn(gate_up)
        x, _ = self.down_proj(x)
        return x


class DeepseekMoE(nn.Module):

    def __init__(
        self,
        config: PretrainedConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ):
        super().__init__()
        self.config = config
        self.rank = get_tensor_model_parallel_rank()
        self.tp_size = get_tensor_model_parallel_world_size()
        self.n_routed_experts = config.n_routed_experts
        self.top_k = config.num_experts_per_tok
        if self.tp_size > self.n_routed_experts:
            raise ValueError(
                f"Tensor parallel size {self.tp_size} is greater than "
                f"the number of experts {self.n_routed_experts}.")

        self.experts = nn.ModuleList([
            DeepseekMLP(hidden_size=config.hidden_size,
                        intermediate_size=config.moe_intermediate_size,
                        hidden_act=config.hidden_act,
                        linear_method=linear_method,
                        reduce_results=False)
            for idx in range(self.n_routed_experts)
        ])
        self.pack_params()

        self.gate = ReplicatedLinear(config.hidden_size,
                                     self.n_routed_experts,
                                     bias=False,
                                     linear_method=None)

        if config.n_shared_experts is not None:
            intermediate_size = (config.moe_intermediate_size *
                                 config.n_shared_experts)
            self.shared_experts = DeepseekMLP(
                hidden_size=config.hidden_size,
                intermediate_size=intermediate_size,
                hidden_act=config.hidden_act,
                linear_method=linear_method,
                reduce_results=False,
            )

    def pack_params(self):
        # Pack the per-expert weights into contiguous [n_experts, ...] stacks
        # so the fused MoE kernel can index them directly; the expert
        # parameters are re-pointed at views of the packed storage, so no
        # weight memory is duplicated.
        w1 = []
        w2 = []
        for expert in self.experts:
            w1.append(expert.gate_up_proj.weight)
            w2.append(expert.down_proj.weight)
        self.w1 = torch._utils._flatten_dense_tensors(w1)
        w1s = torch._utils._unflatten_dense_tensors(self.w1, w1)
        for data, param in zip(w1s, w1):
            param.data = data
        self.w1 = self.w1.view(len(w1), *w1s[0].shape)

        self.w2 = torch._utils._flatten_dense_tensors(w2)
        w2s = torch._utils._unflatten_dense_tensors(self.w2, w2)
        for data, param in zip(w2s, w2):
            param.data = data
        self.w2 = self.w2.view(len(w2), *w2s[0].shape)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        batch_size, sequence_length, hidden_dim = hidden_states.shape
        hidden_states = hidden_states.view(-1, hidden_dim)
        if self.config.n_shared_experts is not None:
            shared_output = self.shared_experts(hidden_states)
        # router_logits: (batch * sequence_length, n_experts)
        router_logits, _ = self.gate(hidden_states)

        routing_weights = F.softmax(router_logits, dim=1, dtype=torch.float)
        routing_weights, selected_experts = torch.topk(routing_weights,
                                                       self.top_k,
                                                       dim=-1)
        if self.config.norm_topk_prob:
            routing_weights /= routing_weights.sum(dim=-1, keepdim=True)

        final_hidden_states = fused_moe(hidden_states,
                                        self.w1,
                                        self.w2,
                                        routing_weights,
                                        selected_experts,
                                        inplace=True)

        if self.config.n_shared_experts is not None:
            final_hidden_states = final_hidden_states + shared_output
        final_hidden_states = tensor_model_parallel_all_reduce(
            final_hidden_states)

        return final_hidden_states.view(batch_size, sequence_length,
                                        hidden_dim)


class DeepseekAttention(nn.Module):

    def __init__(
        self,
        hidden_size: int,
        num_heads: int,
        num_kv_heads: int,
        rope_theta: float = 10000,
        rope_scaling: Optional[Dict[str, Any]] = None,
        max_position_embeddings: int = 8192,
        linear_method: Optional[LinearMethodBase] = None,
    ) -> None:
        super().__init__()
        self.hidden_size = hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = num_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.total_num_kv_heads = num_kv_heads
        if self.total_num_kv_heads >= tp_size:
            # Number of KV heads is greater than TP size, so we partition
            # the KV heads across multiple tensor parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        self.head_dim = hidden_size // self.total_num_heads
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.scaling = self.head_dim**-0.5
        self.rope_theta = rope_theta
        self.max_position_embeddings = max_position_embeddings

        if (linear_method is not None
                and not linear_method.quant_config.merge_weight()):
            self.merge_weight = False
            self.q_proj = ColumnParallelLinear(hidden_size,
                                               self.q_size,
                                               bias=False,
                                               linear_method=linear_method)
            self.k_proj = ColumnParallelLinear(hidden_size,
                                               self.kv_size,
                                               bias=False,
                                               linear_method=linear_method)
            self.v_proj = ColumnParallelLinear(hidden_size,
                                               self.kv_size,
                                               bias=False,
                                               linear_method=linear_method)
        else:
            self.merge_weight = True
            self.qkv_proj = QKVParallelLinear(
                hidden_size,
                self.head_dim,
                self.total_num_heads,
                self.total_num_kv_heads,
                bias=False,
                linear_method=linear_method,
            )
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            hidden_size,
            bias=False,
            linear_method=linear_method,
        )
        is_neox_style = (True if linear_method is None
                         or linear_method.quant_config.rope_style() is None
                         else linear_method.quant_config.rope_style())
        self.rotary_emb = get_rope(
            self.head_dim,
            rotary_dim=self.head_dim,
            max_position=max_position_embeddings,
            base=rope_theta,
            rope_scaling=rope_scaling,
            is_neox_style=is_neox_style,
        )
        self.attn = PagedAttention(self.num_heads,
                                   self.head_dim,
                                   self.scaling,
                                   num_kv_heads=self.num_kv_heads)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
    ) -> torch.Tensor:
        if self.merge_weight:
            qkv, _ = self.qkv_proj(hidden_states)
            q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size],
                                dim=-1)
        else:
            q, _ = self.q_proj(hidden_states)
            k, _ = self.k_proj(hidden_states)
            v, _ = self.v_proj(hidden_states)
        q, k = self.rotary_emb(positions, q, k)
        k_cache, v_cache = kv_cache
        attn_output = self.attn(q, k, v, k_cache, v_cache, input_metadata)
        output, _ = self.o_proj(attn_output)
        return output


class DeepseekDecoderLayer(nn.Module):

    def __init__(
        self,
        config: PretrainedConfig,
        layer_idx: int,
        linear_method: Optional[LinearMethodBase] = None,
    ) -> None:
        super().__init__()
        self.hidden_size = config.hidden_size
        rope_theta = getattr(config, "rope_theta", 10000)
        rope_scaling = getattr(config, "rope_scaling", None)
        max_position_embeddings = getattr(config, "max_position_embeddings",
                                          8192)
        self.self_attn = DeepseekAttention(
            hidden_size=self.hidden_size,
            num_heads=config.num_attention_heads,
            num_kv_heads=config.num_key_value_heads,
            rope_theta=rope_theta,
            rope_scaling=rope_scaling,
            max_position_embeddings=max_position_embeddings,
            linear_method=linear_method,
        )
        if (config.n_routed_experts is not None
                and layer_idx >= config.first_k_dense_replace
                and layer_idx % config.moe_layer_freq == 0):
            self.mlp = DeepseekMoE(config=config, linear_method=linear_method)
        else:
            self.mlp = DeepseekMLP(
                hidden_size=config.hidden_size,
                intermediate_size=config.intermediate_size,
                hidden_act=config.hidden_act,
                linear_method=linear_method,
            )
        self.input_layernorm = RMSNorm(config.hidden_size,
                                       eps=config.rms_norm_eps)
        self.post_attention_layernorm = RMSNorm(config.hidden_size,
                                                eps=config.rms_norm_eps)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: KVCache,
        input_metadata: InputMetadata,
        residual: Optional[torch.Tensor],
    ) -> torch.Tensor:
        # Self Attention
        if residual is None:
            residual = hidden_states
            hidden_states = self.input_layernorm(hidden_states)
        else:
            hidden_states, residual = self.input_layernorm(
                hidden_states, residual)
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
            kv_cache=kv_cache,
            input_metadata=input_metadata,
        )

        # Fully Connected
        hidden_states, residual = self.post_attention_layernorm(
            hidden_states, residual)
        hidden_states = self.mlp(hidden_states)
        return hidden_states, residual


class DeepseekModel(nn.Module):

    def __init__(
        self,
        config: PretrainedConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ) -> None:
        super().__init__()
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        self.embed_tokens = VocabParallelEmbedding(
            config.vocab_size,
            config.hidden_size,
            linear_method=linear_method,
        )
        self.layers = nn.ModuleList([
            DeepseekDecoderLayer(config,
                                 layer_idx,
                                 linear_method=linear_method)
            for layer_idx in range(config.num_hidden_layers)
        ])
        self.norm = RMSNorm(config.hidden_size, eps=config.rms_norm_eps)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
    ) -> torch.Tensor:
        hidden_states = self.embed_tokens(input_ids)
        residual = None
        for i in range(len(self.layers)):
            layer = self.layers[i]
            hidden_states, residual = layer(positions, hidden_states,
                                            kv_caches[i], input_metadata,
                                            residual)
        hidden_states, _ = self.norm(hidden_states, residual)
        return hidden_states


class DeepseekForCausalLM(nn.Module):

    def __init__(
        self,
        config: PretrainedConfig,
        linear_method: Optional[LinearMethodBase] = None,
    ) -> None:
        super().__init__()
        self.config = config
        self.linear_method = linear_method
        self.model = DeepseekModel(config, linear_method)
        self.lm_head = ParallelLMHead(config.vocab_size,
                                      config.hidden_size,
                                      linear_method=linear_method)
        self.sampler = Sampler(config.vocab_size)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[KVCache],
        input_metadata: InputMetadata,
    ) -> torch.Tensor:
        hidden_states = self.model(input_ids, positions, kv_caches,
                                   input_metadata)
        return hidden_states

    def sample(
        self,
        hidden_states: Optional[torch.Tensor],
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        next_tokens = self.sampler(self.lm_head(hidden_states),
                                   sampling_metadata)
        return next_tokens

    def load_weights(self,
                     model_name_or_path: str,
                     cache_dir: Optional[str] = None,
                     load_format: str = "auto",
                     revision: Optional[str] = None):
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
            ("gate_up_proj", "gate_proj", 0),
            ("gate_up_proj", "up_proj", 1),
        ]
        if (self.linear_method is not None
                and not self.linear_method.quant_config.merge_weight()):
            stacked_params_mapping = []
        params_dict = dict(self.named_parameters())
        for name, loaded_weight in hf_model_weights_iterator(
                model_name_or_path,
                cache_dir,
                load_format,
                revision,
                fall_back_to_pt=False):
            if "rotary_emb.inv_freq" in name:
                continue
            for (param_name, weight_name, shard_id) in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                # Skip experts that are not assigned to this worker.
                if (("mlp.experts." in name or "mlp.shared_experts." in name)
                        and name not in params_dict):
                    continue
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                # Skip loading extra bias for GPTQ models.
                if name.endswith(".bias") and name not in params_dict:
                    continue
                # Skip experts that are not assigned to this worker.
                if (("mlp.experts." in name or "mlp.shared_experts." in name)
                        and name not in params_dict):
                    continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader(param, loaded_weight)