@@ -0,0 +1,410 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Inference-only OLMoE model compatible with HuggingFace weights."""
+from typing import Any, Dict, Iterable, List, Optional, Tuple
+
+import torch
+from torch import nn
+from transformers import PretrainedConfig
+
+from aphrodite.attention import Attention, AttentionMetadata
+from aphrodite.common.config import CacheConfig
+from aphrodite.common.sequence import IntermediateTensors, SamplerOutput
+from aphrodite.common.utils import progress_bar
+from aphrodite.distributed import get_tensor_model_parallel_world_size
+from aphrodite.modeling.layers.fused_moe import FusedMoE
+from aphrodite.modeling.layers.layernorm import RMSNorm
+from aphrodite.modeling.layers.linear import (QKVParallelLinear,
+                                              ReplicatedLinear,
+                                              RowParallelLinear)
+from aphrodite.modeling.layers.logits_processor import LogitsProcessor
+from aphrodite.modeling.layers.rotary_embedding import get_rope
+from aphrodite.modeling.layers.sampler import Sampler
+from aphrodite.modeling.layers.vocab_parallel_embedding import (
+    ParallelLMHead, VocabParallelEmbedding)
+from aphrodite.modeling.model_loader.weight_utils import default_weight_loader
+from aphrodite.modeling.sampling_metadata import SamplingMetadata
+from aphrodite.quantization.base_config import QuantizationConfig
+
+
+class OlmoeMoE(nn.Module):
+    """A tensor-parallel MoE implementation for Olmoe that shards each expert
+    across all ranks.
+
+    Each expert's weights are sharded across all ranks, a fused MoE kernel
+    computes the forward pass, and the outputs are then reduced across ranks.
+    """
+
+    def __init__(self,
+                 num_experts: int,
+                 top_k: int,
+                 hidden_size: int,
+                 intermediate_size: int,
+                 params_dtype: Optional[torch.dtype] = None,
+                 quant_config: Optional[QuantizationConfig] = None,
+                 tp_size: Optional[int] = None,
+                 prefix: str = ""):
+        super().__init__()
+        self.hidden_size = hidden_size
+
+        # Gate always runs at half / full precision for now.
+        self.gate = ReplicatedLinear(hidden_size,
+                                     num_experts,
+                                     bias=False,
+                                     quant_config=None)
+
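+        # OLMoE takes a softmax over all expert logits and keeps the selected
+        # top-k weights without renormalizing them, hence renormalize=False
+        # below.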
+        self.experts = FusedMoE(num_experts=num_experts,
+                                top_k=top_k,
+                                hidden_size=hidden_size,
+                                intermediate_size=intermediate_size,
+                                reduce_results=True,
+                                renormalize=False,
+                                quant_config=quant_config,
+                                tp_size=tp_size)
+
+    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
+        # NOTE: hidden_states can have either 1D or 2D shape.
+        orig_shape = hidden_states.shape
+        hidden_dim = hidden_states.shape[-1]
+        hidden_states = hidden_states.view(-1, hidden_dim)
+        # router_logits: (num_tokens, n_experts)
+        router_logits, _ = self.gate(hidden_states)
+        final_hidden_states = self.experts(hidden_states=hidden_states,
+                                           router_logits=router_logits)
+        return final_hidden_states.view(orig_shape)
+
+
+class OlmoeAttention(nn.Module):
+
+    def __init__(
+        self,
+        hidden_size: int,
+        num_heads: int,
+        num_kv_heads: int,
+        rope_theta: float = 10000,
+        rope_scaling: Optional[Dict[str, Any]] = None,
+        max_position_embeddings: int = 4096,
+        cache_config: Optional[CacheConfig] = None,
+        quant_config: Optional[QuantizationConfig] = None,
+    ) -> None:
+        super().__init__()
+        self.hidden_size = hidden_size
+        tp_size = get_tensor_model_parallel_world_size()
+        self.total_num_heads = num_heads
+        assert self.total_num_heads % tp_size == 0
+        self.num_heads = self.total_num_heads // tp_size
+        self.total_num_kv_heads = num_kv_heads
+        if self.total_num_kv_heads >= tp_size:
+            # Number of KV heads is greater than TP size, so we partition
+            # the KV heads across multiple tensor parallel GPUs.
+            assert self.total_num_kv_heads % tp_size == 0
+        else:
+            # Number of KV heads is less than TP size, so we replicate
+            # the KV heads across multiple tensor parallel GPUs.
+            assert tp_size % self.total_num_kv_heads == 0
+        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
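+        # For example, 16 KV heads with tp_size=4 gives 4 KV heads per rank,
+        # while 2 KV heads with tp_size=4 replicate each KV head on 2 ranks.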
+        self.head_dim = hidden_size // self.total_num_heads
+        self.q_size = self.num_heads * self.head_dim
+        self.kv_size = self.num_kv_heads * self.head_dim
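+        # Standard 1/sqrt(head_dim) scaling for scaled dot-product attention.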
+        self.scaling = self.head_dim**-0.5
+        self.rope_theta = rope_theta
+        self.max_position_embeddings = max_position_embeddings
+
+        self.qkv_proj = QKVParallelLinear(
+            hidden_size,
+            self.head_dim,
+            self.total_num_heads,
+            self.total_num_kv_heads,
+            bias=False,
+            quant_config=quant_config,
+        )
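+        # OLMoE uses QK-norm: queries and keys are RMS-normalized before the
+        # rotary embedding is applied.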
+        self.q_norm = RMSNorm(hidden_size, eps=1e-5)
+        self.k_norm = RMSNorm(hidden_size, eps=1e-5)
+        self.o_proj = RowParallelLinear(
+            self.total_num_heads * self.head_dim,
+            hidden_size,
+            bias=False,
+            quant_config=quant_config,
+        )
+
+        self.rotary_emb = get_rope(
+            self.head_dim,
+            rotary_dim=self.head_dim,
+            max_position=max_position_embeddings,
+            base=rope_theta,
+            rope_scaling=rope_scaling,
+            is_neox_style=True,
+        )
+        self.attn = Attention(self.num_heads,
+                              self.head_dim,
+                              self.scaling,
+                              num_kv_heads=self.num_kv_heads,
+                              cache_config=cache_config,
+                              quant_config=quant_config)
+
+    def forward(
+        self,
+        positions: torch.Tensor,
+        hidden_states: torch.Tensor,
+        kv_cache: torch.Tensor,
+        attn_metadata: AttentionMetadata,
+    ) -> torch.Tensor:
+        qkv, _ = self.qkv_proj(hidden_states)
+        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
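+        # The q/k views from the split are made contiguous before the norm;
+        # the fused RMSNorm kernel is assumed to require contiguous inputs.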
+        q, k = self.q_norm(q.contiguous()), self.k_norm(k.contiguous())
+        q, k = self.rotary_emb(positions, q, k)
+        attn_output = self.attn(q, k, v, kv_cache, attn_metadata)
+        output, _ = self.o_proj(attn_output)
+        return output
+
+
+class OlmoeDecoderLayer(nn.Module):
+
+    def __init__(
+        self,
+        config: PretrainedConfig,
+        layer_idx: int,
+        cache_config: Optional[CacheConfig] = None,
+        quant_config: Optional[QuantizationConfig] = None,
+    ) -> None:
+        super().__init__()
+        self.hidden_size = config.hidden_size
+        rope_theta = getattr(config, "rope_theta", 10000)
+        rope_scaling = getattr(config, "rope_scaling", None)
+        max_position_embeddings = getattr(config, "max_position_embeddings",
+                                          4096)
+
+        self.self_attn = OlmoeAttention(
+            hidden_size=self.hidden_size,
+            num_heads=config.num_attention_heads,
+            num_kv_heads=config.num_key_value_heads,
+            rope_theta=rope_theta,
+            rope_scaling=rope_scaling,
+            max_position_embeddings=max_position_embeddings,
+            cache_config=cache_config,
+            quant_config=quant_config,
+        )
+
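+        # Every OLMoE decoder layer uses a sparse MoE block as its FFN
+        # (64 experts, 8 active per token, in the released OLMoE-1B-7B).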
+        self.mlp = OlmoeMoE(
+            num_experts=config.num_experts,
+            top_k=config.num_experts_per_tok,
+            hidden_size=config.hidden_size,
+            intermediate_size=config.intermediate_size,
+            quant_config=quant_config,
+        )
+        self.input_layernorm = RMSNorm(config.hidden_size, eps=1e-5)
+        self.post_attention_layernorm = RMSNorm(config.hidden_size, eps=1e-5)
+
+    def forward(
+        self,
+        positions: torch.Tensor,
+        hidden_states: torch.Tensor,
+        kv_cache: torch.Tensor,
+        attn_metadata: AttentionMetadata,
+        residual: Optional[torch.Tensor],
+    ) -> Tuple[torch.Tensor, torch.Tensor]:
+        # Self Attention
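+        # On the first layer there is no residual yet; on later layers the
+        # RMSNorm call fuses the residual add with the normalization.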
+        if residual is None:
+            residual = hidden_states
+            hidden_states = self.input_layernorm(hidden_states)
+        else:
+            hidden_states, residual = self.input_layernorm(
+                hidden_states, residual)
+
+        hidden_states = self.self_attn(
+            positions=positions,
+            hidden_states=hidden_states,
+            kv_cache=kv_cache,
+            attn_metadata=attn_metadata,
+        )
+
+        # Fully Connected
+        hidden_states, residual = self.post_attention_layernorm(
+            hidden_states, residual)
+        hidden_states = self.mlp(hidden_states)
+        return hidden_states, residual
+
+
+class OlmoeModel(nn.Module):
+
+    def __init__(
+        self,
+        config: PretrainedConfig,
+        cache_config: Optional[CacheConfig] = None,
+        quant_config: Optional[QuantizationConfig] = None,
+    ) -> None:
+        super().__init__()
+        self.padding_idx = config.pad_token_id
+        self.vocab_size = config.vocab_size
+
+        self.embed_tokens = VocabParallelEmbedding(
+            config.vocab_size,
+            config.hidden_size,
+        )
+        self.layers = nn.ModuleList([
+            OlmoeDecoderLayer(config,
+                              layer_idx,
+                              cache_config,
+                              quant_config=quant_config)
+            for layer_idx in range(config.num_hidden_layers)
+        ])
+        self.norm = RMSNorm(config.hidden_size, eps=1e-5)
+
+    def forward(
+        self,
+        input_ids: torch.Tensor,
+        positions: torch.Tensor,
+        kv_caches: List[torch.Tensor],
+        attn_metadata: AttentionMetadata,
+    ) -> torch.Tensor:
+        hidden_states = self.embed_tokens(input_ids)
+        residual = None
+        for i in range(len(self.layers)):
+            layer = self.layers[i]
+            hidden_states, residual = layer(positions, hidden_states,
+                                            kv_caches[i], attn_metadata,
+                                            residual)
+        hidden_states, _ = self.norm(hidden_states, residual)
+        return hidden_states
+
+
+class OlmoeForCausalLM(nn.Module):
+
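+    # Assumption: OLMoE weights ship as safetensors, so the loader is told
+    # not to fall back to PyTorch .bin/.pt checkpoint files.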
+    fall_back_to_pt_during_load = False
+
+    def __init__(
+        self,
+        config: PretrainedConfig,
+        cache_config: Optional[CacheConfig] = None,
+        quant_config: Optional[QuantizationConfig] = None,
+    ) -> None:
+        super().__init__()
+        self.config = config
+        self.quant_config = quant_config
+        self.model = OlmoeModel(config, cache_config, quant_config)
+        self.lm_head = ParallelLMHead(config.vocab_size,
+                                      config.hidden_size,
+                                      quant_config=quant_config)
+        self.logits_processor = LogitsProcessor(config.vocab_size)
+        self.sampler = Sampler()
+
+    def forward(
+        self,
+        input_ids: torch.Tensor,
+        positions: torch.Tensor,
+        kv_caches: List[torch.Tensor],
+        attn_metadata: AttentionMetadata,
+        intermediate_tensors: Optional[IntermediateTensors] = None,
+    ) -> torch.Tensor:
+        hidden_states = self.model(input_ids, positions, kv_caches,
+                                   attn_metadata)
+        return hidden_states
+
+    def compute_logits(self, hidden_states: torch.Tensor,
+                       sampling_metadata: SamplingMetadata) -> torch.Tensor:
+        logits = self.logits_processor(self.lm_head, hidden_states,
+                                       sampling_metadata)
+        return logits
+
+    def sample(
+        self,
+        logits: Optional[torch.Tensor],
+        sampling_metadata: SamplingMetadata,
+    ) -> Optional[SamplerOutput]:
+        next_tokens = self.sampler(logits, sampling_metadata)
+        return next_tokens
+
+    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
+        stacked_params_mapping = [
+            # (param_name, shard_name, shard_id)
+            ("qkv_proj", "q_proj", "q"),
+            ("qkv_proj", "k_proj", "k"),
+            ("qkv_proj", "v_proj", "v"),
+            ("gate_up_proj", "gate_proj", 0),
+            ("gate_up_proj", "up_proj", 1),
+        ]
+
+        # Params for weights, fp8 weight scales, fp8 activation scales
+        # (param_name, weight_name, expert_id, shard_id)
+        expert_params_mapping = FusedMoE.make_expert_params_mapping(
+            ckpt_gate_proj_name="gate_proj",
+            ckpt_down_proj_name="down_proj",
+            ckpt_up_proj_name="up_proj",
+            num_experts=self.config.num_experts)
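+        # Each mapping entry rewrites a per-expert checkpoint name (e.g.
+        # "mlp.experts.0.gate_proj") to the corresponding fused FusedMoE
+        # parameter, tagged with its expert_id and shard_id.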
+
+        params_dict = dict(self.named_parameters())
+        weights_list = list(weights)
+        for name, loaded_weight in progress_bar(
+            weights_list,
+            desc="Loading modules..."
+        ):
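+            # Each checkpoint tensor is handled one of three ways: as a fused
+            # q/k/v or gate/up shard, as a per-expert MoE weight, or as a
+            # regular parameter loaded directly.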
+            if "rotary_emb.inv_freq" in name:
+                continue
+            for (param_name, weight_name, shard_id) in stacked_params_mapping:
+                # Skip non-stacked layers and experts (experts handled below).
+                if weight_name not in name:
+                    continue
+                # We have mlp.experts[0].gate_proj in the checkpoint.
+                # Since the experts are handled below via
+                # expert_params_mapping, we must skip them here BEFORE the
+                # name is rewritten; otherwise the name would become
+                # mlp.experts[0].gate_up_proj and then be mangled again by
+                # expert_params_mapping (e.g. to gate_gate_up_proj),
+                # breaking the load.
+                if "mlp.experts" in name:
+                    continue
+                name = name.replace(weight_name, param_name)
+                # Skip loading extra bias for GPTQ models.
+                if name.endswith(".bias") and name not in params_dict:
+                    continue
+                if name not in params_dict:
+                    continue
+
+                param = params_dict[name]
+                weight_loader = param.weight_loader
+                weight_loader(param, loaded_weight, shard_id)
+                break
+            else:
+                for mapping in expert_params_mapping:
+                    param_name, weight_name, expert_id, shard_id = mapping
+                    if weight_name not in name:
+                        continue
+                    name = name.replace(weight_name, param_name)
+                    param = params_dict[name]
+                    weight_loader = param.weight_loader
+                    weight_loader(param,
+                                  loaded_weight,
+                                  name,
+                                  shard_id=shard_id,
+                                  expert_id=expert_id)
+                    break
+                else:
+                    # Skip loading extra bias for GPTQ models.
+                    if name.endswith(".bias") and name not in params_dict:
+                        continue
+                    # Remapping the name of FP8 kv-scale.
+                    if name.endswith("kv_scale"):
+                        remapped_kv_scale_name = name.replace(
+                            ".kv_scale", ".attn.kv_scale")
+                        if remapped_kv_scale_name not in params_dict:
+                            print(f"Warning: found kv scale in the checkpoint "
+                                  f"(e.g. {name}), but could not find the "
+                                  f"expected name in the model "
+                                  f"(e.g. {remapped_kv_scale_name}). "
+                                  "kv-scale is not loaded.")
+                            continue
+                        else:
+                            name = remapped_kv_scale_name
+
+                    param = params_dict[name]
+                    weight_loader = getattr(param, "weight_loader",
+                                            default_weight_loader)
+                    weight_loader(param, loaded_weight)