# coding=utf-8
# Copyright 2024 BigCode and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Starcoder2 model."""
from typing import Iterable, List, Optional, Tuple

import torch
from torch import nn
from transformers import Starcoder2Config

from aphrodite.attention import Attention, AttentionMetadata
from aphrodite.common.config import CacheConfig
from aphrodite.common.sequence import IntermediateTensors, SamplerOutput
from aphrodite.distributed import get_tensor_model_parallel_world_size
from aphrodite.modeling.layers.activation import get_act_fn
from aphrodite.modeling.layers.linear import (ColumnParallelLinear,
                                              QKVParallelLinear,
                                              RowParallelLinear)
from aphrodite.modeling.layers.logits_processor import LogitsProcessor
from aphrodite.modeling.layers.rotary_embedding import get_rope
from aphrodite.modeling.layers.sampler import Sampler
from aphrodite.modeling.layers.vocab_parallel_embedding import (
    DEFAULT_VOCAB_PADDING_SIZE, ParallelLMHead, VocabParallelEmbedding)
from aphrodite.modeling.model_loader.weight_utils import default_weight_loader
from aphrodite.modeling.sampling_metadata import SamplingMetadata
from aphrodite.quantization.base_config import QuantizationConfig


class Starcoder2Attention(nn.Module):
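    """Multi-head self-attention for Starcoder2.

    Uses a fused QKV projection, rotary position embeddings, and
    grouped-query attention: key/value heads are partitioned across
    tensor-parallel ranks when possible and replicated otherwise.
    """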

    def __init__(self,
                 config: Starcoder2Config,
                 cache_config: Optional[CacheConfig] = None,
                 quant_config: Optional[QuantizationConfig] = None):
        super().__init__()
        self.config = config

        self.hidden_size = config.hidden_size
        tp_size = get_tensor_model_parallel_world_size()
        self.total_num_heads = config.num_attention_heads
        assert self.total_num_heads % tp_size == 0
        self.num_heads = self.total_num_heads // tp_size
        self.total_num_kv_heads = config.num_key_value_heads
        if self.total_num_kv_heads >= tp_size:
            # Number of KV heads is greater than TP size, so we partition
            # the KV heads across multiple tensor parallel GPUs.
            assert self.total_num_kv_heads % tp_size == 0
        else:
            # Number of KV heads is less than TP size, so we replicate
            # the KV heads across multiple tensor parallel GPUs.
            assert tp_size % self.total_num_kv_heads == 0
        self.num_kv_heads = max(1, self.total_num_kv_heads // tp_size)
        self.head_dim = self.hidden_size // self.total_num_heads
        self.q_size = self.num_heads * self.head_dim
        self.kv_size = self.num_kv_heads * self.head_dim
        self.scaling = self.head_dim**-0.5
        self.rope_theta = config.rope_theta
        self.max_position_embeddings = config.max_position_embeddings
        self.use_bias = config.use_bias

        self.qkv_proj = QKVParallelLinear(
            self.hidden_size,
            self.head_dim,
            self.total_num_heads,
            self.total_num_kv_heads,
            bias=self.use_bias,
            quant_config=quant_config,
        )
        self.o_proj = RowParallelLinear(
            self.total_num_heads * self.head_dim,
            self.hidden_size,
            bias=self.use_bias,
            quant_config=quant_config,
        )
        self.rotary_emb = get_rope(
            self.head_dim,
            rotary_dim=self.head_dim,
            max_position=self.max_position_embeddings,
            base=int(self.rope_theta),
            is_neox_style=True,
        )
        self.attn = Attention(self.num_heads,
                              self.head_dim,
                              self.scaling,
                              num_kv_heads=self.num_kv_heads,
                              cache_config=cache_config,
                              quant_config=quant_config)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
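        """Run attention for one batch of tokens.

        Projects the hidden states to fused QKV, applies rotary position
        embeddings to the query and key, attends using the provided KV
        cache and attention metadata, and applies the output projection.
        """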
        qkv, _ = self.qkv_proj(hidden_states)
        q, k, v = qkv.split([self.q_size, self.kv_size, self.kv_size], dim=-1)
        q, k = self.rotary_emb(positions, q, k)
        attn_output = self.attn(q, k, v, kv_cache, attn_metadata)
        output, _ = self.o_proj(attn_output)
        return output


class Starcoder2MLP(nn.Module):
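    """Feed-forward block: up-projection, activation, down-projection."""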

    def __init__(self,
                 config: Starcoder2Config,
                 quant_config: Optional[QuantizationConfig] = None):
        super().__init__()
        self.c_fc = ColumnParallelLinear(
            config.hidden_size,
            config.intermediate_size,
            bias=config.use_bias,
            quant_config=quant_config,
        )
        self.c_proj = RowParallelLinear(
            config.intermediate_size,
            config.hidden_size,
            bias=config.use_bias,
            quant_config=quant_config,
        )
        self.act = get_act_fn(config.hidden_act, quant_config,
                              config.intermediate_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states, _ = self.c_fc(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states, _ = self.c_proj(hidden_states)
        return hidden_states


class Starcoder2DecoderLayer(nn.Module):
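    """A single transformer block: pre-LayerNorm attention and MLP,
    each wrapped in a residual connection.
    """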

    def __init__(self,
                 config: Starcoder2Config,
                 cache_config: Optional[CacheConfig] = None,
                 quant_config: Optional[QuantizationConfig] = None):
        super().__init__()
        self.hidden_size = config.hidden_size
        self.self_attn = Starcoder2Attention(config,
                                             cache_config,
                                             quant_config=quant_config)
        self.mlp = Starcoder2MLP(config, quant_config=quant_config)
        self.input_layernorm = nn.LayerNorm(config.hidden_size,
                                            eps=config.norm_epsilon)
        self.post_attention_layernorm = nn.LayerNorm(config.hidden_size,
                                                     eps=config.norm_epsilon)

    def forward(
        self,
        positions: torch.Tensor,
        hidden_states: torch.Tensor,
        kv_cache: torch.Tensor,
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
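        """Apply self-attention and the MLP, each preceded by LayerNorm
        and followed by a residual add.
        """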
        # Self Attention
        residual = hidden_states
        hidden_states = self.input_layernorm(hidden_states)
        hidden_states = self.self_attn(
            positions=positions,
            hidden_states=hidden_states,
            kv_cache=kv_cache,
            attn_metadata=attn_metadata,
        )
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.post_attention_layernorm(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states
        return hidden_states


class Starcoder2Model(nn.Module):
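    """The bare Starcoder2 transformer: token embeddings, a stack of
    decoder layers, and a final LayerNorm.
    """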

    def __init__(self,
                 config: Starcoder2Config,
                 cache_config: Optional[CacheConfig] = None,
                 quant_config: Optional[QuantizationConfig] = None):
        super().__init__()
        self.config = config
        self.padding_idx = config.pad_token_id
        self.vocab_size = config.vocab_size

        # TODO: consider padding_idx (currently removed)
        self.embed_tokens = VocabParallelEmbedding(config.vocab_size,
                                                   config.hidden_size)
        self.layers = nn.ModuleList([
            Starcoder2DecoderLayer(config,
                                   cache_config,
                                   quant_config=quant_config)
            for _ in range(config.num_hidden_layers)
        ])
        self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_epsilon)

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
    ) -> torch.Tensor:
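        """Embed the input tokens, run them through every decoder layer
        (the i-th layer uses the i-th KV cache), and apply the final
        LayerNorm.
        """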
        hidden_states = self.embed_tokens(input_ids)
        for i in range(len(self.layers)):
            layer = self.layers[i]
            hidden_states = layer(positions, hidden_states, kv_caches[i],
                                  attn_metadata)
        hidden_states = self.norm(hidden_states)
        return hidden_states


class Starcoder2ForCausalLM(nn.Module):
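    """Starcoder2 with a language-modeling head for causal generation.

    When ``config.tie_word_embeddings`` is set, the input embedding
    matrix is reused as the LM head; otherwise a separate parallel LM
    head (padded to ``DEFAULT_VOCAB_PADDING_SIZE``) is created.
    """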

    def __init__(self,
                 config: Starcoder2Config,
                 cache_config: Optional[CacheConfig] = None,
                 quant_config: Optional[QuantizationConfig] = None):
        super().__init__()
        self.config = config
        self.model = Starcoder2Model(config,
                                     cache_config,
                                     quant_config=quant_config)
        self.vocab_size = config.vocab_size
        self.unpadded_vocab_size = config.vocab_size
        if config.tie_word_embeddings:
            self.lm_head = self.model.embed_tokens
        else:
            self.unpadded_vocab_size = config.vocab_size
            self.lm_head = ParallelLMHead(
                self.unpadded_vocab_size,
                config.hidden_size,
                org_num_embeddings=config.vocab_size,
                padding_size=DEFAULT_VOCAB_PADDING_SIZE,
                quant_config=quant_config,
            )
        self.logits_processor = LogitsProcessor(self.unpadded_vocab_size,
                                                config.vocab_size)
        self.sampler = Sampler()

    def forward(
        self,
        input_ids: torch.Tensor,
        positions: torch.Tensor,
        kv_caches: List[torch.Tensor],
        attn_metadata: AttentionMetadata,
        intermediate_tensors: Optional[IntermediateTensors] = None,
    ) -> torch.Tensor:
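        """Return the final hidden states for the given tokens.

        Logits are not computed here; they are produced separately via
        ``compute_logits``, and tokens are drawn via ``sample``.
        """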
        hidden_states = self.model(input_ids, positions, kv_caches,
                                   attn_metadata)
        return hidden_states

    def compute_logits(
        self,
        hidden_states: torch.Tensor,
        sampling_metadata: SamplingMetadata,
    ) -> Optional[torch.Tensor]:
        logits = self.logits_processor(self.lm_head, hidden_states,
                                       sampling_metadata)
        return logits

    def sample(
        self,
        logits: Optional[torch.Tensor],
        sampling_metadata: SamplingMetadata,
    ) -> Optional[SamplerOutput]:
        next_tokens = self.sampler(logits, sampling_metadata)
        return next_tokens

    def load_weights(self, weights: Iterable[Tuple[str, torch.Tensor]]):
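        """Load checkpoint weights into this module.

        Separate q_proj/k_proj/v_proj checkpoint tensors are loaded into
        the corresponding shards of the fused ``qkv_proj`` parameter.
        For example, a weight named
        "model.layers.0.self_attn.q_proj.weight" would be loaded into
        "model.layers.0.self_attn.qkv_proj.weight" with shard id "q"
        (the exact prefix depends on the checkpoint layout).
        Rotary ``inv_freq`` buffers are skipped, as is ``lm_head.weight``
        when the word embeddings are tied.
        """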
        stacked_params_mapping = [
            # (param_name, shard_name, shard_id)
            ("qkv_proj", "q_proj", "q"),
            ("qkv_proj", "k_proj", "k"),
            ("qkv_proj", "v_proj", "v"),
        ]

        params_dict = dict(self.named_parameters(remove_duplicate=False))
        for name, loaded_weight in weights:
            if "rotary_emb.inv_freq" in name:
                continue

            for (param_name, weight_name, shard_id) in stacked_params_mapping:
                if weight_name not in name:
                    continue
                name = name.replace(weight_name, param_name)
                param = params_dict[name]
                weight_loader = param.weight_loader
                weight_loader(param, loaded_weight, shard_id)
                break
            else:
                if self.config.tie_word_embeddings and "lm_head.weight" in name:
                    continue
                param = params_dict[name]
                weight_loader = getattr(param, "weight_loader",
                                        default_weight_loader)
                weight_loader(param, loaded_weight)