from abc import abstractmethod
from typing import Dict, List, Optional, Tuple

import torch
import torch.nn.functional as F
from loguru import logger
from torch.nn.parameter import Parameter, UninitializedParameter

# yapf: disable
from aphrodite.distributed import (divide,
                                   get_current_tp_rank_partition_offset,
                                   get_current_tp_rank_partition_size,
                                   get_tensor_model_parallel_rank,
                                   get_tensor_model_parallel_world_size,
                                   split_tensor_along_last_dim,
                                   tensor_model_parallel_all_gather,
                                   tensor_model_parallel_all_reduce)
from aphrodite.modeling.parameter import (BaseAphroditeParameter,
                                          PackedAphroditeParameter,
                                          PerTensorScaleParameter)
# yapf: enable
from aphrodite.modeling.utils import set_weight_attrs
from aphrodite.quantization.base_config import (QuantizationConfig,
                                                QuantizeMethodBase)

WEIGHT_LOADER_V2_SUPPORTED = [
    "CompressedTensorsLinearMethod", "GPTQMarlinLinearMethod",
    "AWQMarlinLinearMethod", "AWQLinearMethod", "HQQMarlinMethod",
]


def adjust_marlin_shard(param, shard_size, shard_offset):
    marlin_tile_size = getattr(param, "marlin_tile_size", None)
    if marlin_tile_size is None:
        return shard_size, shard_offset

    return shard_size * marlin_tile_size, shard_offset * marlin_tile_size
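

# Illustrative sketch (not part of the original module): Marlin stores weights
# in tiles, so shard indexes along the packed dimension must be scaled by the
# tile size. The numbers below are assumptions chosen only for the example.
#
#     shard_size, shard_offset = 128, 256
#     # with param.marlin_tile_size == 16, the adjusted values become
#     # (128 * 16, 256 * 16) == (2048, 4096)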


def adjust_bitsandbytes_shard(param: Parameter,
                              qkv_offsets: Dict[str, Tuple[int, int]],
                              loaded_shard_id: str) -> Tuple[int, int]:
    """Adjust the quantization offsets and sizes for BitsAndBytes sharding."""

    total, _ = qkv_offsets["total"]
    orig_offset, orig_size = qkv_offsets[loaded_shard_id]

    quantized_total = param.data.shape[0]
    quantized_offset = orig_offset * quantized_total // total
    quantized_size = orig_size * quantized_total // total

    return quantized_size, quantized_offset
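

# Illustrative sketch (not part of the original module): the offsets above are
# rescaled proportionally from the unquantized QKV layout to the quantized
# (flattened) buffer. The figures below are assumptions for the example only.
#
#     qkv_offsets = {"total": (1536, 0), "q": (0, 1024),
#                    "k": (1024, 256), "v": (1280, 256)}
#     # with param.data.shape[0] == 768 (half the unquantized total), the "k"
#     # shard maps to offset 1024 * 768 // 1536 == 512 and
#     # size 256 * 768 // 1536 == 128.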


def adjust_scalar_to_fused_array(param, loaded_weight, shard_id):
    """For fused modules (QKV and MLP) we have an array of length
    N that holds 1 scale for each "logical" matrix. So the param
    is an array of length N. The loaded_weight corresponds to
    one of the shards on disk. Here, we slice the param based on
    the shard_id for loading.
    """
    qkv_idxs = {"q": 0, "k": 1, "v": 2}

    if isinstance(shard_id, str):
        shard_id = qkv_idxs[shard_id]
    elif not isinstance(shard_id, int):
        raise ValueError(f"Unknown Shard Id {shard_id}")

    # AutoFP8 scales do not have a shape
    # compressed-tensors scales do have a shape
    if len(loaded_weight.shape) != 0:
        assert loaded_weight.shape[0] == 1
        loaded_weight = loaded_weight[0]

    return param[shard_id], loaded_weight
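

# Illustrative sketch (not part of the original module): for a fused QKV
# module with one per-tensor scale per logical matrix, the scale parameter is
# a length-3 array and each checkpoint scale is loaded into its own slot.
# Shapes and values here are assumptions for the example only.
def _example_adjust_scalar_to_fused_array() -> None:
    param = torch.zeros(3)              # one scale slot each for q, k, v
    loaded_weight = torch.tensor(0.02)  # scalar scale, as with AutoFP8
    param_slice, value = adjust_scalar_to_fused_array(param, loaded_weight, "k")
    param_slice.copy_(value)            # only the "k" slot is written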


class LinearMethodBase(QuantizeMethodBase):
    """Base class for different (maybe quantized) linear methods."""

    @abstractmethod
    def create_weights(self, layer: torch.nn.Module,
                       input_size_per_partition: int,
                       output_partition_sizes: List[int], input_size: int,
                       output_size: int, params_dtype: torch.dtype,
                       **extra_weight_attrs):
        """Create weights for a linear layer.
        The weights will be set as attributes of the layer.

        Args:
            layer: The layer that is using the LinearMethodBase factory.
            input_size_per_partition: Size of the weight input dim on rank X.
            output_partition_sizes: Sizes of the output dim of each logical
                weight on rank X. E.g., output_partition_sizes for QKVLinear
                is a list containing the widths of Wq, Wk, Wv on rank X.
            input_size: Size of the input dim of the weight across all ranks.
            output_size: Size of the output dim of the weight across all ranks.
            params_dtype: Datatype of the parameters.
        """
        raise NotImplementedError

    @abstractmethod
    def apply(self,
              layer: torch.nn.Module,
              x: torch.Tensor,
              bias: Optional[torch.Tensor] = None) -> torch.Tensor:
        """Apply the weights in layer to the input tensor.

        Expects create_weights to have been called before on the layer."""
        raise NotImplementedError


class UnquantizedLinearMethod(LinearMethodBase):
    """Linear method without quantization."""

    def create_weights(self, layer: torch.nn.Module,
                       input_size_per_partition: int,
                       output_partition_sizes: List[int], input_size: int,
                       output_size: int, params_dtype: torch.dtype,
                       **extra_weight_attrs):
        weight = Parameter(torch.empty(sum(output_partition_sizes),
                                       input_size_per_partition,
                                       dtype=params_dtype),
                           requires_grad=False)
        set_weight_attrs(weight, {"input_dim": 1, "output_dim": 0})
        layer.register_parameter("weight", weight)
        set_weight_attrs(weight, extra_weight_attrs)

    def apply(self,
              layer: torch.nn.Module,
              x: torch.Tensor,
              bias: Optional[torch.Tensor] = None) -> torch.Tensor:
        return F.linear(x, layer.weight, bias)
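

# Illustrative sketch (not part of the original module): the unquantized
# method registers a [sum(output_partition_sizes), input_size_per_partition]
# weight on the layer and applies it with F.linear. The sizes are assumptions
# chosen only for the example; no tensor parallelism is involved here.
def _example_unquantized_linear_method() -> None:
    layer = torch.nn.Module()
    method = UnquantizedLinearMethod()
    method.create_weights(layer,
                          input_size_per_partition=16,
                          output_partition_sizes=[8, 8],  # e.g. a fused pair
                          input_size=16,
                          output_size=16,
                          params_dtype=torch.float32)
    layer.weight.data.normal_()  # torch.empty leaves the values uninitialized
    x = torch.randn(4, 16)
    out = method.apply(layer, x)  # shape: (4, 16) == (4, 8 + 8)
    assert out.shape == (4, 16)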


class LinearBase(torch.nn.Module):
    """Base linear layer.

    Args:
        input_size: input dimension of the linear layer.
        output_size: output dimension of the linear layer.
        bias: If true, add bias.
        skip_bias_add: If true, skip adding bias but instead return it.
        params_dtype: Data type for the parameters.
        quant_config: Quantization config.
    """

    def __init__(
        self,
        input_size: int,
        output_size: int,
        skip_bias_add: bool = False,
        params_dtype: Optional[torch.dtype] = None,
        quant_config: Optional[QuantizationConfig] = None,
        prefix: str = "",
    ):
        super().__init__()

        # Keep input parameters
        self.input_size = input_size
        self.output_size = output_size
        self.skip_bias_add = skip_bias_add
        if params_dtype is None:
            params_dtype = torch.get_default_dtype()
        self.params_dtype = params_dtype
        if quant_config is None:
            self.quant_method: Optional[
                QuantizeMethodBase] = UnquantizedLinearMethod()
        else:
            self.quant_method = quant_config.get_quant_method(self,
                                                              prefix=prefix)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        raise NotImplementedError


class ReplicatedLinear(LinearBase):
    """Replicated linear layer.

    Args:
        input_size: input dimension of the linear layer.
        output_size: output dimension of the linear layer.
        bias: If true, add bias.
        skip_bias_add: If true, skip adding bias but instead return it.
        params_dtype: Data type for the parameters.
        quant_config: Quantization config.
        prefix: The name of the layer in the state dict, including all parents
            (e.g. model.layers.0.qkv_proj)
    """

    def __init__(self,
                 input_size: int,
                 output_size: int,
                 bias: bool = True,
                 skip_bias_add: bool = False,
                 params_dtype: Optional[torch.dtype] = None,
                 quant_config: Optional[QuantizationConfig] = None,
                 prefix: str = ""):
        super().__init__(input_size,
                         output_size,
                         skip_bias_add,
                         params_dtype,
                         quant_config,
                         prefix=prefix)

        # All linear layers support a quant method.
        assert self.quant_method is not None
        self.quant_method.create_weights(self,
                                         self.input_size, [self.output_size],
                                         self.input_size,
                                         self.output_size,
                                         self.params_dtype,
                                         prefix=prefix)

        if bias:
            self.bias = Parameter(
                torch.empty(self.output_size, dtype=self.params_dtype))
            set_weight_attrs(self.bias, {"output_dim": 0})
        else:
            self.register_parameter("bias", None)

    def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
        # If the weight on disk does not have a shape, give it one
        # (such as scales for AutoFP8).
        if len(loaded_weight.shape) == 0:
            loaded_weight = loaded_weight.reshape(1)

        assert param.size() == loaded_weight.size()
        param.data.copy_(loaded_weight)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        bias = self.bias if not self.skip_bias_add else None
        assert self.quant_method is not None
        output = self.quant_method.apply(self, x, bias)
        output_bias = self.bias if self.skip_bias_add else None
        return output, output_bias

    def extra_repr(self) -> str:
        s = f"in_features={self.input_size}"
        s += f", output_features={self.output_size}"
        s += f", bias={self.bias is not None}"
        return s
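

# Illustrative sketch (not part of the original module): a replicated layer
# keeps the full weight on every rank, so the unquantized path needs no
# process group. Sizes and the random input are assumptions for the example.
def _example_replicated_linear() -> None:
    layer = ReplicatedLinear(input_size=64, output_size=128, bias=True)
    layer.weight.data.normal_()  # weights start uninitialized
    layer.bias.data.zero_()
    x = torch.randn(2, 64, dtype=layer.params_dtype)
    output, output_bias = layer(x)  # forward returns (output, optional bias)
    assert output.shape == (2, 128) and output_bias is None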


class ColumnParallelLinear(LinearBase):
    """Linear layer with column parallelism.

    The linear layer is defined as Y = XA + b. A is parallelized along
    its second dimension as A = [A_1, ..., A_p].

    Args:
        input_size: first dimension of matrix A.
        output_size: second dimension of matrix A.
        bias: If true, add bias.
        gather_output: If true, call all-gather on output and make Y available
                       to all GPUs, otherwise, every GPU will have its own
                       output, which is Y_i = XA_i.
        skip_bias_add: This was added to enable performance optimizations where
                       bias can be fused with other element-wise operations. We
                       skip adding bias but instead return it.
        params_dtype: Data type for the parameters.
        quant_config: Quantization config.
        output_sizes: list of output sizes packed into one output, e.g. for
                      QKV the list has three entries.
        prefix: The name of the layer in the state dict, including all parents
                (e.g. model.layers.0.qkv_proj)
    """

    def __init__(self,
                 input_size: int,
                 output_size: int,
                 bias: bool = True,
                 gather_output: bool = False,
                 skip_bias_add: bool = False,
                 params_dtype: Optional[torch.dtype] = None,
                 quant_config: Optional[QuantizationConfig] = None,
                 output_sizes: Optional[List[int]] = None,
                 prefix: str = ""):
        super().__init__(input_size, output_size, skip_bias_add, params_dtype,
                         quant_config, prefix)

        self.gather_output = gather_output

        # Divide the weight matrix along the last dimension.
        tp_rank = get_tensor_model_parallel_rank()
        tp_size = get_tensor_model_parallel_world_size()
        assert self.quant_method is not None
        if quant_config is None:
            self.output_size_per_partition = get_current_tp_rank_partition_size(
                output_size, tp_rank, tp_size)
        else:
            self.output_size_per_partition = divide(self.output_size, tp_size)
        self.output_partition_sizes = [self.output_size_per_partition]
        # If QKV or MergedColumn, use output size of each partition.
        if hasattr(self, "output_sizes"):
            if quant_config is None:
                self.output_partition_sizes = [
                    get_current_tp_rank_partition_size(output_size, tp_rank,
                                                       tp_size)
                    for output_size in self.output_sizes
                ]
            else:
                self.output_partition_sizes = [
                    divide(output_size, tp_size)
                    for output_size in self.output_sizes
                ]

        if output_sizes is None:
            output_sizes = [output_size]
        self.quant_method.create_weights(
            layer=self,
            input_size_per_partition=self.input_size,
            output_partition_sizes=self.output_partition_sizes,
            input_size=self.input_size,
            output_size=self.output_size,
            params_dtype=self.params_dtype,
            weight_loader=(
                self.weight_loader_v2 if self.quant_method.__class__.__name__
                in WEIGHT_LOADER_V2_SUPPORTED else self.weight_loader),
            prefix=prefix)
        if bias:
            self.bias = Parameter(
                torch.empty(self.output_size_per_partition,
                            dtype=params_dtype))
            set_weight_attrs(self.bias, {
                "output_dim": 0,
                "weight_loader": self.weight_loader,
            })
        else:
            self.register_parameter("bias", None)

    def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
        tp_rank = get_tensor_model_parallel_rank()
        output_dim = getattr(param, "output_dim", None)

        # Special case for GGUF
        is_gguf_weight = getattr(param, "is_gguf_weight", False)
        is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
        if is_gguf_weight_type:
            param.weight_type = loaded_weight.item()

        # Materialize GGUF UninitializedParameter
        if is_gguf_weight and isinstance(param, UninitializedParameter):
            param.materialize(loaded_weight.shape, dtype=loaded_weight.dtype)

        param_data = param.data
        if output_dim is not None:
            shard_size = param_data.shape[output_dim]
            start_idx = tp_rank * shard_size
            loaded_weight = loaded_weight.narrow(output_dim, start_idx,
                                                 shard_size)

        # Special case for loading scales off disk, which often do not
        # have a shape (such as in the case of AutoFP8).
        if len(loaded_weight.shape) == 0:
            loaded_weight = loaded_weight.reshape(1)

        assert param_data.shape == loaded_weight.shape
        param_data.copy_(loaded_weight)

    def weight_loader_v2(self, param: Parameter, loaded_weight: torch.Tensor):
        param.load_column_parallel_weight(loaded_weight=loaded_weight)

    def forward(self, input_):
        bias = self.bias if not self.skip_bias_add else None

        # Matrix multiply.
        assert self.quant_method is not None
        output_parallel = self.quant_method.apply(self, input_, bias)
        if self.gather_output:
            # All-gather across the partitions.
            output = tensor_model_parallel_all_gather(output_parallel)
        else:
            output = output_parallel
        output_bias = self.bias if self.skip_bias_add else None
        return output, output_bias

    def extra_repr(self) -> str:
        s = f"in_features={self.input_size}"
        s += f", output_features={self.output_size_per_partition}"
        s += f", bias={self.bias is not None}"
        s += f", tp_size={get_tensor_model_parallel_world_size()}"
        s += f", gather_output={self.gather_output}"
        return s
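

# Illustrative sketch (not part of the original module): with column
# parallelism each rank owns a contiguous slice of the output dimension, so
# weight_loader narrows the checkpoint tensor before copying. The numbers
# below are assumptions for the example and assume the size divides evenly.
#
#     output_size = 4096, tp_size = 4
#     # output_size_per_partition == 1024 on every rank
#     # rank 2 loads loaded_weight.narrow(output_dim, 2 * 1024, 1024)
#     # forward returns Y_i = X @ A_i (+ b_i); with gather_output=True the
#     # partitions are all-gathered back into the full 4096-wide output.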


class MergedColumnParallelLinear(ColumnParallelLinear):
    """Packed linear layers with column parallelism.

    Similar to ColumnParallelLinear, but the weight matrix is concatenated
    along the output dimension. When the weight matrix is loaded, the
    different partitions are sharded separately.

    Args:
        input_size: input dimension of the linear layer.
        output_sizes: list of output dimensions of the linear layer.
        bias: If true, add bias.
        gather_output: If true, call all-gather on output and make the output
                       available to all GPUs, otherwise, every GPU will have
                       its own output.
        skip_bias_add: This was added to enable performance optimizations where
                       bias can be fused with other element-wise operations. We
                       skip adding bias but instead return it.
        params_dtype: Data type for the parameters.
        quant_config: Quantization config.
        prefix: The name of the layer in the state dict, including all parents
                (e.g. model.layers.0.qkv_proj)
    """

    def __init__(self,
                 input_size: int,
                 output_sizes: List[int],
                 bias: bool = True,
                 gather_output: bool = False,
                 skip_bias_add: bool = False,
                 params_dtype: Optional[torch.dtype] = None,
                 quant_config: Optional[QuantizationConfig] = None,
                 prefix: str = ""):
        self.output_sizes = output_sizes
        self.quant_config = quant_config
        if quant_config is not None:
            tp_size = get_tensor_model_parallel_world_size()
            assert all(output_size % tp_size == 0
                       for output_size in output_sizes)
        super().__init__(input_size=input_size,
                         output_size=sum(output_sizes),
                         bias=bias,
                         gather_output=gather_output,
                         skip_bias_add=skip_bias_add,
                         params_dtype=params_dtype,
                         quant_config=quant_config,
                         prefix=prefix)

    def weight_loader(self,
                      param: Parameter,
                      loaded_weight: torch.Tensor,
                      loaded_shard_id: Optional[int] = None):

        # Special case for GGUF
        # initialize GGUF param after we know the quantize type
        is_gguf_weight = getattr(param, "is_gguf_weight", False)
        is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
        if is_gguf_weight_type:
            param.data[loaded_shard_id].copy_(loaded_weight)
            param.shard_weight_type[loaded_shard_id] = loaded_weight.item()
            return

        if is_gguf_weight and isinstance(param, UninitializedParameter):
            from gguf.constants import GGML_QUANT_SIZES

            ori_shape = param.tensor_shape
            weight_types = self.qweight_type.shard_weight_type.values()
            row_size = []
            for weight_type in weight_types:
                block_size, type_size = GGML_QUANT_SIZES[weight_type]
                row_size.append(ori_shape[1] // block_size * type_size)
            q_shape = (ori_shape[0], max(row_size))
            param.materialize(q_shape, dtype=loaded_weight.dtype)

        param_data = param.data
        output_dim = getattr(param, "output_dim", None)
        # Special case for AQLM codebooks.
        is_metadata = getattr(param, "is_metadata", False)
        # Special case for per-tensor scale to load scalar into fused array.
        needs_scalar_to_array = getattr(param, "needs_scalar_to_array", False)

        if loaded_shard_id is None:
            # Loaded weight is already fused on disk (qkv/mlp).
            if output_dim is None:
                if needs_scalar_to_array:
                    param_data, loaded_weight = adjust_scalar_to_fused_array(
                        param_data, loaded_weight, 0)

                assert param_data.shape == loaded_weight.shape
                param_data.copy_(loaded_weight)
                return
            current_shard_offset = 0
            shard_offsets: List[Tuple[int, int, int]] = []
            for i, output_size in enumerate(self.output_sizes):
                shard_offsets.append((i, current_shard_offset, output_size))
                current_shard_offset += output_size
            packed_dim = getattr(param, "packed_dim", None)
            for shard_id, shard_offset, shard_size in shard_offsets:
                # Special case for Quantization.
                # If quantized, we need to adjust the offset and size to
                # account for the packing.
                if packed_dim == output_dim:
                    shard_size = shard_size // param.pack_factor
                    shard_offset = shard_offset // param.pack_factor
                    # Special case for Marlin.
                    shard_size, shard_offset = adjust_marlin_shard(
                        param, shard_size, shard_offset)

                loaded_weight_shard = loaded_weight.narrow(
                    output_dim, shard_offset, shard_size)
                self.weight_loader(param, loaded_weight_shard, shard_id)
            return

        assert loaded_shard_id < len(self.output_sizes)
        tp_rank = get_tensor_model_parallel_rank()
        tp_size = get_tensor_model_parallel_world_size()
        if output_dim is not None:
            if self.quant_config is None:
                shard_offset = sum(
                    get_current_tp_rank_partition_size(output_size, tp_rank,
                                                       tp_size)
                    for output_size in self.output_sizes[:loaded_shard_id])
                shard_size = get_current_tp_rank_partition_size(
                    self.output_sizes[loaded_shard_id], tp_rank, tp_size)
            else:
                shard_offset = sum(
                    self.output_sizes[:loaded_shard_id]) // tp_size
                shard_size = self.output_sizes[loaded_shard_id] // tp_size
            # Special case for quantization.
            # If quantized, we need to adjust the offset and size to account
            # for the packing.
            packed_dim = getattr(param, "packed_dim", None)
            if packed_dim == output_dim:
                shard_size = shard_size // param.pack_factor
                shard_offset = shard_offset // param.pack_factor
                # Special case for Marlin.
                shard_size, shard_offset = adjust_marlin_shard(
                    param, shard_size, shard_offset)

            use_bitsandbytes = getattr(param, "use_bitsandbytes", False)
            if use_bitsandbytes:
                shard_size = loaded_weight.shape[output_dim]
                shard_offset = loaded_weight.shape[output_dim] * \
                    loaded_shard_id

            if is_gguf_weight:
                tp_size = get_tensor_model_parallel_world_size()
                output_dim = getattr(param, "output_dim", None)
                shard_shape = list(loaded_weight.shape)
                shard_shape[output_dim] = shard_shape[output_dim] // tp_size
                param.shard_id.append(loaded_shard_id)
                param.shard_size[loaded_shard_id] = shard_shape

                input_dim = getattr(param, "input_dim", None)
                input_size = loaded_weight.shape[input_dim]
                param_data = param_data.narrow(input_dim, 0, input_size)

            param_data = param_data.narrow(output_dim, shard_offset,
                                           shard_size)
            if self.quant_config is None:
                start_idx = get_current_tp_rank_partition_offset(
                    loaded_weight.shape[output_dim], tp_rank, tp_size)
            else:
                start_idx = tp_rank * shard_size
            loaded_weight = loaded_weight.narrow(output_dim, start_idx,
                                                 shard_size)
        # Special case for AQLM codebooks.
        elif is_metadata:
            # metadata indicates fixed size concatenated along dim 0
            shard_size = loaded_weight.shape[0]
            shard_offset = loaded_shard_id * shard_size
            param_data = param_data.narrow(0, shard_offset, shard_size)
        # Special case for per-tensor scales in fused case.
        elif needs_scalar_to_array:
            param_data, loaded_weight = adjust_scalar_to_fused_array(
                param_data, loaded_weight, loaded_shard_id)
        else:
            ignore_warning = getattr(param, "ignore_warning", False)
            if not ignore_warning:
                logger.warning(
                    "Loading a weight without `output_dim` attribute in "
                    "MergedColumnParallelLinear, assume the weight is "
                    "the same for all partitions.")

        assert param_data.shape == loaded_weight.shape
        param_data.copy_(loaded_weight)

    def _load_fused_module_from_checkpoint(self, param: BaseAphroditeParameter,
                                           loaded_weight: torch.Tensor):
        """
        Handle special case for models where MLP layers are already
        fused on disk. In this case, we have no shard id. This function
        determines the shard id by splitting these layers and then calls
        the weight loader using the shard id.

        An example of a model with these fused layers:
        https://huggingface.co/microsoft/Phi-3-mini-4k-instruct
        """
        current_shard_offset = 0
        shard_offsets: List[Tuple[int, int, int]] = []
        for i, output_size in enumerate(self.output_sizes):
            shard_offsets.append((i, current_shard_offset, output_size))
            current_shard_offset += output_size

        for shard_id, shard_offset, shard_size in shard_offsets:
            # Special case for Quantization.
            # If quantized, we need to adjust the offset and size to account
            # for the packing.
            if isinstance(param, PackedAphroditeParameter
                          ) and param.packed_dim == param.output_dim:
                shard_size, shard_offset = \
                    param.adjust_shard_indexes_for_packing(
                        shard_size=shard_size, shard_offset=shard_offset)

            loaded_weight_shard = loaded_weight.narrow(param.output_dim,
                                                       shard_offset,
                                                       shard_size)
            self.weight_loader_v2(param, loaded_weight_shard, shard_id)

    def weight_loader_v2(self,
                         param: BaseAphroditeParameter,
                         loaded_weight: torch.Tensor,
                         loaded_shard_id: Optional[int] = None):
        if loaded_shard_id is None:
            if isinstance(param, PerTensorScaleParameter):
                param.load_merged_column_weight(loaded_weight=loaded_weight,
                                                shard_id=0)
                return
            elif type(param) is BaseAphroditeParameter:
                param.load_merged_column_weight(loaded_weight=loaded_weight)
                return
            self._load_fused_module_from_checkpoint(param, loaded_weight)
            return

        assert loaded_shard_id < len(self.output_sizes)

        tp_size = get_tensor_model_parallel_world_size()
        shard_offset = sum(self.output_sizes[:loaded_shard_id]) // tp_size
        shard_size = self.output_sizes[loaded_shard_id] // tp_size

        param.load_merged_column_weight(loaded_weight=loaded_weight,
                                        shard_id=loaded_shard_id,
                                        shard_offset=shard_offset,
                                        shard_size=shard_size)
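

# Illustrative sketch (not part of the original module): the canonical use of
# MergedColumnParallelLinear is a fused gate/up projection in an MLP, where
# two logical matrices live in one parameter and are loaded shard by shard.
# The sizes and the tensors gate_w / up_w are assumptions for the example;
# real checkpoints are loaded through the model loader, not direct calls.
#
#     gate_up = MergedColumnParallelLinear(input_size=4096,
#                                          output_sizes=[11008, 11008])
#     # checkpoint side: separate gate_proj / up_proj tensors
#     gate_up.weight_loader(gate_up.weight, gate_w, loaded_shard_id=0)
#     gate_up.weight_loader(gate_up.weight, up_w, loaded_shard_id=1)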


class QKVParallelLinear(ColumnParallelLinear):
    """Linear layers for the attention's QKV transformation.

    Linear layers for the linear transformation of the query, key, and value
    vectors in the attention layer. The weight matrix is concatenated along
    the output dimension. The layer is parallelized along the head dimension.
    When the number of key/value heads is smaller than the number of query
    heads (e.g., multi-query/grouped-query attention), the key/value heads may
    be replicated while the query heads are partitioned.

    Args:
        hidden_size: input hidden state size of the transformer.
        head_size: size of each attention head.
        total_num_heads: total number of attention query heads.
        total_num_kv_heads: total number of attention key/value heads. If
                            None, assume total_num_kv_heads = total_num_heads.
        bias: If true, add bias.
        skip_bias_add: This was added to enable performance optimizations where
                       bias can be fused with other element-wise operations. We
                       skip adding bias but instead return it.
        params_dtype: Data type for the parameters.
        quant_config: Quantization config.
        prefix: The name of the layer in the state dict, including all parents
                (e.g. model.layers.0.qkv_proj)
    """

    def __init__(self,
                 hidden_size: int,
                 head_size: int,
                 total_num_heads: int,
                 total_num_kv_heads: Optional[int] = None,
                 bias: bool = True,
                 skip_bias_add: bool = False,
                 params_dtype: Optional[torch.dtype] = None,
                 quant_config: Optional[QuantizationConfig] = None,
                 prefix: str = ""):
        self.hidden_size = hidden_size
        self.head_size = head_size
        self.total_num_heads = total_num_heads
        self.quant_config = quant_config
        if total_num_kv_heads is None:
            total_num_kv_heads = total_num_heads
        self.total_num_kv_heads = total_num_kv_heads
        # Divide the weight matrix along the last dimension.
        tp_size = get_tensor_model_parallel_world_size()
        tp_rank = get_tensor_model_parallel_rank()
        if quant_config is None:
            self.num_heads_per_kv_head = (self.total_num_heads //
                                          self.total_num_kv_heads)
            self.num_kv_heads = get_current_tp_rank_partition_size(
                self.total_num_kv_heads, tp_rank, tp_size)
            self.num_heads = self.num_kv_heads * self.num_heads_per_kv_head
            self.num_kv_head_replicas = 1
        else:
            self.num_heads = divide(self.total_num_heads, tp_size)
            if tp_size >= self.total_num_kv_heads:
                self.num_kv_heads = 1
                self.num_kv_head_replicas = divide(tp_size,
                                                   self.total_num_kv_heads)
            elif tp_size < self.total_num_kv_heads and quant_config is not None:
                self.num_kv_heads = divide(self.total_num_kv_heads, tp_size)
                self.num_kv_head_replicas = 1
        input_size = self.hidden_size
        output_size = (self.num_heads +
                       2 * self.num_kv_heads) * tp_size * self.head_size
        self.output_sizes = [
            self.num_heads * self.head_size * tp_size,  # q_proj
            self.num_kv_heads * self.head_size * tp_size,  # k_proj
            self.num_kv_heads * self.head_size * tp_size,  # v_proj
        ]

        super().__init__(input_size=input_size,
                         output_size=output_size,
                         bias=bias,
                         gather_output=False,
                         skip_bias_add=skip_bias_add,
                         params_dtype=params_dtype,
                         quant_config=quant_config,
                         prefix=prefix)

    def _get_shard_offset_mapping(self, loaded_shard_id: str):
        shard_offset_mapping = {
            "q": 0,
            "k": self.num_heads * self.head_size,
            "v": (self.num_heads + self.num_kv_heads) * self.head_size,
            "total": (self.num_heads + 2 * self.num_kv_heads) * self.head_size
        }
        return shard_offset_mapping.get(loaded_shard_id)

    def _get_shard_size_mapping(self, loaded_shard_id: str):
        shard_size_mapping = {
            "q": self.num_heads * self.head_size,
            "k": self.num_kv_heads * self.head_size,
            "v": self.num_kv_heads * self.head_size,
        }
        return shard_size_mapping.get(loaded_shard_id)

    def _load_fused_module_from_checkpoint(self, param: BaseAphroditeParameter,
                                           loaded_weight: torch.Tensor):
        """
        Handle special case for models where QKV layers are already
        fused on disk. In this case, we have no shard id. This function
        determines the shard id by splitting these layers and then calls
        the weight loader using the shard id.

        An example of a model with these fused layers:
        https://huggingface.co/microsoft/Phi-3-mini-4k-instruct
        """
        shard_offsets = [
            # (shard_id, shard_offset, shard_size)
            ("q", 0, self.total_num_heads * self.head_size),
            ("k", self.total_num_heads * self.head_size,
             self.total_num_kv_heads * self.head_size),
            ("v",
             (self.total_num_heads + self.total_num_kv_heads) * self.head_size,
             self.total_num_kv_heads * self.head_size),
        ]

        for shard_id, shard_offset, shard_size in shard_offsets:
            # Special case for Quantization.
            # If quantized, we need to adjust the offset and size to account
            # for the packing.
            if isinstance(param, PackedAphroditeParameter
                          ) and param.packed_dim == param.output_dim:
                shard_size, shard_offset = \
                    param.adjust_shard_indexes_for_packing(
                        shard_size=shard_size, shard_offset=shard_offset)

            loaded_weight_shard = loaded_weight.narrow(param.output_dim,
                                                       shard_offset,
                                                       shard_size)
            self.weight_loader_v2(param, loaded_weight_shard, shard_id)

    def weight_loader_v2(self,
                         param: BaseAphroditeParameter,
                         loaded_weight: torch.Tensor,
                         loaded_shard_id: Optional[str] = None):
        if loaded_shard_id is None:  # special case for certain models
            if isinstance(param, PerTensorScaleParameter):
                param.load_merged_column_weight(loaded_weight=loaded_weight,
                                                shard_id=0)
                return
            elif type(param) is BaseAphroditeParameter:
                param.load_merged_column_weight(loaded_weight=loaded_weight)
                return
            self._load_fused_module_from_checkpoint(param, loaded_weight)
            return

        assert loaded_shard_id in ["q", "k", "v"]

        shard_offset = self._get_shard_offset_mapping(loaded_shard_id)
        shard_size = self._get_shard_size_mapping(loaded_shard_id)

        param.load_qkv_weight(loaded_weight=loaded_weight,
                              num_heads=self.num_kv_head_replicas,
                              shard_id=loaded_shard_id,
                              shard_offset=shard_offset,
                              shard_size=shard_size)

    def weight_loader(self,
                      param: Parameter,
                      loaded_weight: torch.Tensor,
                      loaded_shard_id: Optional[str] = None):

        # Special case for GGUF
        # initialize GGUF param after we know the quantize type
        is_gguf_weight = getattr(param, "is_gguf_weight", False)
        is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
        if is_gguf_weight_type and loaded_shard_id is not None:
            idx_map = {"q": 0, "k": 1, "v": 2}
            param.data[idx_map[loaded_shard_id]].copy_(loaded_weight)
            param.shard_weight_type[loaded_shard_id] = loaded_weight.item()
            return

        if is_gguf_weight and isinstance(param, UninitializedParameter):
            from gguf.constants import GGML_QUANT_SIZES

            ori_shape = param.tensor_shape
            weight_types = self.qweight_type.shard_weight_type.values()
            row_size = []
            for weight_type in weight_types:
                block_size, type_size = GGML_QUANT_SIZES[weight_type]
                row_size.append(ori_shape[1] // block_size * type_size)
            q_shape = (ori_shape[0], max(row_size))
            param.materialize(q_shape, dtype=loaded_weight.dtype)

        param_data = param.data
        output_dim = getattr(param, "output_dim", None)
        # Special case for AQLM codebooks.
        is_metadata = getattr(param, "is_metadata", False)

        # Special case for per-tensor scales in fused case.
        needs_scalar_to_array = getattr(param, "needs_scalar_to_array", False)

        if loaded_shard_id is None:
            # Loaded weight is already fused on disk (qkv/mlp).
            if output_dim is None:
                if needs_scalar_to_array:
                    param_data, loaded_weight = adjust_scalar_to_fused_array(
                        param_data, loaded_weight, 0)

                assert param_data.shape == loaded_weight.shape
                param_data.copy_(loaded_weight)
                return
            shard_offsets = [
                # (shard_id, shard_offset, shard_size)
                ("q", 0, self.total_num_heads * self.head_size),
                ("k", self.total_num_heads * self.head_size,
                 self.total_num_kv_heads * self.head_size),
                ("v", (self.total_num_heads + self.total_num_kv_heads) *
                 self.head_size, self.total_num_kv_heads * self.head_size),
            ]
            packed_dim = getattr(param, "packed_dim", None)
            for shard_id, shard_offset, shard_size in shard_offsets:
                # Special case for Quantized Weights.
                # If quantized, we need to adjust the offset and size to
                # account for the packing.
                if packed_dim == output_dim:
                    shard_size = shard_size // param.pack_factor
                    shard_offset = shard_offset // param.pack_factor

                    # Special case for Marlin.
                    shard_size, shard_offset = adjust_marlin_shard(
                        param, shard_size, shard_offset)

                loaded_weight_shard = loaded_weight.narrow(
                    output_dim, shard_offset, shard_size)
                self.weight_loader(param, loaded_weight_shard, shard_id)
            return

        tp_rank = get_tensor_model_parallel_rank()
        assert loaded_shard_id in ["q", "k", "v"]

        # If output dim is defined, use the default loading process.
        if output_dim is not None:
            if loaded_shard_id == "q":
                shard_offset = 0
                shard_size = self.num_heads * self.head_size
                if self.quant_config is None:
                    multiple_of = self.head_size * self.num_heads_per_kv_head
            elif loaded_shard_id == "k":
                shard_offset = self.num_heads * self.head_size
                shard_size = self.num_kv_heads * self.head_size
                if self.quant_config is None:
                    multiple_of = self.head_size
            elif loaded_shard_id == "v":
                shard_offset = (self.num_heads +
                                self.num_kv_heads) * self.head_size
                shard_size = self.num_kv_heads * self.head_size
                if self.quant_config is None:
                    multiple_of = self.head_size

            # Special case for Quantized Weights.
            # If quantized, we need to adjust the offset and size to account
            # for the packing.
            packed_dim = getattr(param, "packed_dim", None)
            if packed_dim == output_dim:
                shard_size = shard_size // param.pack_factor
                shard_offset = shard_offset // param.pack_factor
                if self.quant_config is None:
                    multiple_of = multiple_of // param.pack_factor

                # Special case for Marlin.
                shard_size, shard_offset = adjust_marlin_shard(
                    param, shard_size, shard_offset)

            use_bitsandbytes = getattr(param, "use_bitsandbytes", False)
            if use_bitsandbytes:
                orig_qkv_offsets = {
                    "q": (0, self.num_heads * self.head_size),
                    "k": (self.num_heads * self.head_size,
                          self.num_kv_heads * self.head_size),
                    "v":
                    ((self.num_heads + self.num_kv_heads) * self.head_size,
                     self.num_kv_heads * self.head_size),
                    "total":
                    ((self.num_heads + 2 * self.num_kv_heads) * self.head_size,
                     0)
                }
                shard_size, shard_offset = adjust_bitsandbytes_shard(
                    param, orig_qkv_offsets, loaded_shard_id)

            if is_gguf_weight:
                tp_size = get_tensor_model_parallel_world_size()
                output_dim = getattr(param, "output_dim", None)
                shard_shape = list(loaded_weight.shape)
                shard_shape[output_dim] = shard_shape[output_dim] // tp_size
                param.shard_id.append(loaded_shard_id)
                param.shard_size[loaded_shard_id] = shard_shape

                input_dim = getattr(param, "input_dim", None)
                input_size = loaded_weight.shape[input_dim]
                param_data = param_data.narrow(input_dim, 0, input_size)

            param_data = param_data.narrow(output_dim, shard_offset,
                                           shard_size)
            if self.quant_config is None:
                tp_size = get_tensor_model_parallel_world_size()
                total_size = loaded_weight.shape[output_dim]
                start_idx = get_current_tp_rank_partition_offset(
                    total_size, tp_rank, tp_size, multiple_of=multiple_of)
            else:
                if loaded_shard_id == "q":
                    shard_id = tp_rank
                else:
                    shard_id = tp_rank // self.num_kv_head_replicas
                start_idx = shard_id * shard_size
            loaded_weight = loaded_weight.narrow(output_dim, start_idx,
                                                 shard_size)
        # Special case for AQLM codebooks.
        elif is_metadata:
            # metadata indicates fixed size concatenated along dim 0
            shard_size = loaded_weight.shape[0]
            shard_index = ["q", "k", "v"].index(loaded_shard_id)
            param_data = param_data.narrow(0, shard_index * shard_size,
                                           shard_size)
        # Special case for per-tensor scales in fused case.
        elif needs_scalar_to_array:
            param_data, loaded_weight = adjust_scalar_to_fused_array(
                param_data, loaded_weight, loaded_shard_id)
        else:
            ignore_warning = getattr(param, "ignore_warning", False)
            if not ignore_warning:
                logger.warning(
                    "Loading a weight without `output_dim` attribute in "
                    "QKVParallelLinear, assume the weight is the same "
                    "for all partitions.")

        assert param_data.shape == loaded_weight.shape
        param_data.copy_(loaded_weight)
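

# Illustrative sketch (not part of the original module): worked head math for
# a grouped-query attention layer on the quantized (even-division) path. The
# figures are assumptions chosen only for the example.
#
#     hidden_size=4096, head_size=128, total_num_heads=32,
#     total_num_kv_heads=8, tp_size=4
#     # per rank: num_heads = 32 / 4 = 8, num_kv_heads = 8 / 4 = 2
#     # per-rank output width = (8 + 2 * 2) * 128 = 1536
#     # full output_size = (num_heads + 2 * num_kv_heads) * tp_size * head_size
#     #                  = 12 * 4 * 128 = 6144, split as [4096, 1024, 1024]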


class RowParallelLinear(LinearBase):
    """Linear layer with row parallelism.

    The linear layer is defined as Y = XA + b. A is parallelized along
    its first dimension and X along its second dimension as:
               -   -
              | A_1 |
              | .   |
          A = | .   |        X = [X_1, ..., X_p]
              | .   |
              | A_p |
               -   -

    Args:
        input_size: first dimension of matrix A.
        output_size: second dimension of matrix A.
        bias: If true, add bias. Note that bias is not parallelized.
        input_is_parallel: If true, we assume that the input is already
                           split across the GPUs and we do not split
                           again.
        skip_bias_add: This was added to enable performance optimization where
                       bias can be fused with other element-wise operations.
                       We skip adding bias but instead return it.
        params_dtype: Data type for the parameters.
        quant_config: Quantization config.
        partition_multiple_of: each rank's partition of the input dimension
                               will be a multiple of this number.
    """

    def __init__(self,
                 input_size: int,
                 output_size: int,
                 bias: bool = True,
                 input_is_parallel: bool = True,
                 skip_bias_add: bool = False,
                 params_dtype: Optional[torch.dtype] = None,
                 reduce_results: bool = True,
                 quant_config: Optional[QuantizationConfig] = None,
                 partition_multiple_of: int = 1,
                 prefix: str = ""):
        super().__init__(input_size, output_size, skip_bias_add, params_dtype,
                         quant_config, prefix)

        self.input_is_parallel = input_is_parallel
        self.reduce_results = reduce_results
        self.quant_config = quant_config

        # Divide the weight matrix along the last dimension.
        self.tp_rank = get_tensor_model_parallel_rank()
        self.tp_size = get_tensor_model_parallel_world_size()
        if quant_config is None:
            self.partition_multiple_of = partition_multiple_of
            self.input_size_per_partition = get_current_tp_rank_partition_size(
                input_size, self.tp_rank, self.tp_size, partition_multiple_of)
        else:
            self.input_size_per_partition = divide(input_size, self.tp_size)
        assert self.quant_method is not None

        self.quant_method.create_weights(
            layer=self,
            input_size_per_partition=self.input_size_per_partition,
            output_partition_sizes=[self.output_size],
            input_size=self.input_size,
            output_size=self.output_size,
            params_dtype=self.params_dtype,
            weight_loader=(
                self.weight_loader_v2 if self.quant_method.__class__.__name__
                in WEIGHT_LOADER_V2_SUPPORTED else self.weight_loader),
            prefix=prefix)
        if not reduce_results and (bias and not skip_bias_add):
            raise ValueError("When not reducing the results, adding bias to "
                             "the results can lead to incorrect results")

        if bias:
            self.bias = Parameter(
                torch.empty(self.output_size, dtype=params_dtype))
            set_weight_attrs(self.bias, {
                "output_dim": 0,
                "weight_loader": self.weight_loader,
            })
        else:
            self.register_parameter("bias", None)

    def weight_loader(self, param: Parameter, loaded_weight: torch.Tensor):
        tp_size = get_tensor_model_parallel_world_size()
        input_dim = getattr(param, "input_dim", None)

        # Special case for GGUF
        is_gguf_weight = getattr(param, "is_gguf_weight", False)
        is_gguf_weight_type = getattr(param, "is_gguf_weight_type", False)
        if is_gguf_weight_type:
            param.weight_type = loaded_weight.item()

        # Materialize GGUF UninitializedParameter
        if is_gguf_weight and isinstance(param, UninitializedParameter):
            weight_shape = list(loaded_weight.shape)
            if input_dim:
                weight_shape[input_dim] = weight_shape[input_dim] // tp_size
            param.materialize(tuple(weight_shape), dtype=loaded_weight.dtype)

        param_data = param.data
        if input_dim is not None:
            shard_size = param_data.shape[input_dim]
            if self.quant_config is None:
                start_idx = get_current_tp_rank_partition_offset(
                    self.input_size,
                    self.tp_rank,
                    self.tp_size,
                    multiple_of=self.partition_multiple_of)
            else:
                start_idx = self.tp_rank * shard_size
            loaded_weight = loaded_weight.narrow(input_dim, start_idx,
                                                 shard_size)

        # Special case for loading scales off disk, which often do not
        # have a shape (such as in the case of AutoFP8).
        if len(loaded_weight.shape) == 0:
            loaded_weight = loaded_weight.reshape(1)

        assert param_data.shape == loaded_weight.shape
        param_data.copy_(loaded_weight)

    def weight_loader_v2(self, param: BaseAphroditeParameter,
                         loaded_weight: torch.Tensor):
        param.load_row_parallel_weight(loaded_weight=loaded_weight)

    def forward(self, input_):
        if self.input_is_parallel:
            input_parallel = input_
        else:
            tp_rank = get_tensor_model_parallel_rank()
            splitted_input = split_tensor_along_last_dim(
                input_, num_partitions=self.tp_size)
            input_parallel = splitted_input[tp_rank].contiguous()

        # Matrix multiply.
        assert self.quant_method is not None
        # Only fuse bias add into GEMM for rank 0 (this ensures that
        # bias will not get added more than once in TP>1 case)
        bias_ = None if (self.tp_rank > 0 or self.skip_bias_add) else self.bias
        output_parallel = self.quant_method.apply(self,
                                                  input_parallel,
                                                  bias=bias_)
        if self.reduce_results and self.tp_size > 1:
            output = tensor_model_parallel_all_reduce(output_parallel)
        else:
            output = output_parallel

        output_bias = self.bias if self.skip_bias_add else None
        return output, output_bias

    def extra_repr(self) -> str:
        s = f"input_features={self.input_size_per_partition}"
        s += f", output_features={self.output_size}"
        s += f", bias={self.bias is not None}"
        s += f", tp_size={self.tp_size}"
        s += f", reduce_results={self.reduce_results}"
        return s
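

# Illustrative sketch (not part of the original module): a column-parallel and
# a row-parallel layer are typically paired (e.g. gate/up followed by the down
# projection in an MLP) so the intermediate activations stay sharded and only
# one all-reduce is needed at the end. This sketch assumes the tensor-parallel
# process group has already been initialized (tp_size == 1 suffices to run it
# on a single device); sizes are assumptions for the example.
def _example_row_parallel_linear() -> None:
    down_proj = RowParallelLinear(input_size=256,
                                  output_size=64,
                                  bias=False,
                                  input_is_parallel=True)
    down_proj.weight.data.normal_()
    x = torch.randn(2, 256, dtype=down_proj.params_dtype)
    output, _ = down_proj(x)  # all-reduced across ranks if tp_size > 1
    assert output.shape == (2, 64)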