import inspect
from abc import ABC, abstractmethod
from typing import Any, Dict, List, Optional, Type

import torch
from torch import nn


class QuantizeMethodBase(ABC):
    """Base class for different quantized methods."""

    @abstractmethod
    def create_weights(self, layer: torch.nn.Module, *weight_args,
                       **extra_weight_attrs):
        """Create weights for a layer.

        The weights will be set as attributes of the layer.
        """
        raise NotImplementedError

    @abstractmethod
    def apply(self, layer: torch.nn.Module, *args, **kwargs) -> torch.Tensor:
        """Apply the weights in layer to the input tensor.

        Expects create_weights to have been called before on the layer.
        """
        raise NotImplementedError

    # Not required functions
    def embedding(self, layer: torch.nn.Module, *args,
                  **kwargs) -> torch.Tensor:
        """Gather embeddings in the layer based on indices in the input
        tensor.

        Expects create_weights to have been called before on the layer.
        """
        raise NotImplementedError

    def process_weights_after_loading(self, layer: nn.Module) -> None:
        """Process the weight after loading.

        This can be used, for example, to transpose weights for computation.
        """
        return
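

# A minimal sketch of a concrete QuantizeMethodBase, using an unquantized
# linear layer purely for illustration; the class and its input_size/
# output_size weight args are hypothetical, not part of this module's API.
class _SketchLinearMethod(QuantizeMethodBase):

    def create_weights(self, layer: torch.nn.Module, input_size: int,
                       output_size: int, **extra_weight_attrs):
        # Per the base-class contract, the weight is attached to the layer
        # itself rather than stored on the method object.
        weight = nn.Parameter(torch.empty(output_size, input_size),
                              requires_grad=False)
        layer.register_parameter("weight", weight)

    def apply(self, layer: torch.nn.Module, x: torch.Tensor) -> torch.Tensor:
        # A real quantized method would dequantize or call a custom kernel
        # here; a plain matmul stands in for that step.
        return torch.nn.functional.linear(x, layer.weight)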


def method_has_implemented_embedding(
        method_class: Type[QuantizeMethodBase]) -> bool:
    """
    Not all quant methods have embedding implemented, so we need to check
    that it exists for our given method. We check this by making sure the
    function has been changed from the base implementation.
    """
    base_embedding = inspect.getattr_static(QuantizeMethodBase, "embedding",
                                            None)
    class_embedding = inspect.getattr_static(method_class, "embedding", None)

    return (class_embedding is not None
            and class_embedding is not base_embedding)
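

# A quick sketch of what method_has_implemented_embedding reports.
# _SketchLinearMethod above inherits embedding unchanged, while the
# hypothetical class below overrides it; the check inspects classes, so the
# remaining abstract methods need not be filled in here.
class _SketchEmbeddingMethod(QuantizeMethodBase):

    def embedding(self, layer: torch.nn.Module, *args,
                  **kwargs) -> torch.Tensor:
        # Plain table lookup; a real method would dequantize the table first.
        return torch.nn.functional.embedding(args[0], layer.weight)

# method_has_implemented_embedding(_SketchLinearMethod)    -> False
# method_has_implemented_embedding(_SketchEmbeddingMethod) -> True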


class QuantizationConfig(ABC):
    """Base class for quantization configs."""

    @abstractmethod
    def get_name(self) -> str:
        """Name of the quantization method."""
        raise NotImplementedError

    @abstractmethod
    def get_supported_act_dtypes(self) -> List[torch.dtype]:
        """List of supported activation dtypes."""
        raise NotImplementedError

    @classmethod
    @abstractmethod
    def get_min_capability(cls) -> int:
        """Minimum GPU capability to support the quantization method.

        E.g., 70 for Volta, 75 for Turing, 80 for Ampere.
        This requirement is due to the custom CUDA kernels used by the
        quantization method.
        """
        raise NotImplementedError
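
    # For reference, the running GPU's capability in this integer form can
    # be computed as below (a sketch, assuming a visible CUDA device):
    #
    #   major, minor = torch.cuda.get_device_capability()
    #   capability = major * 10 + minor  # e.g. (8, 0) -> 80 for Ampere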

    @staticmethod
    @abstractmethod
    def get_config_filenames() -> List[str]:
        """List of filenames to search for in the model directory."""
        raise NotImplementedError

    @classmethod
    @abstractmethod
    def from_config(cls, config: Dict[str, Any]) -> "QuantizationConfig":
        """Create a config class from the model's quantization config."""
        raise NotImplementedError

    @classmethod
    def override_quantization_method(cls, hf_quant_cfg,
                                     user_quant) -> Optional[str]:
        """
        Detects if this quantization method can support a given checkpoint
        format by overriding the user-specified quantization method --
        this method should only be overwritten by subclasses in exceptional
        circumstances.
        """
        return None

    @staticmethod
    def get_from_keys(config: Dict[str, Any], keys: List[str]) -> Any:
        """Get a value from the model's quantization config."""
        for key in keys:
            if key in config:
                return config[key]
        raise ValueError(f"Cannot find any of {keys} in the model's "
                         "quantization config.")

    @staticmethod
    def get_from_keys_or(config: Dict[str, Any], keys: List[str],
                         default: Any) -> Any:
        """Get an optional value from the model's quantization config."""
        try:
            return QuantizationConfig.get_from_keys(config, keys)
        except ValueError:
            return default
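
    # A small sketch of how the two helpers above read a raw quantization
    # config; the dict contents and key names are hypothetical examples:
    #
    #   cfg = {"bits": 4, "group_size": 128}
    #   QuantizationConfig.get_from_keys(cfg, ["bits", "w_bit"])        # -> 4
    #   QuantizationConfig.get_from_keys_or(cfg, ["zero_point"], False) # -> False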

    @abstractmethod
    def get_quant_method(self, layer: torch.nn.Module,
                         prefix: str) -> QuantizeMethodBase:
        """Get the quantize method to use for the quantized layer."""
        raise NotImplementedError

    @abstractmethod
    def get_scaled_act_names(self) -> List[str]:
        """Returns the activation function names that should be post-scaled.

        For now, this is only used by AWQ.
        """
        raise NotImplementedError
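

# A minimal end-to-end sketch of a QuantizationConfig subclass, assuming a
# hypothetical 4-bit weight-only scheme ("toy4") whose settings live in a
# "quantize_config.json" file; nothing below is part of this module's API.
class _SketchQuantConfig(QuantizationConfig):

    def __init__(self, group_size: int) -> None:
        self.group_size = group_size

    def get_name(self) -> str:
        return "toy4"

    def get_supported_act_dtypes(self) -> List[torch.dtype]:
        return [torch.half]

    @classmethod
    def get_min_capability(cls) -> int:
        return 75  # Turing or newer.

    @staticmethod
    def get_config_filenames() -> List[str]:
        return ["quantize_config.json"]

    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "_SketchQuantConfig":
        # Tolerate either key spelling and fall back to a default.
        group_size = cls.get_from_keys_or(config,
                                          ["group_size", "q_group_size"], 128)
        return cls(group_size)

    def get_quant_method(self, layer: torch.nn.Module,
                         prefix: str) -> QuantizeMethodBase:
        # Hand every layer the sketch method defined earlier in this file.
        return _SketchLinearMethod()

    def get_scaled_act_names(self) -> List[str]:
        return []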