from abc import ABC, abstractmethod
from typing import Any, Dict, List

import torch
from torch import nn


class QuantizeMethodBase(ABC):
    """Base class for quantization methods."""

    @abstractmethod
    def create_weights(self, layer: torch.nn.Module, *weight_args,
                       **extra_weight_attrs):
        """Create weights for a layer.

        The weights will be set as attributes of the layer.
        """
        raise NotImplementedError

    @abstractmethod
    def apply(self, layer: torch.nn.Module, *args, **kwargs) -> torch.Tensor:
        """Apply the weights in the layer to the input tensor.

        Expects create_weights to have been called on the layer beforehand.
        """
        raise NotImplementedError

    def process_weights_after_loading(self, layer: nn.Module) -> None:
        """Process the weights after loading.

        This can be used, for example, to transpose weights for computation.
        """
        return
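

# Illustrative sketch only (not part of this module): a hypothetical
# passthrough method showing how QuantizeMethodBase is meant to be
# subclassed. It stores an unquantized weight and applies it with a plain
# matmul; a real implementation would register packed/quantized tensors in
# create_weights and dispatch to custom kernels in apply.
class PassthroughLinearMethod(QuantizeMethodBase):
    """Hypothetical example: an unquantized linear method."""

    def create_weights(self, layer: torch.nn.Module, input_size: int,
                       output_size: int, params_dtype: torch.dtype,
                       **extra_weight_attrs):
        # Set the weight as an attribute of the layer, as the base class
        # contract requires.
        weight = nn.Parameter(torch.empty(output_size, input_size,
                                          dtype=params_dtype),
                              requires_grad=False)
        layer.register_parameter("weight", weight)

    def apply(self, layer: torch.nn.Module, x: torch.Tensor,
              bias: torch.Tensor = None) -> torch.Tensor:
        # A plain GEMM; a quantized method would call its dequant kernel.
        return torch.nn.functional.linear(x, layer.weight, bias)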


class QuantizationConfig(ABC):
    """Base class for quantization configs."""

    @abstractmethod
    def get_name(self) -> str:
        """Name of the quantization method."""
        raise NotImplementedError

    @abstractmethod
    def get_supported_act_dtypes(self) -> List[torch.dtype]:
        """List of supported activation dtypes."""
        raise NotImplementedError

    @abstractmethod
    def get_min_capability(self) -> int:
        """Minimum GPU compute capability required by the quantization method.

        E.g., 70 for Volta, 75 for Turing, 80 for Ampere.
        This requirement stems from the custom CUDA kernels used by the
        quantization method.
        """
        raise NotImplementedError

    @staticmethod
    @abstractmethod
    def get_config_filenames() -> List[str]:
        """List of filenames to search for in the model directory."""
        raise NotImplementedError

    @classmethod
    @abstractmethod
    def from_config(cls, config: Dict[str, Any]) -> "QuantizationConfig":
        """Create a config instance from the model's quantization config."""
        raise NotImplementedError

    @staticmethod
    def get_from_keys(config: Dict[str, Any], keys: List[str]) -> Any:
        """Get a value from the model's quantization config.

        Returns the value of the first key in `keys` present in `config`.
        """
        for key in keys:
            if key in config:
                return config[key]
        raise ValueError(f"Cannot find any of {keys} in the model's "
                         "quantization config.")

    @abstractmethod
    def get_quant_method(self, layer: torch.nn.Module) -> QuantizeMethodBase:
        """Get the quantization method to use for the quantized layer."""
        raise NotImplementedError

    @abstractmethod
    def get_scaled_act_names(self) -> List[str]:
        """Returns the names of the activation functions that should be
        post-scaled.

        For now, this is only used by AWQ.
        """
        raise NotImplementedError
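

# Illustrative sketch only (not part of this module): a hypothetical config
# wiring the example method above into the QuantizationConfig interface.
# The filename and the config keys ("bits", "w_bit") are made up for
# demonstration; real configs (e.g. AWQ, GPTQ) define their own.
class PassthroughConfig(QuantizationConfig):
    """Hypothetical example config for PassthroughLinearMethod."""

    def __init__(self, weight_bits: int) -> None:
        self.weight_bits = weight_bits

    def get_name(self) -> str:
        return "passthrough"

    def get_supported_act_dtypes(self) -> List[torch.dtype]:
        return [torch.half, torch.bfloat16]

    def get_min_capability(self) -> int:
        return 70  # Volta or newer, per the convention documented above.

    @staticmethod
    def get_config_filenames() -> List[str]:
        return ["quantize_config.json"]  # Hypothetical filename.

    @classmethod
    def from_config(cls, config: Dict[str, Any]) -> "PassthroughConfig":
        # get_from_keys returns the first matching key, so aliases used by
        # different exporters can be listed in priority order.
        weight_bits = cls.get_from_keys(config, ["bits", "w_bit"])
        return cls(weight_bits)

    def get_quant_method(self, layer: torch.nn.Module) -> QuantizeMethodBase:
        return PassthroughLinearMethod()

    def get_scaled_act_names(self) -> List[str]:
        return []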