# base_config.py
  1. from abc import ABC, abstractmethod
  2. from typing import Any, Dict, List, Optional
  3. import torch
  4. from torch import nn
  5. class QuantizeMethodBase(ABC):
  6. """Base class for different quantized methods."""
  7. @abstractmethod
  8. def create_weights(self, layer: torch.nn.Module, *weight_args,
  9. **extra_weight_attrs):
  10. """Create weights for a layer.
  11. The weights will be set as attributes of the layer."""
  12. raise NotImplementedError
  13. @abstractmethod
  14. def apply(self, layer: torch.nn.Module, *args, **kwargs) -> torch.Tensor:
  15. """Apply the weights in layer to the input tensor.
  16. Expects create_weights to have been called before on the layer."""
  17. raise NotImplementedError
  18. def process_weights_after_loading(self, layer: nn.Module) -> None:
  19. """Process the weight after loading.
  20. This can be used for example, to transpose weights for computation.
  21. """
  22. return
  23. class QuantizationConfig(ABC):
  24. """Base class for quantization configs."""
  25. @abstractmethod
  26. def get_name(self) -> str:
  27. """Name of the quantization method."""
  28. raise NotImplementedError
  29. @abstractmethod
  30. def get_supported_act_dtypes(self) -> List[torch.dtype]:
  31. """List of supported activation dtypes."""
  32. raise NotImplementedError
  33. @classmethod
  34. @abstractmethod
  35. def get_min_capability(cls) -> int:
  36. """Minimum GPU capability to support the quantization method.
  37. E.g., 70 for Volta, 75 for Turing, 80 for Ampere.
  38. This requirement is due to the custom CUDA kernels used by the
  39. quantization method.
  40. """
  41. raise NotImplementedError
  42. @staticmethod
  43. @abstractmethod
  44. def get_config_filenames() -> List[str]:
  45. """List of filenames to search for in the model directory."""
  46. raise NotImplementedError
  47. @classmethod
  48. @abstractmethod
  49. def from_config(cls, config: Dict[str, Any]) -> "QuantizationConfig":
  50. """Create a config class from the model's quantization config."""
  51. raise NotImplementedError
  52. @classmethod
  53. def override_quantization_method(cls, hf_quant_cfg,
  54. user_quant) -> Optional[str]:
  55. """
  56. Detects if this quantization method can support a given checkpoint
  57. format by overriding the user specified quantization method --
  58. this method should only be overwritten by subclasses in exceptional
  59. circumstances
  60. """
  61. return None
  62. @staticmethod
  63. def get_from_keys(config: Dict[str, Any], keys: List[str]) -> Any:
  64. """Get a value from the model's quantization config."""
  65. for key in keys:
  66. if key in config:
  67. return config[key]
  68. raise ValueError(f"Cannot find any of {keys} in the model's "
  69. "quantization config.")
  70. @staticmethod
  71. def get_from_keys_or(config: Dict[str, Any], keys: List[str],
  72. default: Any) -> Any:
  73. """Get a optional value from the model's quantization config."""
  74. try:
  75. return QuantizationConfig.get_from_keys(config, keys)
  76. except ValueError:
  77. return default
  78. @abstractmethod
  79. def get_quant_method(self, layer: torch.nn.Module,
  80. prefix: str) -> QuantizeMethodBase:
  81. """Get the quantize method to use for the quantized layer."""
  82. raise NotImplementedError
  83. @abstractmethod
  84. def get_scaled_act_names(self) -> List[str]:
  85. """Returns the activation function names that should be post-scaled.
  86. For now, this is only used by AWQ.
  87. """
  88. raise NotImplementedError