# __init__.py

from typing import Type

from aphrodite.quantization.aqlm import AQLMConfig
from aphrodite.quantization.awq import AWQConfig
from aphrodite.quantization.awq_marlin import AWQMarlinConfig
from aphrodite.quantization.base_config import QuantizationConfig
from aphrodite.quantization.bitsandbytes import BitsAndBytesConfig
from aphrodite.quantization.compressed_tensors.compressed_tensors import (
    CompressedTensorsConfig)
from aphrodite.quantization.deepspeedfp import DeepSpeedFPConfig
from aphrodite.quantization.eetq import EETQConfig
from aphrodite.quantization.experts_int8 import ExpertsInt8Config
from aphrodite.quantization.fbgemm_fp8 import FBGEMMFp8Config
from aphrodite.quantization.fp6 import QuantLLMFPConfig
from aphrodite.quantization.fp8 import Fp8Config
from aphrodite.quantization.gguf import GGUFConfig
from aphrodite.quantization.gptq import GPTQConfig
from aphrodite.quantization.gptq_marlin import GPTQMarlinConfig
from aphrodite.quantization.gptq_marlin_24 import GPTQMarlin24Config
from aphrodite.quantization.marlin import MarlinConfig
from aphrodite.quantization.qqq import QQQConfig
from aphrodite.quantization.quip import QuipConfig
from aphrodite.quantization.squeezellm import SqueezeLLMConfig
from aphrodite.quantization.tpu_int8 import Int8TpuConfig

# Maps quantization method names to their config classes.
QUANTIZATION_METHODS = {
    "aqlm": AQLMConfig,
    "awq": AWQConfig,
    "deepspeedfp": DeepSpeedFPConfig,
    "tpu_int8": Int8TpuConfig,
    "eetq": EETQConfig,
    "fp8": Fp8Config,
    "quant_llm": QuantLLMFPConfig,
    "fbgemm_fp8": FBGEMMFp8Config,
    "gguf": GGUFConfig,
    # The order of the gptq methods is important for config.py's iteration
    # over override_quantization_method(..) -- see the sketch after this dict.
    "marlin": MarlinConfig,
    "gptq_marlin_24": GPTQMarlin24Config,
    "gptq_marlin": GPTQMarlinConfig,
    "awq_marlin": AWQMarlinConfig,
    "gptq": GPTQConfig,
    "quip": QuipConfig,
    "squeezellm": SqueezeLLMConfig,
    "compressed-tensors": CompressedTensorsConfig,
    "bitsandbytes": BitsAndBytesConfig,
    "qqq": QQQConfig,
    "experts_int8": ExpertsInt8Config,
    # The quant_llm methods: fp2..fp7 are aliases that all resolve to
    # QuantLLMFPConfig.
    "fp2": QuantLLMFPConfig,
    "fp3": QuantLLMFPConfig,
    "fp4": QuantLLMFPConfig,
    "fp5": QuantLLMFPConfig,
    "fp6": QuantLLMFPConfig,
    "fp7": QuantLLMFPConfig,
}
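
# Sketch (an assumption based on the ordering comment above, not a verbatim
# copy of config.py): the consumer walks this dict in insertion order and
# lets each config class claim the method via its override hook, so the more
# specific Marlin variants must be tried before plain "gptq" and "awq":
#
#     for name, cls in QUANTIZATION_METHODS.items():
#         override = cls.override_quantization_method(hf_quant_cfg, user_quant)
#         if override is not None:
#             quant_method = override
#             break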


def get_quantization_config(quantization: str) -> Type[QuantizationConfig]:
    """Return the config class registered for a quantization method name."""
    if quantization not in QUANTIZATION_METHODS:
        raise ValueError(f"Invalid quantization method: {quantization}")
    return QUANTIZATION_METHODS[quantization]


__all__ = [
    "QuantizationConfig",
    "get_quantization_config",
    "QUANTIZATION_METHODS",
]
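
Usage sketch (illustrative, not part of the module): resolving a method name
to its config class and handling an unknown name. The "awq" key and the
ValueError message come from the registry and function above; "int3" is a
hypothetical name that is not registered.

    from aphrodite.quantization import (QUANTIZATION_METHODS,
                                        get_quantization_config)

    # Resolve a registered method name to its config class.
    config_cls = get_quantization_config("awq")
    assert config_cls is QUANTIZATION_METHODS["awq"]  # the AWQConfig class

    # Unregistered names raise ValueError.
    try:
        get_quantization_config("int3")  # not in QUANTIZATION_METHODS
    except ValueError as err:
        print(err)  # -> Invalid quantization method: int3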