  1. from typing import Type
  2. from aphrodite.quantization.aqlm import AQLMConfig
  3. from aphrodite.quantization.awq import AWQConfig
  4. from aphrodite.quantization.awq_marlin import AWQMarlinConfig
  5. from aphrodite.quantization.base_config import QuantizationConfig
  6. from aphrodite.quantization.bitsandbytes import BitsAndBytesConfig
  7. from aphrodite.quantization.compressed_tensors.compressed_tensors import (
  8. CompressedTensorsConfig)
  9. from aphrodite.quantization.deepspeedfp import DeepSpeedFPConfig
  10. from aphrodite.quantization.eetq import EETQConfig
  11. from aphrodite.quantization.experts_int8 import ExpertsInt8Config
  12. from aphrodite.quantization.fbgemm_fp8 import FBGEMMFp8Config
  13. from aphrodite.quantization.fp6 import QuantLLMFPConfig
  14. from aphrodite.quantization.fp8 import Fp8Config
  15. from aphrodite.quantization.gguf import GGUFConfig
  16. from aphrodite.quantization.gptq import GPTQConfig
  17. from aphrodite.quantization.gptq_marlin import GPTQMarlinConfig
  18. from aphrodite.quantization.gptq_marlin_24 import GPTQMarlin24Config
  19. from aphrodite.quantization.hqq_marlin import HQQMarlinConfig
  20. from aphrodite.quantization.marlin import MarlinConfig
  21. from aphrodite.quantization.qqq import QQQConfig
  22. from aphrodite.quantization.quip import QuipConfig
  23. from aphrodite.quantization.squeezellm import SqueezeLLMConfig
  24. from aphrodite.quantization.tpu_int8 import Int8TpuConfig
  25. QUANTIZATION_METHODS = {
  26. "aqlm": AQLMConfig,
  27. "awq": AWQConfig,
  28. "deepspeedfp": DeepSpeedFPConfig,
  29. "tpu_int8": Int8TpuConfig,
  30. "eetq": EETQConfig,
  31. "fp8": Fp8Config,
  32. "quant_llm": QuantLLMFPConfig,
  33. "fbgemm_fp8": FBGEMMFp8Config,
  34. "gguf": GGUFConfig,
  35. # The order of gptq methods is important for config.py iteration over
  36. # override_quantization_method(..)
  37. "marlin": MarlinConfig,
  38. "gptq_marlin_24": GPTQMarlin24Config,
  39. "gptq_marlin": GPTQMarlinConfig,
  40. "awq_marlin": AWQMarlinConfig,
  41. "gptq": GPTQConfig,
  42. "quip": QuipConfig,
  43. "squeezellm": SqueezeLLMConfig,
  44. "compressed-tensors": CompressedTensorsConfig,
  45. "bitsandbytes": BitsAndBytesConfig,
  46. "qqq": QQQConfig,
  47. "hqq": HQQMarlinConfig,
  48. "experts_int8": ExpertsInt8Config,
  49. # the quant_llm methods
  50. "fp2": QuantLLMFPConfig,
  51. "fp3": QuantLLMFPConfig,
  52. "fp4": QuantLLMFPConfig,
  53. "fp5": QuantLLMFPConfig,
  54. "fp6": QuantLLMFPConfig,
  55. "fp7": QuantLLMFPConfig,
  56. }
  57. def get_quantization_config(quantization: str) -> Type[QuantizationConfig]:
  58. if quantization not in QUANTIZATION_METHODS:
  59. raise ValueError(f"Invalid quantization method: {quantization}")
  60. return QUANTIZATION_METHODS[quantization]
  61. __all__ = [
  62. "QuantizationConfig",
  63. "get_quantization_config",
  64. "QUANTIZATION_METHODS",
  65. ]