# aphrodite/modeling/models/__init__.py
import importlib
from typing import Dict, List, Optional, Type

import torch.nn as nn
from loguru import logger

from aphrodite.common.utils import is_hip
  6. # Architecture -> (module, class).
  7. _GENERATION_MODELS = {
  8. "AquilaModel": ("llama", "LlamaForCausalLM"),
  9. "AquilaForCausalLM": ("llama", "LlamaForCausalLM"), # AquilaChat2
  10. "BaiChuanForCausalLM": ("baichuan", "BaiChuanForCausalLM"), # baichuan-7b
  11. "BaichuanForCausalLM": ("baichuan", "BaichuanForCausalLM"), # baichuan-13b
  12. "BloomForCausalLM": ("bloom", "BloomForCausalLM"),
  13. "BitnetForCausalLM": ("bitnet", "BitnetForCausalLM"),
  14. "ChatGLMModel": ("chatglm", "ChatGLMForCausalLM"),
  15. "ChatGLMForConditionalGeneration": ("chatglm", "ChatGLMForCausalLM"),
  16. "CohereForCausalLM": ("commandr", "CohereForCausalLM"),
  17. "DbrxForCausalLM": ("dbrx", "DbrxForCausalLM"),
  18. "DeciLMForCausalLM": ("decilm", "DeciLMForCausalLM"),
  19. "DeepseekForCausalLM": ("deepseek", "DeepseekForCausalLM"),
  20. "FalconForCausalLM": ("falcon", "FalconForCausalLM"),
  21. "GemmaForCausalLM": ("gemma", "GemmaForCausalLM"),
  22. "GPT2LMHeadModel": ("gpt2", "GPT2LMHeadModel"),
  23. "GPTBigCodeForCausalLM": ("gpt_bigcode", "GPTBigCodeForCausalLM"),
  24. "GPTJForCausalLM": ("gpt_j", "GPTJForCausalLM"),
  25. "GPTNeoXForCausalLM": ("gpt_neox", "GPTNeoXForCausalLM"),
  26. "InternLMForCausalLM": ("llama", "LlamaForCausalLM"),
  27. "InternLM2ForCausalLM": ("internlm2", "InternLM2ForCausalLM"),
  28. "JAISLMHeadModel": ("jais", "JAISLMHeadModel"),
  29. "LlamaForCausalLM": ("llama", "LlamaForCausalLM"),
  30. "LlavaForConditionalGeneration":
  31. ("llava", "LlavaForConditionalGeneration"),
  32. # For decapoda-research/llama-*
  33. "LLaMAForCausalLM": ("llama", "LlamaForCausalLM"),
  34. "MistralForCausalLM": ("llama", "LlamaForCausalLM"),
  35. "MixtralForCausalLM": ("mixtral", "MixtralForCausalLM"),
  36. "QuantMixtralForCausalLM": ("mixtral_quant", "MixtralForCausalLM"),
  37. # transformers's mpt class has lower case
  38. "MptForCausalLM": ("mpt", "MPTForCausalLM"),
  39. "MPTForCausalLM": ("mpt", "MPTForCausalLM"),
  40. "MiniCPMForCausalLM": ("minicpm", "MiniCPMForCausalLM"),
  41. "OlmoForCausalLM": ("olmo", "OlmoForCausalLM"),
  42. "OPTForCausalLM": ("opt", "OPTForCausalLM"),
  43. "OrionForCausalLM": ("orion", "OrionForCausalLM"),
  44. "PhiForCausalLM": ("phi", "PhiForCausalLM"),
  45. "Phi3ForCausalLM": ("llama", "LlamaForCausalLM"),
  46. "QWenLMHeadModel": ("qwen", "QWenLMHeadModel"),
  47. "Qwen2ForCausalLM": ("qwen2", "Qwen2ForCausalLM"),
  48. "Qwen2MoeForCausalLM": ("qwen2_moe", "Qwen2MoeForCausalLM"),
  49. "RWForCausalLM": ("falcon", "FalconForCausalLM"),
  50. "StableLMEpochForCausalLM": ("stablelm", "StablelmForCausalLM"),
  51. "StableLmForCausalLM": ("stablelm", "StablelmForCausalLM"),
  52. "Starcoder2ForCausalLM": ("starcoder2", "Starcoder2ForCausalLM"),
  53. "ArcticForCausalLM": ("arctic", "ArcticForCausalLM"),
  54. "XverseForCausalLM": ("xverse", "XverseForCausalLM"),
  55. "Phi3SmallForCausalLM": ("phi3_small", "Phi3SmallForCausalLM"),
  56. }
# Architecture -> (module, class) for embedding-only models.
_EMBEDDING_MODELS = {
    "MistralModel": ("llama_embedding", "LlamaEmbeddingModel"),
}

# Combined registry of all built-in architectures (generation + embedding).
# Later keys would win on collision, but the two maps share no keys here.
_MODELS = {**_GENERATION_MODELS, **_EMBEDDING_MODELS}
  61. # Architecture -> type.
  62. # out of tree models
  63. _OOT_MODELS: Dict[str, Type[nn.Module]] = {}
  64. # Models not supported by ROCm.
  65. _ROCM_UNSUPPORTED_MODELS = []
  66. # Models partially supported by ROCm.
  67. # Architecture -> Reason.
  68. _ROCM_PARTIALLY_SUPPORTED_MODELS = {
  69. "Qwen2ForCausalLM":
  70. "Sliding window attention is not yet supported in ROCm's flash attention",
  71. "MistralForCausalLM":
  72. "Sliding window attention is not yet supported in ROCm's flash attention",
  73. "MixtralForCausalLM":
  74. "Sliding window attention is not yet supported in ROCm's flash attention",
  75. }
  76. class ModelRegistry:
  77. @staticmethod
  78. def load_model_cls(model_arch: str) -> Optional[Type[nn.Module]]:
  79. if model_arch in _OOT_MODELS:
  80. return _OOT_MODELS[model_arch]
  81. if model_arch not in _MODELS:
  82. return None
  83. if is_hip():
  84. if model_arch in _ROCM_UNSUPPORTED_MODELS:
  85. raise ValueError(
  86. f"Model architecture {model_arch} is not supported by "
  87. "ROCm for now.")
  88. if model_arch in _ROCM_PARTIALLY_SUPPORTED_MODELS:
  89. logger.warning(
  90. f"Model architecture {model_arch} is partially "
  91. "supported by ROCm: "
  92. f"{_ROCM_PARTIALLY_SUPPORTED_MODELS[model_arch]}")
  93. module_name, model_cls_name = _MODELS[model_arch]
  94. module = importlib.import_module(
  95. f"aphrodite.modeling.models.{module_name}")
  96. return getattr(module, model_cls_name, None)
  97. @staticmethod
  98. def get_supported_archs() -> List[str]:
  99. return list(_MODELS.keys())
  100. @staticmethod
  101. def register_model(model_arch: str, model_cls: Type[nn.Module]):
  102. if model_arch in _MODELS:
  103. logger.warning(f"Model architecture {model_arch} is already "
  104. "registered, and will be overwritten by the new "
  105. f"model class {model_cls.__name__}.")
  106. global _OOT_MODELS
  107. _OOT_MODELS[model_arch] = model_cls
  108. @staticmethod
  109. def is_embedding_model(model_arch: str) -> bool:
  110. return model_arch in _EMBEDDING_MODELS
  111. __all__ = [
  112. "ModelRegistry",
  113. ]