__init__.py

import functools
import importlib
from typing import Dict, List, Optional, Tuple, Type

import torch.nn as nn
from loguru import logger

from aphrodite.common.utils import is_hip

# Architecture -> (module, class).
_GENERATION_MODELS = {
    "AquilaModel": ("llama", "LlamaForCausalLM"),
    "AquilaForCausalLM": ("llama", "LlamaForCausalLM"),  # AquilaChat2
    "BaiChuanForCausalLM": ("baichuan", "BaiChuanForCausalLM"),  # baichuan-7b
    "BaichuanForCausalLM": ("baichuan", "BaichuanForCausalLM"),  # baichuan-13b
    "BloomForCausalLM": ("bloom", "BloomForCausalLM"),
    "Blip2ForConditionalGeneration":
    ("blip2", "Blip2ForConditionalGeneration"),
    "ChatGLMModel": ("chatglm", "ChatGLMForCausalLM"),
    "ChatGLMForConditionalGeneration": ("chatglm", "ChatGLMForCausalLM"),
    "CohereForCausalLM": ("commandr", "CohereForCausalLM"),
    "DbrxForCausalLM": ("dbrx", "DbrxForCausalLM"),
    "DeciLMForCausalLM": ("decilm", "DeciLMForCausalLM"),
    "DeepseekForCausalLM": ("deepseek", "DeepseekForCausalLM"),
    "DeepseekV2ForCausalLM": ("deepseek_v2", "DeepseekV2ForCausalLM"),
    "FalconForCausalLM": ("falcon", "FalconForCausalLM"),
    "GemmaForCausalLM": ("gemma", "GemmaForCausalLM"),
    "Gemma2ForCausalLM": ("gemma2", "Gemma2ForCausalLM"),
    "GPT2LMHeadModel": ("gpt2", "GPT2LMHeadModel"),
    "GPTBigCodeForCausalLM": ("gpt_bigcode", "GPTBigCodeForCausalLM"),
    "GPTJForCausalLM": ("gpt_j", "GPTJForCausalLM"),
    "GPTNeoXForCausalLM": ("gpt_neox", "GPTNeoXForCausalLM"),
    "InternLMForCausalLM": ("llama", "LlamaForCausalLM"),
    "InternLM2ForCausalLM": ("internlm2", "InternLM2ForCausalLM"),
    "InternVLChatModel": ("internvl", "InternVLChatModel"),
    "JAISLMHeadModel": ("jais", "JAISLMHeadModel"),
    "LlamaForCausalLM": ("llama", "LlamaForCausalLM"),
    "LlavaForConditionalGeneration":
    ("llava", "LlavaForConditionalGeneration"),
    "LlavaNextForConditionalGeneration":
    ("llava_next", "LlavaNextForConditionalGeneration"),
    # For decapoda-research/llama-*
    "LLaMAForCausalLM": ("llama", "LlamaForCausalLM"),
    "MistralForCausalLM": ("llama", "LlamaForCausalLM"),
    "MixtralForCausalLM": ("mixtral", "MixtralForCausalLM"),
    "QuantMixtralForCausalLM": ("mixtral_quant", "MixtralForCausalLM"),
    # transformers's mpt class has lower case
    "MptForCausalLM": ("mpt", "MPTForCausalLM"),
    "MPTForCausalLM": ("mpt", "MPTForCausalLM"),
    "MiniCPMForCausalLM": ("minicpm", "MiniCPMForCausalLM"),
    "MiniCPMV": ("minicpmv", "MiniCPMV"),
    "NemotronForCausalLM": ("nemotron", "NemotronForCausalLM"),
    "OlmoForCausalLM": ("olmo", "OlmoForCausalLM"),
    "OPTForCausalLM": ("opt", "OPTForCausalLM"),
    "OrionForCausalLM": ("orion", "OrionForCausalLM"),
    "PaliGemmaForConditionalGeneration":
    ("paligemma", "PaliGemmaForConditionalGeneration"),
    "PhiForCausalLM": ("phi", "PhiForCausalLM"),
    "Phi3ForCausalLM": ("llama", "LlamaForCausalLM"),
    "Phi3VForCausalLM": ("phi3v", "Phi3VForCausalLM"),
    "QWenLMHeadModel": ("qwen", "QWenLMHeadModel"),
    "Qwen2ForCausalLM": ("qwen2", "Qwen2ForCausalLM"),
    "Qwen2MoeForCausalLM": ("qwen2_moe", "Qwen2MoeForCausalLM"),
    "RWForCausalLM": ("falcon", "FalconForCausalLM"),
    "StableLMEpochForCausalLM": ("stablelm", "StablelmForCausalLM"),
    "StableLmForCausalLM": ("stablelm", "StablelmForCausalLM"),
    "Starcoder2ForCausalLM": ("starcoder2", "Starcoder2ForCausalLM"),
    "ArcticForCausalLM": ("arctic", "ArcticForCausalLM"),
    "XverseForCausalLM": ("xverse", "XverseForCausalLM"),
    "Phi3SmallForCausalLM": ("phi3_small", "Phi3SmallForCausalLM"),
    "MLPSpeculatorPreTrainedModel": ("mlp_speculator", "MLPSpeculator"),
    "JambaForCausalLM": ("jamba", "JambaForCausalLM"),
    "MedusaModel": ("medusa", "Medusa"),
    "FuyuForCausalLM": ("fuyu", "FuyuForCausalLM"),
    "PersimmonForCausalLM": ("persimmon", "PersimmonForCausalLM"),
    "ChameleonForConditionalGeneration":
    ("chameleon", "ChameleonForConditionalGeneration"),
}
_EMBEDDING_MODELS = {
    "MistralModel": ("llama_embedding", "LlamaEmbeddingModel"),
}

_MODELS = {**_GENERATION_MODELS, **_EMBEDDING_MODELS}
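
# Illustrative note (an assumption for readers, not part of the original
# comments): each key is typically the entry from the `architectures` field
# of a Hugging Face `config.json`, and the (module, class) value is resolved
# lazily by `ModelRegistry._get_model` below, roughly equivalent to:
#
#     from aphrodite.modeling.models.llama import LlamaForCausalLM
#
# for the ("llama", "LlamaForCausalLM") entries.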

# Architecture -> type.
# out of tree models
_OOT_MODELS: Dict[str, Type[nn.Module]] = {}

# Models not supported by ROCm.
_ROCM_UNSUPPORTED_MODELS = []

# Models partially supported by ROCm.
# Architecture -> Reason.
_ROCM_SWA_REASON = ("Sliding window attention (SWA) is not yet supported in "
                    "Triton flash attention. For half-precision SWA support, "
                    "please use CK flash attention by setting "
                    "`APHRODITE_USE_TRITON_FLASH_ATTN=0`")
_ROCM_PARTIALLY_SUPPORTED_MODELS: Dict[str, str] = {
    "Qwen2ForCausalLM":
    _ROCM_SWA_REASON,
    "MistralForCausalLM":
    _ROCM_SWA_REASON,
    "MixtralForCausalLM":
    _ROCM_SWA_REASON,
    "PaliGemmaForConditionalGeneration":
    ("ROCm flash attention does not yet "
     "fully support 32-bit precision on PaliGemma"),
    "Phi3VForCausalLM":
    ("ROCm Triton flash attention may run into compilation errors due to "
     "excessive use of shared memory. If this happens, disable Triton FA "
     "by setting `APHRODITE_USE_TRITON_FLASH_ATTN=0`"),
}
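
# Illustrative sketch (assumption, not taken from this file): the messages
# above point at the `APHRODITE_USE_TRITON_FLASH_ATTN` environment variable,
# which would be disabled from the shell before launching, e.g.:
#
#     export APHRODITE_USE_TRITON_FLASH_ATTN=0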

class ModelRegistry:

    @staticmethod
    @functools.lru_cache(maxsize=128)
    def _get_model(model_arch: str):
        # Lazily import the module that defines the architecture and fetch
        # the model class by name.
        module_name, model_cls_name = _MODELS[model_arch]
        module = importlib.import_module(
            f"aphrodite.modeling.models.{module_name}")
        return getattr(module, model_cls_name, None)

    @staticmethod
    def _try_load_model_cls(model_arch: str) -> Optional[Type[nn.Module]]:
        # Out-of-tree models registered via `register_model` take priority
        # over the built-in architectures.
        if model_arch in _OOT_MODELS:
            return _OOT_MODELS[model_arch]
        if model_arch not in _MODELS:
            return None
        if is_hip():
            if model_arch in _ROCM_UNSUPPORTED_MODELS:
                raise ValueError(
                    f"Model architecture {model_arch} is not supported by "
                    "ROCm for now.")
            if model_arch in _ROCM_PARTIALLY_SUPPORTED_MODELS:
                logger.warning(
                    f"Model architecture {model_arch} is partially "
                    "supported by ROCm: "
                    f"{_ROCM_PARTIALLY_SUPPORTED_MODELS[model_arch]}")
        return ModelRegistry._get_model(model_arch)

    @staticmethod
    def resolve_model_cls(
            architectures: List[str]) -> Tuple[Type[nn.Module], str]:
        # Return the first architecture in the list that can be loaded.
        for arch in architectures:
            model_cls = ModelRegistry._try_load_model_cls(arch)
            if model_cls is not None:
                return (model_cls, arch)

        raise ValueError(
            f"Model architectures {architectures} are not supported for now. "
            f"Supported architectures: {ModelRegistry.get_supported_archs()}")

    @staticmethod
    def get_supported_archs() -> List[str]:
        return list(_MODELS.keys())

    @staticmethod
    def register_model(model_arch: str, model_cls: Type[nn.Module]):
        if model_arch in _MODELS:
            logger.warning(
                f"Model architecture {model_arch} is already "
                "registered, and will be overwritten by the new "
                f"model class {model_cls.__name__}.")

        global _OOT_MODELS
        _OOT_MODELS[model_arch] = model_cls

    @staticmethod
    def is_embedding_model(model_arch: str) -> bool:
        return model_arch in _EMBEDDING_MODELS

__all__ = [
    "ModelRegistry",
]
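
# Usage sketch (illustrative only; `MyModelForCausalLM` is a hypothetical
# out-of-tree `nn.Module` subclass, not something defined in this package):
#
#     from aphrodite.modeling.models import ModelRegistry
#
#     # Resolve the first loadable architecture listed in a model config.
#     model_cls, arch = ModelRegistry.resolve_model_cls(["LlamaForCausalLM"])
#
#     # Register an out-of-tree architecture so it resolves like a built-in.
#     ModelRegistry.register_model("MyModelForCausalLM", MyModelForCausalLM)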