__init__.py

import functools
import importlib
from typing import Dict, List, Optional, Tuple, Type

import torch.nn as nn
from loguru import logger

from aphrodite.common.utils import is_hip

_GENERATION_MODELS = {
    "AquilaModel": ("llama", "LlamaForCausalLM"),
    "AquilaForCausalLM": ("llama", "LlamaForCausalLM"),  # AquilaChat2
    "BaiChuanForCausalLM": ("baichuan", "BaiChuanForCausalLM"),  # baichuan-7b
    "BaichuanForCausalLM": ("baichuan", "BaichuanForCausalLM"),  # baichuan-13b
    "BloomForCausalLM": ("bloom", "BloomForCausalLM"),
    "ChatGLMModel": ("chatglm", "ChatGLMForCausalLM"),
    "ChatGLMForConditionalGeneration": ("chatglm", "ChatGLMForCausalLM"),
    "CohereForCausalLM": ("commandr", "CohereForCausalLM"),
    "DbrxForCausalLM": ("dbrx", "DbrxForCausalLM"),
    "DeciLMForCausalLM": ("decilm", "DeciLMForCausalLM"),
    "DeepseekForCausalLM": ("deepseek", "DeepseekForCausalLM"),
    "DeepseekV2ForCausalLM": ("deepseek_v2", "DeepseekV2ForCausalLM"),
    "FalconForCausalLM": ("falcon", "FalconForCausalLM"),
    "GemmaForCausalLM": ("gemma", "GemmaForCausalLM"),
    "Gemma2ForCausalLM": ("gemma2", "Gemma2ForCausalLM"),
    "GPT2LMHeadModel": ("gpt2", "GPT2LMHeadModel"),
    "GPTBigCodeForCausalLM": ("gpt_bigcode", "GPTBigCodeForCausalLM"),
    "GPTJForCausalLM": ("gpt_j", "GPTJForCausalLM"),
    "GPTNeoXForCausalLM": ("gpt_neox", "GPTNeoXForCausalLM"),
    "InternLMForCausalLM": ("llama", "LlamaForCausalLM"),
    "InternLM2ForCausalLM": ("internlm2", "InternLM2ForCausalLM"),
    "JAISLMHeadModel": ("jais", "JAISLMHeadModel"),
    "LlamaForCausalLM": ("llama", "LlamaForCausalLM"),
    # For decapoda-research/llama-*
    "LLaMAForCausalLM": ("llama", "LlamaForCausalLM"),
    "MistralForCausalLM": ("llama", "LlamaForCausalLM"),
    "MixtralForCausalLM": ("mixtral", "MixtralForCausalLM"),
    "QuantMixtralForCausalLM": ("mixtral_quant", "MixtralForCausalLM"),
    # transformers's mpt class has lower case
    "MptForCausalLM": ("mpt", "MPTForCausalLM"),
    "MPTForCausalLM": ("mpt", "MPTForCausalLM"),
    "MiniCPMForCausalLM": ("minicpm", "MiniCPMForCausalLM"),
    "NemotronForCausalLM": ("nemotron", "NemotronForCausalLM"),
    "OlmoForCausalLM": ("olmo", "OlmoForCausalLM"),
    "OlmoeForCausalLM": ("olmoe", "OlmoeForCausalLM"),
    "OPTForCausalLM": ("opt", "OPTForCausalLM"),
    "OrionForCausalLM": ("orion", "OrionForCausalLM"),
    "PhiForCausalLM": ("phi", "PhiForCausalLM"),
    "Phi3ForCausalLM": ("llama", "LlamaForCausalLM"),
    "QWenLMHeadModel": ("qwen", "QWenLMHeadModel"),
    "Qwen2ForCausalLM": ("qwen2", "Qwen2ForCausalLM"),
    "Qwen2MoeForCausalLM": ("qwen2_moe", "Qwen2MoeForCausalLM"),
    "RWForCausalLM": ("falcon", "FalconForCausalLM"),
    "StableLMEpochForCausalLM": ("stablelm", "StablelmForCausalLM"),
    "StableLmForCausalLM": ("stablelm", "StablelmForCausalLM"),
    "Starcoder2ForCausalLM": ("starcoder2", "Starcoder2ForCausalLM"),
    "ArcticForCausalLM": ("arctic", "ArcticForCausalLM"),
    "XverseForCausalLM": ("xverse", "XverseForCausalLM"),
    "Phi3SmallForCausalLM": ("phi3_small", "Phi3SmallForCausalLM"),
    "MLPSpeculatorPreTrainedModel": ("mlp_speculator", "MLPSpeculator"),
    "JambaForCausalLM": ("jamba", "JambaForCausalLM"),
    "MambaForCausalLM": ("mamba", "MambaForCausalLM"),
    "MedusaModel": ("medusa", "Medusa"),
    "PersimmonForCausalLM": ("persimmon", "PersimmonForCausalLM"),
    "SolarForCausalLM": ("solar", "SolarForCausalLM"),
    "ExaoneForCausalLM": ("exaone", "ExaoneForCausalLM"),
    "HunYuanForCausalLM": ("hunyuan", "HunYuanForCausalLM"),
}

_EMBEDDING_MODELS = {
    "MistralModel": ("llama_embedding", "LlamaEmbeddingModel"),
}

_MULTIMODAL_MODELS = {
    "Blip2ForConditionalGeneration":
    ("blip2", "Blip2ForConditionalGeneration"),
    "ChameleonForConditionalGeneration":
    ("chameleon", "ChameleonForConditionalGeneration"),
    "FuyuForCausalLM": ("fuyu", "FuyuForCausalLM"),
    "InternVLChatModel": ("internvl", "InternVLChatModel"),
    "LlavaForConditionalGeneration":
    ("llava", "LlavaForConditionalGeneration"),
    "LlavaNextForConditionalGeneration":
    ("llava_next", "LlavaNextForConditionalGeneration"),
    "MiniCPMV": ("minicpmv", "MiniCPMV"),
    "PaliGemmaForConditionalGeneration":
    ("paligemma", "PaliGemmaForConditionalGeneration"),
    "Phi3VForCausalLM": ("phi3v", "Phi3VForCausalLM"),
}

_CONDITIONAL_GENERATION_MODELS = {
    "BartModel": ("bart", "BartForConditionalGeneration"),
    "BartForConditionalGeneration": ("bart", "BartForConditionalGeneration"),
}

_MODELS = {
    **_GENERATION_MODELS,
    **_EMBEDDING_MODELS,
    **_MULTIMODAL_MODELS,
    **_CONDITIONAL_GENERATION_MODELS,
}
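
# Each key above is a Hugging Face `architectures` entry; the value names the
# submodule under `aphrodite.modeling.models` and the class to load from it.
# Classes are imported lazily, only when an architecture is first requested
# (see `ModelRegistry._get_model` below), e.g. "LlamaForCausalLM" resolves to
# `aphrodite.modeling.models.llama.LlamaForCausalLM`.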

# Architecture -> type.
# Out-of-tree models.
_OOT_MODELS: Dict[str, Type[nn.Module]] = {}

# Models not supported by ROCm.
_ROCM_UNSUPPORTED_MODELS: List[str] = []

# Models partially supported by ROCm.
# Architecture -> Reason.
_ROCM_SWA_REASON = ("Sliding window attention (SWA) is not yet supported in "
                    "Triton flash attention. For half-precision SWA support, "
                    "please use CK flash attention by setting "
                    "`APHRODITE_USE_TRITON_FLASH_ATTN=0`")
_ROCM_PARTIALLY_SUPPORTED_MODELS: Dict[str, str] = {
    "Qwen2ForCausalLM":
    _ROCM_SWA_REASON,
    "MistralForCausalLM":
    _ROCM_SWA_REASON,
    "MixtralForCausalLM":
    _ROCM_SWA_REASON,
    "PaliGemmaForConditionalGeneration":
    ("ROCm flash attention does not yet "
     "fully support 32-bit precision on PaliGemma"),
    "Phi3VForCausalLM":
    ("ROCm Triton flash attention may run into compilation errors due to "
     "excessive use of shared memory. If this happens, disable Triton FA "
     "by setting `APHRODITE_USE_TRITON_FLASH_ATTN=0`")
}
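
# `_ROCM_UNSUPPORTED_MODELS` and `_ROCM_PARTIALLY_SUPPORTED_MODELS` are
# consulted in `ModelRegistry._try_load_model_cls` below: on ROCm, unsupported
# architectures raise a ValueError, while partially supported ones only log a
# warning with the reason.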


class ModelRegistry:
    """Registry that lazily resolves model architecture names to classes."""

    @staticmethod
    @functools.lru_cache(maxsize=128)
    def _get_model(model_arch: str):
        module_name, model_cls_name = _MODELS[model_arch]
        module = importlib.import_module(
            f"aphrodite.modeling.models.{module_name}")
        return getattr(module, model_cls_name, None)

    @staticmethod
    def _try_load_model_cls(model_arch: str) -> Optional[Type[nn.Module]]:
        if model_arch in _OOT_MODELS:
            return _OOT_MODELS[model_arch]
        if model_arch not in _MODELS:
            return None
        if is_hip():
            if model_arch in _ROCM_UNSUPPORTED_MODELS:
                raise ValueError(
                    f"Model architecture {model_arch} is not supported by "
                    "ROCm for now.")
            if model_arch in _ROCM_PARTIALLY_SUPPORTED_MODELS:
                logger.warning(
                    f"Model architecture {model_arch} is partially "
                    "supported by ROCm: "
                    f"{_ROCM_PARTIALLY_SUPPORTED_MODELS[model_arch]}")

        return ModelRegistry._get_model(model_arch)

    @staticmethod
    def resolve_model_cls(
            architectures: List[str]) -> Tuple[Type[nn.Module], str]:
        """Return the first resolvable (model class, architecture) pair."""
        for arch in architectures:
            model_cls = ModelRegistry._try_load_model_cls(arch)
            if model_cls is not None:
                return (model_cls, arch)

        raise ValueError(
            f"Model architectures {architectures} are not supported for now. "
            f"Supported architectures: {ModelRegistry.get_supported_archs()}")

    @staticmethod
    def get_supported_archs() -> List[str]:
        return list(_MODELS.keys()) + list(_OOT_MODELS.keys())

    @staticmethod
    def register_model(model_arch: str, model_cls: Type[nn.Module]):
        """Register an out-of-tree model class under `model_arch`."""
        if model_arch in _MODELS:
            logger.warning(f"Model architecture {model_arch} is already "
                           "registered, and will be overwritten by the new "
                           f"model class {model_cls.__name__}.")
        global _OOT_MODELS
        _OOT_MODELS[model_arch] = model_cls

    @staticmethod
    def is_embedding_model(model_arch: str) -> bool:
        return model_arch in _EMBEDDING_MODELS

    @staticmethod
    def is_multimodal_model(model_arch: str) -> bool:
        # TODO: find a way to avoid initializing CUDA prematurely to
        # use `supports_multimodal` to determine if a model is multimodal
        # model_cls = ModelRegistry._try_load_model_cls(model_arch)
        # from aphrodite.modeling.models.interfaces import supports_multimodal
        return model_arch in _MULTIMODAL_MODELS
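
# Illustrative usage sketch (kept as a comment; nothing here is executed).
# `MyModelForCausalLM` is a hypothetical out-of-tree `nn.Module` subclass, and
# the import path assumes this package is importable as
# `aphrodite.modeling.models`:
#
#     from aphrodite.modeling.models import ModelRegistry
#
#     ModelRegistry.register_model("MyModelForCausalLM", MyModelForCausalLM)
#     model_cls, arch = ModelRegistry.resolve_model_cls(
#         ["MyModelForCausalLM", "LlamaForCausalLM"])
#     assert arch == "MyModelForCausalLM"
#     assert "MyModelForCausalLM" in ModelRegistry.get_supported_archs()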

__all__ = [
    "ModelRegistry",
]