__init__.py
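"""Model registry for Aphrodite.

Maps architecture names (as they appear in a model config's ``architectures``
field) to ``(module name, class name)`` pairs under
``aphrodite.modeling.models``. Model classes are imported lazily via
``importlib`` the first time an architecture is resolved, and out-of-tree
models can be added at runtime through ``ModelRegistry.register_model``.
"""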
import functools
import importlib
from typing import Dict, List, Optional, Tuple, Type

import torch.nn as nn
from loguru import logger

from aphrodite.common.utils import is_hip

_GENERATION_MODELS = {
    "AquilaModel": ("llama", "LlamaForCausalLM"),
    "AquilaForCausalLM": ("llama", "LlamaForCausalLM"),  # AquilaChat2
    "BaiChuanForCausalLM": ("baichuan", "BaiChuanForCausalLM"),  # baichuan-7b
    "BaichuanForCausalLM": ("baichuan", "BaichuanForCausalLM"),  # baichuan-13b
    "BloomForCausalLM": ("bloom", "BloomForCausalLM"),
    "ChatGLMModel": ("chatglm", "ChatGLMForCausalLM"),
    "ChatGLMForConditionalGeneration": ("chatglm", "ChatGLMForCausalLM"),
    "CohereForCausalLM": ("commandr", "CohereForCausalLM"),
    "DbrxForCausalLM": ("dbrx", "DbrxForCausalLM"),
    "DeciLMForCausalLM": ("decilm", "DeciLMForCausalLM"),
    "DeepseekForCausalLM": ("deepseek", "DeepseekForCausalLM"),
    "DeepseekV2ForCausalLM": ("deepseek_v2", "DeepseekV2ForCausalLM"),
    "FalconForCausalLM": ("falcon", "FalconForCausalLM"),
    "GemmaForCausalLM": ("gemma", "GemmaForCausalLM"),
    "Gemma2ForCausalLM": ("gemma2", "Gemma2ForCausalLM"),
    "GPT2LMHeadModel": ("gpt2", "GPT2LMHeadModel"),
    "GPTBigCodeForCausalLM": ("gpt_bigcode", "GPTBigCodeForCausalLM"),
    "GPTJForCausalLM": ("gpt_j", "GPTJForCausalLM"),
    "GPTNeoXForCausalLM": ("gpt_neox", "GPTNeoXForCausalLM"),
    "InternLMForCausalLM": ("llama", "LlamaForCausalLM"),
    "InternLM2ForCausalLM": ("internlm2", "InternLM2ForCausalLM"),
    "JAISLMHeadModel": ("jais", "JAISLMHeadModel"),
    "LlamaForCausalLM": ("llama", "LlamaForCausalLM"),
    # For decapoda-research/llama-*
    "LLaMAForCausalLM": ("llama", "LlamaForCausalLM"),
    "MistralForCausalLM": ("llama", "LlamaForCausalLM"),
    "MixtralForCausalLM": ("mixtral", "MixtralForCausalLM"),
    "QuantMixtralForCausalLM": ("mixtral_quant", "MixtralForCausalLM"),
    # transformers's mpt class has lower case
    "MptForCausalLM": ("mpt", "MPTForCausalLM"),
    "MPTForCausalLM": ("mpt", "MPTForCausalLM"),
    "MiniCPMForCausalLM": ("minicpm", "MiniCPMForCausalLM"),
    "NemotronForCausalLM": ("nemotron", "NemotronForCausalLM"),
    "OlmoForCausalLM": ("olmo", "OlmoForCausalLM"),
    "OPTForCausalLM": ("opt", "OPTForCausalLM"),
    "OrionForCausalLM": ("orion", "OrionForCausalLM"),
    "PhiForCausalLM": ("phi", "PhiForCausalLM"),
    "Phi3ForCausalLM": ("llama", "LlamaForCausalLM"),
    "QWenLMHeadModel": ("qwen", "QWenLMHeadModel"),
    "Qwen2ForCausalLM": ("qwen2", "Qwen2ForCausalLM"),
    "Qwen2MoeForCausalLM": ("qwen2_moe", "Qwen2MoeForCausalLM"),
    "RWForCausalLM": ("falcon", "FalconForCausalLM"),
    "StableLMEpochForCausalLM": ("stablelm", "StablelmForCausalLM"),
    "StableLmForCausalLM": ("stablelm", "StablelmForCausalLM"),
    "Starcoder2ForCausalLM": ("starcoder2", "Starcoder2ForCausalLM"),
    "ArcticForCausalLM": ("arctic", "ArcticForCausalLM"),
    "XverseForCausalLM": ("xverse", "XverseForCausalLM"),
    "Phi3SmallForCausalLM": ("phi3_small", "Phi3SmallForCausalLM"),
    "MLPSpeculatorPreTrainedModel": ("mlp_speculator", "MLPSpeculator"),
    "JambaForCausalLM": ("jamba", "JambaForCausalLM"),
    "MambaForCausalLM": ("mamba", "MambaForCausalLM"),
    "MedusaModel": ("medusa", "Medusa"),
    "PersimmonForCausalLM": ("persimmon", "PersimmonForCausalLM"),
    "SolarForCausalLM": ("solar", "SolarForCausalLM"),
    "ExaoneForCausalLM": ("exaone", "ExaoneForCausalLM"),
}

_EMBEDDING_MODELS = {
    "MistralModel": ("llama_embedding", "LlamaEmbeddingModel"),
}

_MULTIMODAL_MODELS = {
    "Blip2ForConditionalGeneration":
        ("blip2", "Blip2ForConditionalGeneration"),
    "ChameleonForConditionalGeneration":
        ("chameleon", "ChameleonForConditionalGeneration"),
    "FuyuForCausalLM": ("fuyu", "FuyuForCausalLM"),
    "InternVLChatModel": ("internvl", "InternVLChatModel"),
    "LlavaForConditionalGeneration":
        ("llava", "LlavaForConditionalGeneration"),
    "LlavaNextForConditionalGeneration":
        ("llava_next", "LlavaNextForConditionalGeneration"),
    "MiniCPMV": ("minicpmv", "MiniCPMV"),
    "PaliGemmaForConditionalGeneration":
        ("paligemma", "PaliGemmaForConditionalGeneration"),
    "Phi3VForCausalLM": ("phi3v", "Phi3VForCausalLM"),
}

_CONDITIONAL_GENERATION_MODELS = {
    "BartModel": ("bart", "BartForConditionalGeneration"),
    "BartForConditionalGeneration": ("bart", "BartForConditionalGeneration"),
}

_MODELS = {
    **_GENERATION_MODELS,
    **_EMBEDDING_MODELS,
    **_MULTIMODAL_MODELS,
    **_CONDITIONAL_GENERATION_MODELS,
}

# Out-of-tree models registered at runtime: architecture -> model class.
_OOT_MODELS: Dict[str, Type[nn.Module]] = {}

# Models not supported by ROCm.
_ROCM_UNSUPPORTED_MODELS: List[str] = []

# Models partially supported by ROCm: architecture -> reason.
_ROCM_SWA_REASON = ("Sliding window attention (SWA) is not yet supported in "
                    "Triton flash attention. For half-precision SWA support, "
                    "please use CK flash attention by setting "
                    "`APHRODITE_USE_TRITON_FLASH_ATTN=0`")
_ROCM_PARTIALLY_SUPPORTED_MODELS: Dict[str, str] = {
    "Qwen2ForCausalLM": _ROCM_SWA_REASON,
    "MistralForCausalLM": _ROCM_SWA_REASON,
    "MixtralForCausalLM": _ROCM_SWA_REASON,
    "PaliGemmaForConditionalGeneration":
        ("ROCm flash attention does not yet "
         "fully support 32-bit precision on PaliGemma"),
    "Phi3VForCausalLM":
        ("ROCm Triton flash attention may run into compilation errors due to "
         "excessive use of shared memory. If this happens, disable Triton FA "
         "by setting `APHRODITE_USE_TRITON_FLASH_ATTN=0`"),
}

class ModelRegistry:

    @staticmethod
    @functools.lru_cache(maxsize=128)
    def _get_model(model_arch: str):
        module_name, model_cls_name = _MODELS[model_arch]
        module = importlib.import_module(
            f"aphrodite.modeling.models.{module_name}")
        return getattr(module, model_cls_name, None)

    @staticmethod
    def _try_load_model_cls(model_arch: str) -> Optional[Type[nn.Module]]:
        if model_arch in _OOT_MODELS:
            return _OOT_MODELS[model_arch]
        if model_arch not in _MODELS:
            return None
        if is_hip():
            if model_arch in _ROCM_UNSUPPORTED_MODELS:
                raise ValueError(
                    f"Model architecture {model_arch} is not supported by "
                    "ROCm for now.")
            if model_arch in _ROCM_PARTIALLY_SUPPORTED_MODELS:
                logger.warning(
                    f"Model architecture {model_arch} is partially "
                    "supported by ROCm: "
                    f"{_ROCM_PARTIALLY_SUPPORTED_MODELS[model_arch]}")
        return ModelRegistry._get_model(model_arch)

    @staticmethod
    def resolve_model_cls(
            architectures: List[str]) -> Tuple[Type[nn.Module], str]:
        for arch in architectures:
            model_cls = ModelRegistry._try_load_model_cls(arch)
            if model_cls is not None:
                return (model_cls, arch)

        raise ValueError(
            f"Model architectures {architectures} are not supported for now. "
            f"Supported architectures: {ModelRegistry.get_supported_archs()}")

    @staticmethod
    def get_supported_archs() -> List[str]:
        return list(_MODELS.keys()) + list(_OOT_MODELS.keys())

    @staticmethod
    def register_model(model_arch: str, model_cls: Type[nn.Module]):
        if model_arch in _MODELS:
            logger.warning(f"Model architecture {model_arch} is already "
                           "registered, and will be overwritten by the new "
                           f"model class {model_cls.__name__}.")
        global _OOT_MODELS
        _OOT_MODELS[model_arch] = model_cls

    @staticmethod
    def is_embedding_model(model_arch: str) -> bool:
        return model_arch in _EMBEDDING_MODELS

    @staticmethod
    def is_multimodal_model(model_arch: str) -> bool:
        # TODO: find a way to avoid initializing CUDA prematurely so that
        # `supports_multimodal` can be used to determine if a model is
        # multimodal:
        # model_cls = ModelRegistry._try_load_model_cls(model_arch)
        # from aphrodite.modeling.models.interfaces import supports_multimodal
        return model_arch in _MULTIMODAL_MODELS


__all__ = [
    "ModelRegistry",
]
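
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the module): registering an
# out-of-tree architecture and resolving it by name. `MyCustomForCausalLM`
# is a hypothetical placeholder class; the `ModelRegistry` calls are the
# real API defined above.
#
#     import torch.nn as nn
#     from aphrodite.modeling.models import ModelRegistry
#
#     class MyCustomForCausalLM(nn.Module):  # hypothetical model class
#         ...
#
#     ModelRegistry.register_model("MyCustomForCausalLM", MyCustomForCausalLM)
#     model_cls, arch = ModelRegistry.resolve_model_cls(
#         ["MyCustomForCausalLM"])  # -> (MyCustomForCausalLM, arch name)
# ---------------------------------------------------------------------------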