__init__.py

import functools
import importlib
from typing import Dict, List, Optional, Tuple, Type

import torch.nn as nn
from loguru import logger

from aphrodite.common.utils import is_hip
# Architecture -> (module, class).
_GENERATION_MODELS = {
    "AquilaModel": ("llama", "LlamaForCausalLM"),
    "AquilaForCausalLM": ("llama", "LlamaForCausalLM"),  # AquilaChat2
    "BaiChuanForCausalLM": ("baichuan", "BaiChuanForCausalLM"),  # baichuan-7b
    "BaichuanForCausalLM": ("baichuan", "BaichuanForCausalLM"),  # baichuan-13b
    "BloomForCausalLM": ("bloom", "BloomForCausalLM"),
    "Blip2ForConditionalGeneration":
    ("blip2", "Blip2ForConditionalGeneration"),
    "ChatGLMModel": ("chatglm", "ChatGLMForCausalLM"),
    "ChatGLMForConditionalGeneration": ("chatglm", "ChatGLMForCausalLM"),
    "CohereForCausalLM": ("commandr", "CohereForCausalLM"),
    "DbrxForCausalLM": ("dbrx", "DbrxForCausalLM"),
    "DeciLMForCausalLM": ("decilm", "DeciLMForCausalLM"),
    "DeepseekForCausalLM": ("deepseek", "DeepseekForCausalLM"),
    "DeepseekV2ForCausalLM": ("deepseek_v2", "DeepseekV2ForCausalLM"),
    "FalconForCausalLM": ("falcon", "FalconForCausalLM"),
    "GemmaForCausalLM": ("gemma", "GemmaForCausalLM"),
    "Gemma2ForCausalLM": ("gemma2", "Gemma2ForCausalLM"),
    "GPT2LMHeadModel": ("gpt2", "GPT2LMHeadModel"),
    "GPTBigCodeForCausalLM": ("gpt_bigcode", "GPTBigCodeForCausalLM"),
    "GPTJForCausalLM": ("gpt_j", "GPTJForCausalLM"),
    "GPTNeoXForCausalLM": ("gpt_neox", "GPTNeoXForCausalLM"),
    "InternLMForCausalLM": ("llama", "LlamaForCausalLM"),
    "InternLM2ForCausalLM": ("internlm2", "InternLM2ForCausalLM"),
    "InternVLChatModel": ("internvl", "InternVLChatModel"),
    "JAISLMHeadModel": ("jais", "JAISLMHeadModel"),
    "LlamaForCausalLM": ("llama", "LlamaForCausalLM"),
    "LlavaForConditionalGeneration":
    ("llava", "LlavaForConditionalGeneration"),
    "LlavaNextForConditionalGeneration":
    ("llava_next", "LlavaNextForConditionalGeneration"),
    # For decapoda-research/llama-*
    "LLaMAForCausalLM": ("llama", "LlamaForCausalLM"),
    "MistralForCausalLM": ("llama", "LlamaForCausalLM"),
    "MixtralForCausalLM": ("mixtral", "MixtralForCausalLM"),
    "QuantMixtralForCausalLM": ("mixtral_quant", "MixtralForCausalLM"),
    # transformers's mpt class has lower case
    "MptForCausalLM": ("mpt", "MPTForCausalLM"),
    "MPTForCausalLM": ("mpt", "MPTForCausalLM"),
    "MiniCPMForCausalLM": ("minicpm", "MiniCPMForCausalLM"),
    "MiniCPMV": ("minicpmv", "MiniCPMV"),
    "NemotronForCausalLM": ("nemotron", "NemotronForCausalLM"),
    "OlmoForCausalLM": ("olmo", "OlmoForCausalLM"),
    "OPTForCausalLM": ("opt", "OPTForCausalLM"),
    "OrionForCausalLM": ("orion", "OrionForCausalLM"),
    "PaliGemmaForConditionalGeneration":
    ("paligemma", "PaliGemmaForConditionalGeneration"),
    "PhiForCausalLM": ("phi", "PhiForCausalLM"),
    "Phi3ForCausalLM": ("llama", "LlamaForCausalLM"),
    "Phi3VForCausalLM": ("phi3v", "Phi3VForCausalLM"),
    "QWenLMHeadModel": ("qwen", "QWenLMHeadModel"),
    "Qwen2ForCausalLM": ("qwen2", "Qwen2ForCausalLM"),
    "Qwen2MoeForCausalLM": ("qwen2_moe", "Qwen2MoeForCausalLM"),
    "RWForCausalLM": ("falcon", "FalconForCausalLM"),
    "StableLMEpochForCausalLM": ("stablelm", "StablelmForCausalLM"),
    "StableLmForCausalLM": ("stablelm", "StablelmForCausalLM"),
    "Starcoder2ForCausalLM": ("starcoder2", "Starcoder2ForCausalLM"),
    "ArcticForCausalLM": ("arctic", "ArcticForCausalLM"),
    "XverseForCausalLM": ("xverse", "XverseForCausalLM"),
    "Phi3SmallForCausalLM": ("phi3_small", "Phi3SmallForCausalLM"),
    "MLPSpeculatorPreTrainedModel": ("mlp_speculator", "MLPSpeculator"),
    "JambaForCausalLM": ("jamba", "JambaForCausalLM"),
    "MambaForCausalLM": ("mamba", "MambaForCausalLM"),
    "MedusaModel": ("medusa", "Medusa"),
    "FuyuForCausalLM": ("fuyu", "FuyuForCausalLM"),
    "PersimmonForCausalLM": ("persimmon", "PersimmonForCausalLM"),
    "ChameleonForConditionalGeneration":
    ("chameleon", "ChameleonForConditionalGeneration"),
}
_EMBEDDING_MODELS = {
    "MistralModel": ("llama_embedding", "LlamaEmbeddingModel"),
}

_CONDITIONAL_GENERATION_MODELS = {
    "BartModel": ("bart", "BartForConditionalGeneration"),
    "BartForConditionalGeneration": ("bart", "BartForConditionalGeneration"),
}

_MODELS = {
    **_GENERATION_MODELS,
    **_EMBEDDING_MODELS,
    **_CONDITIONAL_GENERATION_MODELS
}
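
# Illustration only (hypothetical snippet, mirroring ModelRegistry._get_model
# below): an entry such as "LlamaForCausalLM": ("llama", "LlamaForCausalLM")
# is resolved lazily, roughly as
#
#   module = importlib.import_module("aphrodite.modeling.models.llama")
#   model_cls = getattr(module, "LlamaForCausalLM", None)
#
# so a model module is only imported once its architecture is requested.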
# Architecture -> type.
# out of tree models
_OOT_MODELS: Dict[str, Type[nn.Module]] = {}

# Models not supported by ROCm.
_ROCM_UNSUPPORTED_MODELS = []
# Models partially supported by ROCm.
# Architecture -> Reason.
_ROCM_SWA_REASON = ("Sliding window attention (SWA) is not yet supported in "
                    "Triton flash attention. For half-precision SWA support, "
                    "please use CK flash attention by setting "
                    "`APHRODITE_USE_TRITON_FLASH_ATTN=0`")
_ROCM_PARTIALLY_SUPPORTED_MODELS: Dict[str, str] = {
    "Qwen2ForCausalLM":
    _ROCM_SWA_REASON,
    "MistralForCausalLM":
    _ROCM_SWA_REASON,
    "MixtralForCausalLM":
    _ROCM_SWA_REASON,
    "PaliGemmaForConditionalGeneration":
    ("ROCm flash attention does not yet "
     "fully support 32-bit precision on PaliGemma"),
    "Phi3VForCausalLM":
    ("ROCm Triton flash attention may run into compilation errors due to "
     "excessive use of shared memory. If this happens, disable Triton FA "
     "by setting `APHRODITE_USE_TRITON_FLASH_ATTN=0`"),
}


class ModelRegistry:

    @staticmethod
    @functools.lru_cache(maxsize=128)
    def _get_model(model_arch: str):
        module_name, model_cls_name = _MODELS[model_arch]
        module = importlib.import_module(
            f"aphrodite.modeling.models.{module_name}")
        return getattr(module, model_cls_name, None)

    @staticmethod
    def _try_load_model_cls(model_arch: str) -> Optional[Type[nn.Module]]:
        if model_arch in _OOT_MODELS:
            return _OOT_MODELS[model_arch]
        if model_arch not in _MODELS:
            return None
        if is_hip():
            if model_arch in _ROCM_UNSUPPORTED_MODELS:
                raise ValueError(
                    f"Model architecture {model_arch} is not supported by "
                    "ROCm for now.")
            if model_arch in _ROCM_PARTIALLY_SUPPORTED_MODELS:
                logger.warning(
                    f"Model architecture {model_arch} is partially "
                    "supported by ROCm: "
                    f"{_ROCM_PARTIALLY_SUPPORTED_MODELS[model_arch]}")
        return ModelRegistry._get_model(model_arch)

    @staticmethod
    def resolve_model_cls(
            architectures: List[str]) -> Tuple[Type[nn.Module], str]:
        for arch in architectures:
            model_cls = ModelRegistry._try_load_model_cls(arch)
            if model_cls is not None:
                return (model_cls, arch)
        raise ValueError(
            f"Model architectures {architectures} are not supported for now. "
            f"Supported architectures: {ModelRegistry.get_supported_archs()}")

    @staticmethod
    def get_supported_archs() -> List[str]:
        return list(_MODELS.keys())

    @staticmethod
    def register_model(model_arch: str, model_cls: Type[nn.Module]):
        if model_arch in _MODELS:
            logger.warning(f"Model architecture {model_arch} is already "
                           "registered, and will be overwritten by the new "
                           f"model class {model_cls.__name__}.")
        global _OOT_MODELS
        _OOT_MODELS[model_arch] = model_cls

    @staticmethod
    def is_embedding_model(model_arch: str) -> bool:
        return model_arch in _EMBEDDING_MODELS

__all__ = [
    "ModelRegistry",
]
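
# A minimal usage sketch (hypothetical caller code, not part of this module;
# `MyModelForCausalLM` stands in for a user-defined nn.Module subclass):
#
#   from aphrodite.modeling.models import ModelRegistry
#
#   # Resolve the first supported architecture listed in a model config.
#   model_cls, arch = ModelRegistry.resolve_model_cls(["LlamaForCausalLM"])
#
#   # Register an out-of-tree architecture so the resolver can find it.
#   ModelRegistry.register_model("MyModelForCausalLM", MyModelForCausalLM)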