import importlib
from typing import Dict, List, Optional, Type

import torch.nn as nn
from loguru import logger

from aphrodite.common.utils import is_hip

# Architecture -> (module, class).
_GENERATION_MODELS = {
    "AquilaModel": ("llama", "LlamaForCausalLM"),
    "AquilaForCausalLM": ("llama", "LlamaForCausalLM"),  # AquilaChat2
    "BaiChuanForCausalLM": ("baichuan", "BaiChuanForCausalLM"),  # baichuan-7b
    "BaichuanForCausalLM": ("baichuan", "BaichuanForCausalLM"),  # baichuan-13b
    "BloomForCausalLM": ("bloom", "BloomForCausalLM"),
    "ChatGLMModel": ("chatglm", "ChatGLMForCausalLM"),
    "ChatGLMForConditionalGeneration": ("chatglm", "ChatGLMForCausalLM"),
    "CohereForCausalLM": ("commandr", "CohereForCausalLM"),
    "DbrxForCausalLM": ("dbrx", "DbrxForCausalLM"),
    "DeciLMForCausalLM": ("decilm", "DeciLMForCausalLM"),
    "DeepseekForCausalLM": ("deepseek", "DeepseekForCausalLM"),
    "DeepseekV2ForCausalLM": ("deepseek_v2", "DeepseekV2ForCausalLM"),
    "FalconForCausalLM": ("falcon", "FalconForCausalLM"),
    "GemmaForCausalLM": ("gemma", "GemmaForCausalLM"),
    "GPT2LMHeadModel": ("gpt2", "GPT2LMHeadModel"),
    "GPTBigCodeForCausalLM": ("gpt_bigcode", "GPTBigCodeForCausalLM"),
    "GPTJForCausalLM": ("gpt_j", "GPTJForCausalLM"),
    "GPTNeoXForCausalLM": ("gpt_neox", "GPTNeoXForCausalLM"),
    "InternLMForCausalLM": ("llama", "LlamaForCausalLM"),
    "InternLM2ForCausalLM": ("internlm2", "InternLM2ForCausalLM"),
    "JAISLMHeadModel": ("jais", "JAISLMHeadModel"),
    "LlamaForCausalLM": ("llama", "LlamaForCausalLM"),
    "LlavaForConditionalGeneration":
    ("llava", "LlavaForConditionalGeneration"),
    "LlavaNextForConditionalGeneration":
    ("llava_next", "LlavaNextForConditionalGeneration"),
    # For decapoda-research/llama-*
    "LLaMAForCausalLM": ("llama", "LlamaForCausalLM"),
    "MistralForCausalLM": ("llama", "LlamaForCausalLM"),
    "MixtralForCausalLM": ("mixtral", "MixtralForCausalLM"),
    "QuantMixtralForCausalLM": ("mixtral_quant", "MixtralForCausalLM"),
    # The transformers MPT class name is lower-cased, so map both spellings.
  40. "MptForCausalLM": ("mpt", "MPTForCausalLM"),
  41. "MPTForCausalLM": ("mpt", "MPTForCausalLM"),
  42. "MiniCPMForCausalLM": ("minicpm", "MiniCPMForCausalLM"),
  43. "OlmoForCausalLM": ("olmo", "OlmoForCausalLM"),
  44. "OPTForCausalLM": ("opt", "OPTForCausalLM"),
  45. "OrionForCausalLM": ("orion", "OrionForCausalLM"),
  46. "PhiForCausalLM": ("phi", "PhiForCausalLM"),
  47. "Phi3ForCausalLM": ("llama", "LlamaForCausalLM"),
  48. "Phi3VForCausalLM": ("phi3v", "Phi3VForCausalLM"),
  49. "QWenLMHeadModel": ("qwen", "QWenLMHeadModel"),
  50. "Qwen2ForCausalLM": ("qwen2", "Qwen2ForCausalLM"),
  51. "Qwen2MoeForCausalLM": ("qwen2_moe", "Qwen2MoeForCausalLM"),
  52. "RWForCausalLM": ("falcon", "FalconForCausalLM"),
  53. "StableLMEpochForCausalLM": ("stablelm", "StablelmForCausalLM"),
  54. "StableLmForCausalLM": ("stablelm", "StablelmForCausalLM"),
  55. "Starcoder2ForCausalLM": ("starcoder2", "Starcoder2ForCausalLM"),
  56. "ArcticForCausalLM": ("arctic", "ArcticForCausalLM"),
  57. "XverseForCausalLM": ("xverse", "XverseForCausalLM"),
  58. "Phi3SmallForCausalLM": ("phi3_small", "Phi3SmallForCausalLM"),
  59. "MLPSpeculatorPreTrainedModel": ("mlp_speculator", "MLPSpeculator"),
  60. }

_EMBEDDING_MODELS = {
    "MistralModel": ("llama_embedding", "LlamaEmbeddingModel"),
}

_MODELS = {**_GENERATION_MODELS, **_EMBEDDING_MODELS}

# Out-of-tree models, registered at runtime via ModelRegistry.register_model.
# Architecture -> type.
_OOT_MODELS: Dict[str, Type[nn.Module]] = {}

# Models not supported by ROCm.
_ROCM_UNSUPPORTED_MODELS = []

# Models partially supported by ROCm.
# Architecture -> Reason.
_ROCM_PARTIALLY_SUPPORTED_MODELS = {
    "Qwen2ForCausalLM":
    "Sliding window attention is not yet supported in ROCm's flash attention",
    "MistralForCausalLM":
    "Sliding window attention is not yet supported in ROCm's flash attention",
    "MixtralForCausalLM":
    "Sliding window attention is not yet supported in ROCm's flash attention",
}


class ModelRegistry:
    """Maps architecture names (as reported in a model's config) to the
    Aphrodite model classes that implement them."""

    @staticmethod
    def load_model_cls(model_arch: str) -> Optional[Type[nn.Module]]:
        """Resolve an architecture name to its model class, or return None
        if the architecture is unknown."""
        # Out-of-tree registrations take precedence over built-in models.
        if model_arch in _OOT_MODELS:
            return _OOT_MODELS[model_arch]
        if model_arch not in _MODELS:
            return None
        if is_hip():
            if model_arch in _ROCM_UNSUPPORTED_MODELS:
                raise ValueError(
                    f"Model architecture {model_arch} is not supported by "
                    "ROCm for now.")
            if model_arch in _ROCM_PARTIALLY_SUPPORTED_MODELS:
                logger.warning(
                    f"Model architecture {model_arch} is partially "
                    "supported by ROCm: "
                    f"{_ROCM_PARTIALLY_SUPPORTED_MODELS[model_arch]}")

        # Import the implementing module lazily and fetch the class from it.
        module_name, model_cls_name = _MODELS[model_arch]
        module = importlib.import_module(
            f"aphrodite.modeling.models.{module_name}")
        return getattr(module, model_cls_name, None)

    @staticmethod
    def get_supported_archs() -> List[str]:
        """Return the architecture names with built-in support."""
        return list(_MODELS.keys())

    @staticmethod
    def register_model(model_arch: str, model_cls: Type[nn.Module]):
        """Register an out-of-tree model class for the given architecture."""
        if model_arch in _MODELS:
            logger.warning(f"Model architecture {model_arch} is already "
                           "registered, and will be overwritten by the new "
                           f"model class {model_cls.__name__}.")
        global _OOT_MODELS
        _OOT_MODELS[model_arch] = model_cls

    @staticmethod
    def is_embedding_model(model_arch: str) -> bool:
        """Return True if the architecture is an embedding model rather
        than a generation model."""
        return model_arch in _EMBEDDING_MODELS


__all__ = [
    "ModelRegistry",
]
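
# Usage sketch (illustration only; `MyModelForCausalLM` is a hypothetical
# out-of-tree class assumed purely for this example):
#
#     from aphrodite.modeling.models import ModelRegistry
#
#     ModelRegistry.register_model("MyModelForCausalLM", MyModelForCausalLM)
#     model_cls = ModelRegistry.load_model_cls("MyModelForCausalLM")
#     assert model_cls is MyModelForCausalLM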