import importlib
from typing import Dict, List, Optional, Type

import torch.nn as nn
from loguru import logger

from aphrodite.common.utils import is_hip

# Architecture -> (module, class).
_GENERATION_MODELS = {
    "AquilaModel": ("llama", "LlamaForCausalLM"),
    "AquilaForCausalLM": ("llama", "LlamaForCausalLM"),  # AquilaChat2
    "BaiChuanForCausalLM": ("baichuan", "BaiChuanForCausalLM"),  # baichuan-7b
    "BaichuanForCausalLM": ("baichuan", "BaichuanForCausalLM"),  # baichuan-13b
    "BloomForCausalLM": ("bloom", "BloomForCausalLM"),
    "ChatGLMModel": ("chatglm", "ChatGLMForCausalLM"),
    "ChatGLMForConditionalGeneration": ("chatglm", "ChatGLMForCausalLM"),
    "CohereForCausalLM": ("commandr", "CohereForCausalLM"),
    "DbrxForCausalLM": ("dbrx", "DbrxForCausalLM"),
    "DeciLMForCausalLM": ("decilm", "DeciLMForCausalLM"),
    "DeepseekForCausalLM": ("deepseek", "DeepseekForCausalLM"),
    "DeepseekV2ForCausalLM": ("deepseek_v2", "DeepseekV2ForCausalLM"),
    "FalconForCausalLM": ("falcon", "FalconForCausalLM"),
    "GemmaForCausalLM": ("gemma", "GemmaForCausalLM"),
    "GPT2LMHeadModel": ("gpt2", "GPT2LMHeadModel"),
    "GPTBigCodeForCausalLM": ("gpt_bigcode", "GPTBigCodeForCausalLM"),
    "GPTJForCausalLM": ("gpt_j", "GPTJForCausalLM"),
    "GPTNeoXForCausalLM": ("gpt_neox", "GPTNeoXForCausalLM"),
    "InternLMForCausalLM": ("llama", "LlamaForCausalLM"),
    "InternLM2ForCausalLM": ("internlm2", "InternLM2ForCausalLM"),
    "JAISLMHeadModel": ("jais", "JAISLMHeadModel"),
    "LlamaForCausalLM": ("llama", "LlamaForCausalLM"),
    "LlavaForConditionalGeneration":
    ("llava", "LlavaForConditionalGeneration"),
    "LlavaNextForConditionalGeneration":
    ("llava_next", "LlavaNextForConditionalGeneration"),
    # For decapoda-research/llama-*
    "LLaMAForCausalLM": ("llama", "LlamaForCausalLM"),
    "MistralForCausalLM": ("llama", "LlamaForCausalLM"),
    "MixtralForCausalLM": ("mixtral", "MixtralForCausalLM"),
    "QuantMixtralForCausalLM": ("mixtral_quant", "MixtralForCausalLM"),
    # transformers' MPT class name is "MptForCausalLM" (lowercase "pt")
    "MptForCausalLM": ("mpt", "MPTForCausalLM"),
    "MPTForCausalLM": ("mpt", "MPTForCausalLM"),
    "MiniCPMForCausalLM": ("minicpm", "MiniCPMForCausalLM"),
    "OlmoForCausalLM": ("olmo", "OlmoForCausalLM"),
    "OPTForCausalLM": ("opt", "OPTForCausalLM"),
    "OrionForCausalLM": ("orion", "OrionForCausalLM"),
    "PhiForCausalLM": ("phi", "PhiForCausalLM"),
    "Phi3ForCausalLM": ("llama", "LlamaForCausalLM"),
    "Phi3VForCausalLM": ("phi3v", "Phi3VForCausalLM"),
    "QWenLMHeadModel": ("qwen", "QWenLMHeadModel"),
    "Qwen2ForCausalLM": ("qwen2", "Qwen2ForCausalLM"),
    "Qwen2MoeForCausalLM": ("qwen2_moe", "Qwen2MoeForCausalLM"),
    "RWForCausalLM": ("falcon", "FalconForCausalLM"),
    "StableLMEpochForCausalLM": ("stablelm", "StablelmForCausalLM"),
    "StableLmForCausalLM": ("stablelm", "StablelmForCausalLM"),
    "Starcoder2ForCausalLM": ("starcoder2", "Starcoder2ForCausalLM"),
    "ArcticForCausalLM": ("arctic", "ArcticForCausalLM"),
    "XverseForCausalLM": ("xverse", "XverseForCausalLM"),
    "Phi3SmallForCausalLM": ("phi3_small", "Phi3SmallForCausalLM"),
    "MLPSpeculatorPreTrainedModel": ("mlp_speculator", "MLPSpeculator"),
    "JambaForCausalLM": ("jamba", "JambaForCausalLM"),
}

_EMBEDDING_MODELS = {
    "MistralModel": ("llama_embedding", "LlamaEmbeddingModel"),
}

_MODELS = {**_GENERATION_MODELS, **_EMBEDDING_MODELS}
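# Each (module, class) value above is resolved lazily by
# ModelRegistry.load_model_cls as aphrodite.modeling.models.<module>.<class>,
# e.g. "LlamaForCausalLM" -> aphrodite.modeling.models.llama.LlamaForCausalLM.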

# Architecture -> type.
# Out-of-tree models, registered at runtime via ModelRegistry.register_model().
_OOT_MODELS: Dict[str, Type[nn.Module]] = {}

# Models not supported by ROCm.
_ROCM_UNSUPPORTED_MODELS: List[str] = []

# Models partially supported by ROCm.
# Architecture -> Reason.
_ROCM_PARTIALLY_SUPPORTED_MODELS = {
    "Qwen2ForCausalLM":
    "Sliding window attention is not yet supported in ROCm's flash attention",
    "MistralForCausalLM":
    "Sliding window attention is not yet supported in ROCm's flash attention",
    "MixtralForCausalLM":
    "Sliding window attention is not yet supported in ROCm's flash attention",
}

class ModelRegistry:
    """Registry mapping model architecture names to model classes."""

    @staticmethod
    def load_model_cls(model_arch: str) -> Optional[Type[nn.Module]]:
        """Return the model class for `model_arch`, or None if unknown."""
        if model_arch in _OOT_MODELS:
            return _OOT_MODELS[model_arch]
        if model_arch not in _MODELS:
            return None
        if is_hip():
            if model_arch in _ROCM_UNSUPPORTED_MODELS:
                raise ValueError(
                    f"Model architecture {model_arch} is not supported by "
                    "ROCm for now.")
            if model_arch in _ROCM_PARTIALLY_SUPPORTED_MODELS:
                logger.warning(
                    f"Model architecture {model_arch} is partially "
                    "supported by ROCm: "
                    f"{_ROCM_PARTIALLY_SUPPORTED_MODELS[model_arch]}")

        module_name, model_cls_name = _MODELS[model_arch]
        module = importlib.import_module(
            f"aphrodite.modeling.models.{module_name}")
        return getattr(module, model_cls_name, None)

    @staticmethod
    def get_supported_archs() -> List[str]:
        """Return all architecture names known to the registry."""
        return list(_MODELS.keys())

    @staticmethod
    def register_model(model_arch: str, model_cls: Type[nn.Module]):
        """Register an out-of-tree model class under `model_arch`."""
        if model_arch in _MODELS:
            logger.warning(f"Model architecture {model_arch} is already "
                           "registered, and will be overwritten by the new "
                           f"model class {model_cls.__name__}.")
        global _OOT_MODELS
        _OOT_MODELS[model_arch] = model_cls

    @staticmethod
    def is_embedding_model(model_arch: str) -> bool:
        """Return True if `model_arch` is registered as an embedding model."""
        return model_arch in _EMBEDDING_MODELS

__all__ = [
    "ModelRegistry",
]
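
# Illustrative usage sketch (kept as a comment; not executed by this module):
# resolving a built-in architecture and registering an out-of-tree one.
# `MyModelForCausalLM` is a hypothetical placeholder, not part of Aphrodite.
#
#   from aphrodite.modeling.models import ModelRegistry
#
#   llama_cls = ModelRegistry.load_model_cls("LlamaForCausalLM")
#   print(ModelRegistry.get_supported_archs())
#
#   class MyModelForCausalLM(nn.Module):  # hypothetical out-of-tree model
#       ...
#
#   ModelRegistry.register_model("MyModelForCausalLM", MyModelForCausalLM)
#   assert ModelRegistry.load_model_cls("MyModelForCausalLM") is MyModelForCausalLM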