config.py

import contextlib
import enum
import json
from pathlib import Path
from typing import Any, Dict, Optional, Type, Union

import huggingface_hub
from huggingface_hub import (file_exists, hf_hub_download,
                             try_to_load_from_cache)
from loguru import logger
from transformers import GenerationConfig, PretrainedConfig
from transformers.models.auto.image_processing_auto import (
    get_image_processor_config)
from transformers.models.auto.modeling_auto import (
    MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
from transformers.utils import CONFIG_NAME as HF_CONFIG_NAME

import aphrodite.common.envs as envs
from aphrodite.transformers_utils.configs import (ChatGLMConfig, DbrxConfig,
                                                  EAGLEConfig,
                                                  InternVLChatConfig,
                                                  JAISConfig, MedusaConfig,
                                                  MLPSpeculatorConfig,
                                                  MPTConfig, RWConfig,
                                                  UltravoxConfig)
from aphrodite.transformers_utils.utils import check_gguf_file

APHRODITE_USE_MODELSCOPE = envs.APHRODITE_USE_MODELSCOPE

if APHRODITE_USE_MODELSCOPE:
    from modelscope import AutoConfig
else:
    from transformers import AutoConfig

MISTRAL_CONFIG_NAME = "params.json"

_CONFIG_REGISTRY: Dict[str, Type[PretrainedConfig]] = {
    "chatglm": ChatGLMConfig,
    "dbrx": DbrxConfig,
    "mpt": MPTConfig,
    "RefinedWeb": RWConfig,  # For tiiuae/falcon-40b(-instruct)
    "RefinedWebModel": RWConfig,  # For tiiuae/falcon-7b(-instruct)
    "jais": JAISConfig,
    "mlp_speculator": MLPSpeculatorConfig,
    "medusa": MedusaConfig,
    "internvl_chat": InternVLChatConfig,
    "ultravox": UltravoxConfig,
    "eagle": EAGLEConfig,
}

for name, cls in _CONFIG_REGISTRY.items():
    with contextlib.suppress(ValueError):
        AutoConfig.register(name, cls)


class ConfigFormat(str, enum.Enum):
    AUTO = "auto"
    HF = "hf"
    MISTRAL = "mistral"


def file_or_path_exists(model: Union[str, Path], config_name, revision,
                        token) -> bool:
    if Path(model).exists():
        return (Path(model) / config_name).is_file()

    # Offline mode support: check whether the config file is already cached.
    cached_filepath = try_to_load_from_cache(repo_id=model,
                                             filename=config_name,
                                             revision=revision)
    if isinstance(cached_filepath, str):
        # The config file exists in the cache, so we can continue trying to
        # load it.
        return True

    # NB: file_exists only checks for the existence of the config file on
    # hf_hub. This will fail in offline mode.
    try:
        return file_exists(model, config_name, revision=revision, token=token)
    except huggingface_hub.errors.OfflineModeIsEnabled:
        # Don't raise in offline mode; all we know is that we don't have this
        # file cached.
        return False
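
# Illustrative usage (a sketch, not part of the module): this helper answers
# "is `config_name` available for this model?" without forcing a download,
# preferring a local path, then the HF cache, then (online only) the Hub.
# The repo id below is just an example:
#
#   has_hf_config = file_or_path_exists("mistralai/Mistral-7B-v0.1",
#                                       HF_CONFIG_NAME,
#                                       revision=None,
#                                       token=None)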


def get_config(
    model: Union[str, Path],
    trust_remote_code: bool,
    revision: Optional[str] = None,
    code_revision: Optional[str] = None,
    rope_scaling: Optional[dict] = None,
    rope_theta: Optional[float] = None,
    config_format: ConfigFormat = ConfigFormat.AUTO,
    **kwargs,
) -> PretrainedConfig:
    # Separate model folder from file path for GGUF models
    is_gguf = check_gguf_file(model)
    if is_gguf:
        kwargs["gguf_file"] = Path(model).name
        model = Path(model).parent

    if config_format == ConfigFormat.AUTO:
        if is_gguf or file_or_path_exists(model,
                                          HF_CONFIG_NAME,
                                          revision=revision,
                                          token=kwargs.get("token")):
            config_format = ConfigFormat.HF
        elif file_or_path_exists(model,
                                 MISTRAL_CONFIG_NAME,
                                 revision=revision,
                                 token=kwargs.get("token")):
            config_format = ConfigFormat.MISTRAL
        else:
            # If we're in offline mode and found no valid config format, then
            # raise an offline mode error to indicate to the user that they
            # don't have files cached and may need to go online.
            # This is conveniently triggered by calling file_exists().
            file_exists(model,
                        HF_CONFIG_NAME,
                        revision=revision,
                        token=kwargs.get("token"))

            raise ValueError(f"No supported config format found in {model}")

    if config_format == ConfigFormat.HF:
        config_dict, _ = PretrainedConfig.get_config_dict(
            model, revision=revision, code_revision=code_revision, **kwargs)

        # Use custom model class if it's in our registry
        model_type = config_dict.get("model_type")
        if model_type in _CONFIG_REGISTRY:
            config_class = _CONFIG_REGISTRY[model_type]
            config = config_class.from_pretrained(model,
                                                  revision=revision,
                                                  code_revision=code_revision)
        else:
            try:
                config = AutoConfig.from_pretrained(
                    model,
                    trust_remote_code=trust_remote_code,
                    revision=revision,
                    code_revision=code_revision,
                    **kwargs,
                )
            except ValueError as e:
                if (not trust_remote_code
                        and "requires you to execute the configuration file"
                        in str(e)):
                    err_msg = (
                        "Failed to load the model config. If the model "
                        "is a custom model not yet available in the "
                        "HuggingFace transformers library, consider setting "
                        "`trust_remote_code=True` in LLM or using the "
                        "`--trust-remote-code` flag in the CLI.")
                    raise RuntimeError(err_msg) from e
                else:
                    raise e

    elif config_format == ConfigFormat.MISTRAL:
        config = load_params_config(model, revision)
    else:
        raise ValueError(f"Unsupported config format: {config_format}")

    # Special architecture mapping check for GGUF models
    if is_gguf:
        if config.model_type not in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES:
            raise RuntimeError(
                f"Can't get gguf config for {config.model_type}.")
        model_type = MODEL_FOR_CAUSAL_LM_MAPPING_NAMES[config.model_type]
        config.update({"architectures": [model_type]})

    for key, value in [
        ("rope_scaling", rope_scaling),
        ("rope_theta", rope_theta),
    ]:
        if value is not None:
            # NOTE: loguru formats with str.format, not %-style, so use an
            # f-string here.
            logger.info(f"Updating {key} from "
                        f"{getattr(config, key, None)!r} to {value!r}")
            config.update({key: value})

    return config
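
# Illustrative usage (a sketch, not part of the module). The repo id is only
# an example; any HF repo id, local directory, or GGUF file path works:
#
#   config = get_config("mistralai/Mistral-7B-v0.1", trust_remote_code=False)
#   print(config.model_type)         # e.g. "mistral"
#   print(config.num_hidden_layers)  # e.g. 32 for this checkpoint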


def load_params_config(model, revision) -> PretrainedConfig:
    """Load a params.json config, used for models in Mistral format."""
    config_file_name = "params.json"

    config_path = Path(model) / config_file_name

    if not config_path.is_file():
        config_path = Path(
            hf_hub_download(model, config_file_name, revision=revision))

    with open(config_path, "r") as file:
        config_dict = json.load(file)

    config_mapping = {
        "dim": "hidden_size",
        "norm_eps": "rms_norm_eps",
        "n_kv_heads": "num_key_value_heads",
        "n_layers": "num_hidden_layers",
        "n_heads": "num_attention_heads",
        "hidden_dim": "intermediate_size",
    }

    def recurse_elems(elem: Any):
        if isinstance(elem, dict):
            config_dict = {}
            for key, value in elem.items():
                key = config_mapping.get(key, key)
                config_dict[key] = recurse_elems(value)
            return PretrainedConfig(**config_dict)
        else:
            return elem

    config_dict["model_type"] = config_dict.get("model_type", "transformer")
    config_dict["hidden_act"] = config_dict.get("activation", "silu")
    config_dict["tie_word_embeddings"] = config_dict.get(
        "tie_embeddings", False)
    config_dict["max_seq_len"] = config_dict.get("max_seq_len", 128_000)
    config_dict["max_position_embeddings"] = config_dict.get(
        "max_position_embeddings", 128_000)

    if config_dict.get("moe") is not None:
        config_dict["architectures"] = ["MixtralForCausalLM"]
    else:
        config_dict["architectures"] = ["MistralForCausalLM"]

    if config_dict.get("vision_encoder") is not None:
        multimodal_config = config_dict.pop("vision_encoder")

        config_dict = {
            "text_config": config_dict,
            "vision_config": multimodal_config
        }
        config_dict["architectures"] = ["PixtralForConditionalGeneration"]
        config_dict["model_type"] = "pixtral"

    config = recurse_elems(config_dict)
    return config
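
# Illustrative mapping (a sketch): a Mistral-format params.json such as
#
#   {"dim": 4096, "n_layers": 32, "n_heads": 32, "n_kv_heads": 8, ...}
#
# is renamed field-by-field via config_mapping above, so the resulting
# PretrainedConfig exposes hidden_size=4096, num_hidden_layers=32,
# num_attention_heads=32 and num_key_value_heads=8.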


def get_hf_image_processor_config(
    model: Union[str, Path],
    revision: Optional[str] = None,
    **kwargs,
) -> Dict[str, Any]:
    # Separate model folder from file path for GGUF models
    if Path(model).is_file() and Path(model).suffix == ".gguf":
        model = Path(model).parent
    return get_image_processor_config(model, revision=revision, **kwargs)


def get_hf_text_config(config: PretrainedConfig):
    """Get the "sub" config relevant to the LLM for multimodal models.

    No-op for pure text models.
    """
    if hasattr(config, "text_config"):
        # The code operates under the assumption that text_config should have
        # `num_attention_heads` (among others). Assert here to fail early
        # if the transformers config doesn't align with this assumption.
        assert hasattr(config.text_config, "num_attention_heads")
        return config.text_config
    else:
        return config
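
# Illustrative behavior (a sketch): for a multimodal config, e.g. the Pixtral
# config assembled in load_params_config above, this returns
# config.text_config; for a plain text model the config is returned unchanged:
#
#   text_config = get_hf_text_config(config)
#   assert hasattr(text_config, "num_attention_heads")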


def try_get_generation_config(
    model: str,
    trust_remote_code: bool,
    revision: Optional[str] = None,
) -> Optional[GenerationConfig]:
    try:
        return GenerationConfig.from_pretrained(
            model,
            revision=revision,
        )
    except OSError:  # Not found
        try:
            config = get_config(
                model,
                trust_remote_code=trust_remote_code,
                revision=revision,
            )
            return GenerationConfig.from_model_config(config)
        except OSError:  # Not found
            return None
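
# Illustrative fallback chain (a sketch): prefer the repo's
# generation_config.json; failing that, derive defaults from the model config;
# if neither loads, return None so callers can use their own sampling
# defaults. The repo id below is just an example:
#
#   gen_config = try_get_generation_config("mistralai/Mistral-7B-v0.1",
#                                          trust_remote_code=False)
#   if gen_config is None:
#       pass  # fall back to engine defaults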