config.py 10 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287
  1. import contextlib
  2. import enum
  3. import json
  4. from pathlib import Path
  5. from typing import Any, Dict, Optional, Type, Union
  6. import huggingface_hub
  7. from huggingface_hub import (file_exists, hf_hub_download,
  8. try_to_load_from_cache)
  9. from loguru import logger
  10. from transformers import GenerationConfig, PretrainedConfig
  11. from transformers.models.auto.modeling_auto import (
  12. MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
  13. from transformers.utils import CONFIG_NAME as HF_CONFIG_NAME
  14. import aphrodite.common.envs as envs
  15. from aphrodite.transformers_utils.configs import (ChatGLMConfig, DbrxConfig,
  16. InternVLChatConfig,
  17. JAISConfig, MedusaConfig,
  18. MLPSpeculatorConfig,
  19. MPTConfig, RWConfig,
  20. UltravoxConfig)
  21. from aphrodite.transformers_utils.utils import check_gguf_file
# Mirror the env flag locally so call sites below don't reach into envs.
APHRODITE_USE_MODELSCOPE = envs.APHRODITE_USE_MODELSCOPE

# Source AutoConfig from ModelScope when requested, transformers otherwise;
# the two expose a compatible interface for the uses in this module.
if APHRODITE_USE_MODELSCOPE:
    from modelscope import AutoConfig
else:
    from transformers import AutoConfig

# File name of the Mistral-native config (as opposed to HF's config.json).
MISTRAL_CONFIG_NAME = "params.json"

# Model types whose configs need custom classes instead of stock
# transformers handling; get_config() prefers these over AutoConfig.
_CONFIG_REGISTRY: Dict[str, Type[PretrainedConfig]] = {
    "chatglm": ChatGLMConfig,
    "dbrx": DbrxConfig,
    "mpt": MPTConfig,
    "RefinedWeb": RWConfig,  # For tiiuae/falcon-40b(-instruct)
    "RefinedWebModel": RWConfig,  # For tiiuae/falcon-7b(-instruct)
    "jais": JAISConfig,
    "mlp_speculator": MLPSpeculatorConfig,
    "medusa": MedusaConfig,
    "internvl_chat": InternVLChatConfig,
    "ultravox": UltravoxConfig,
}

# Register the custom classes with AutoConfig. ValueError (e.g. the name is
# already registered) is suppressed so repeated imports stay harmless.
for name, cls in _CONFIG_REGISTRY.items():
    with contextlib.suppress(ValueError):
        AutoConfig.register(name, cls)
class ConfigFormat(str, enum.Enum):
    """Supported on-disk formats for model configuration files."""
    AUTO = "auto"  # probe for HF config.json, then Mistral params.json
    HF = "hf"  # HuggingFace config.json
    MISTRAL = "mistral"  # Mistral-native params.json
  47. def file_or_path_exists(model: Union[str, Path], config_name, revision,
  48. token) -> bool:
  49. if Path(model).exists():
  50. return (Path(model) / config_name).is_file()
  51. # Offline mode support: Check if config file is cached already
  52. cached_filepath = try_to_load_from_cache(repo_id=model,
  53. filename=config_name,
  54. revision=revision)
  55. if isinstance(cached_filepath, str):
  56. # The config file exists in cache- we can continue trying to load
  57. return True
  58. # NB: file_exists will only check for the existence of the config file on
  59. # hf_hub. This will fail in offline mode.
  60. try:
  61. return file_exists(model, config_name, revision=revision, token=token)
  62. except huggingface_hub.errors.OfflineModeIsEnabled:
  63. # Don't raise in offline mode, all we know is that we don't have this
  64. # file cached.
  65. return False
  66. def get_config(
  67. model: Union[str, Path],
  68. trust_remote_code: bool,
  69. revision: Optional[str] = None,
  70. code_revision: Optional[str] = None,
  71. rope_scaling: Optional[dict] = None,
  72. rope_theta: Optional[float] = None,
  73. config_format: ConfigFormat = ConfigFormat.AUTO,
  74. **kwargs,
  75. ) -> PretrainedConfig:
  76. # Separate model folder from file path for GGUF models
  77. is_gguf = check_gguf_file(model)
  78. if is_gguf:
  79. kwargs["gguf_file"] = Path(model).name
  80. model = Path(model).parent
  81. if config_format == ConfigFormat.AUTO:
  82. if is_gguf or file_or_path_exists(model,
  83. HF_CONFIG_NAME,
  84. revision=revision,
  85. token=kwargs.get("token")):
  86. config_format = ConfigFormat.HF
  87. elif file_or_path_exists(model,
  88. MISTRAL_CONFIG_NAME,
  89. revision=revision,
  90. token=kwargs.get("token")):
  91. config_format = ConfigFormat.MISTRAL
  92. else:
  93. # If we're in offline mode and found no valid config format, then
  94. # raise an offline mode error to indicate to the user that they
  95. # don't have files cached and may need to go online.
  96. # This is conveniently triggered by calling file_exists().
  97. file_exists(model,
  98. HF_CONFIG_NAME,
  99. revision=revision,
  100. token=kwargs.get("token"))
  101. raise ValueError(f"No supported config format found in {model}")
  102. if config_format == ConfigFormat.HF:
  103. config_dict, _ = PretrainedConfig.get_config_dict(
  104. model, revision=revision, code_revision=code_revision, **kwargs)
  105. # Store the original rope_scaling if it exists
  106. original_rope_scaling = config_dict.get('rope_scaling')
  107. # Use custom model class if it's in our registry
  108. model_type = config_dict.get("model_type")
  109. if model_type in _CONFIG_REGISTRY:
  110. config_class = _CONFIG_REGISTRY[model_type]
  111. config = config_class.from_pretrained(model,
  112. revision=revision,
  113. code_revision=code_revision)
  114. else:
  115. try:
  116. config = AutoConfig.from_pretrained(
  117. model,
  118. trust_remote_code=trust_remote_code,
  119. revision=revision,
  120. code_revision=code_revision,
  121. **kwargs,
  122. )
  123. except ValueError as e:
  124. if (not trust_remote_code
  125. and "requires you to execute the configuration file"
  126. in str(e)):
  127. err_msg = (
  128. "Failed to load the model config. If the model "
  129. "is a custom model not yet available in the "
  130. "HuggingFace transformers library, consider setting "
  131. "`trust_remote_code=True` in LLM or using the "
  132. "`--trust-remote-code` flag in the CLI.")
  133. raise RuntimeError(err_msg) from e
  134. else:
  135. raise e
  136. # Restore the original rope_scaling if it was modified
  137. if original_rope_scaling and getattr(config, 'rope_scaling', {}).get('type') == 'default':
  138. if 'mrope_section' in original_rope_scaling:
  139. config.rope_scaling = original_rope_scaling
  140. elif config_format == ConfigFormat.MISTRAL:
  141. config = load_params_config(model, revision)
  142. else:
  143. raise ValueError(f"Unsupported config format: {config_format}")
  144. # Special architecture mapping check for GGUF models
  145. if is_gguf:
  146. if config.model_type not in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES:
  147. raise RuntimeError(
  148. f"Can't get gguf config for {config.model_type}.")
  149. model_type = MODEL_FOR_CAUSAL_LM_MAPPING_NAMES[config.model_type]
  150. config.update({"architectures": [model_type]})
  151. for key, value in [
  152. ("rope_scaling", rope_scaling),
  153. ("rope_theta", rope_theta),
  154. ]:
  155. if value is not None:
  156. logger.info(
  157. "Updating %s from %r to %r",
  158. key,
  159. getattr(config, key, None),
  160. value,
  161. )
  162. config.update({key: value})
  163. return config
def load_params_config(model, revision) -> PretrainedConfig:
    # This function loads a params.json config which
    # should be used when loading models in mistral format

    config_file_name = "params.json"

    # Prefer a local file; otherwise download it from the HF hub.
    config_path = Path(model) / config_file_name
    if not config_path.is_file():
        config_path = Path(
            hf_hub_download(model, config_file_name, revision=revision))

    with open(config_path, "r") as file:
        config_dict = json.load(file)

    # Mistral param names -> HF config attribute names.
    config_mapping = {
        "dim": "hidden_size",
        "norm_eps": "rms_norm_eps",
        "n_kv_heads": "num_key_value_heads",
        "n_layers": "num_hidden_layers",
        "n_heads": "num_attention_heads",
        "hidden_dim": "intermediate_size",
    }

    def recurse_elems(elem: Any):
        # Recursively turn every dict (at any depth) into a
        # PretrainedConfig, renaming keys via config_mapping on the way.
        # NOTE: this local config_dict deliberately shadows the outer one.
        if isinstance(elem, dict):
            config_dict = {}
            for key, value in elem.items():
                key = config_mapping.get(key, key)
                config_dict[key] = recurse_elems(value)
            return PretrainedConfig(**config_dict)
        else:
            return elem

    # Fill in HF-side attributes that params.json doesn't carry directly.
    config_dict["model_type"] = config_dict.get("model_type", "transformer")
    config_dict["hidden_act"] = config_dict.get("activation", "silu")
    config_dict["tie_word_embeddings"] = config_dict.get(
        "tie_embeddings", False)
    config_dict["max_seq_len"] = config_dict.get("max_seq_len", 128_000)
    config_dict["max_position_embeddings"] = config_dict.get(
        "max_position_embeddings", 128_000)

    # A "moe" section marks a Mixtral-style model; otherwise plain Mistral.
    if config_dict.get("moe") is not None:
        config_dict["architectures"] = ["MixtralForCausalLM"]
    else:
        config_dict["architectures"] = ["MistralForCausalLM"]

    # A vision encoder means Pixtral: wrap text/vision configs in the
    # multimodal layout (overriding architectures/model_type set above).
    if config_dict.get("vision_encoder") is not None:
        multimodal_config = config_dict.pop("vision_encoder")

        config_dict = {
            "text_config": config_dict,
            "vision_config": multimodal_config
        }
        config_dict["architectures"] = ["PixtralForConditionalGeneration"]
        config_dict["model_type"] = "pixtral"

    config = recurse_elems(config_dict)
    return config
  212. def get_hf_text_config(config: PretrainedConfig):
  213. """Get the "sub" config relevant to llm for multi modal models.
  214. No op for pure text models.
  215. """
  216. if hasattr(config, "text_config"):
  217. # The code operates under the assumption that text_config should have
  218. # `num_attention_heads` (among others). Assert here to fail early
  219. # if transformers config doesn't align with this assumption.
  220. assert hasattr(config.text_config, "num_attention_heads")
  221. return config.text_config
  222. else:
  223. return config
  224. def try_get_generation_config(
  225. model: str,
  226. trust_remote_code: bool,
  227. revision: Optional[str] = None,
  228. ) -> Optional[GenerationConfig]:
  229. try:
  230. return GenerationConfig.from_pretrained(
  231. model,
  232. revision=revision,
  233. )
  234. except OSError: # Not found
  235. try:
  236. config = get_config(
  237. model,
  238. trust_remote_code=trust_remote_code,
  239. revision=revision,
  240. )
  241. return GenerationConfig.from_model_config(config)
  242. except OSError: # Not found
  243. return None