config.py

import contextlib
import enum
import json
import os
from pathlib import Path
from typing import Any, Dict, Optional, Type, Union

import huggingface_hub
from huggingface_hub import (file_exists, hf_hub_download,
                             try_to_load_from_cache)
from loguru import logger
from transformers import GenerationConfig, PretrainedConfig
from transformers.models.auto.modeling_auto import (
    MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
from transformers.utils import CONFIG_NAME as HF_CONFIG_NAME

from aphrodite.transformers_utils.configs import (ChatGLMConfig, DbrxConfig,
                                                  InternVLChatConfig,
                                                  JAISConfig, MedusaConfig,
                                                  MLPSpeculatorConfig,
                                                  MPTConfig, RWConfig)
from aphrodite.transformers_utils.utils import check_gguf_file

APHRODITE_USE_MODELSCOPE = os.getenv("APHRODITE_USE_MODELSCOPE", "0") == "1"

if APHRODITE_USE_MODELSCOPE:
    from modelscope import AutoConfig
else:
    from transformers import AutoConfig
MISTRAL_CONFIG_NAME = "params.json"

_CONFIG_REGISTRY: Dict[str, Type[PretrainedConfig]] = {
    "chatglm": ChatGLMConfig,
    "dbrx": DbrxConfig,
    "mpt": MPTConfig,
    "RefinedWeb": RWConfig,  # For tiiuae/falcon-40b(-instruct)
    "RefinedWebModel": RWConfig,  # For tiiuae/falcon-7b(-instruct)
    "jais": JAISConfig,
    "mlp_speculator": MLPSpeculatorConfig,
    "medusa": MedusaConfig,
    "internvl_chat": InternVLChatConfig,
}

for name, cls in _CONFIG_REGISTRY.items():
    with contextlib.suppress(ValueError):
        AutoConfig.register(name, cls)
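# NOTE: AutoConfig.register raises ValueError when a model type is already
# registered (e.g. once transformers ships its own "mpt" config), so the
# suppress() above turns duplicate registration into a no-op instead of an
# import-time error. After registration, a config.json declaring
# `"model_type": "mpt"` resolves to MPTConfig through AutoConfig.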
class ConfigFormat(str, enum.Enum):
    AUTO = "auto"
    HF = "hf"
    MISTRAL = "mistral"
def file_or_path_exists(model: Union[str, Path], config_name, revision,
                        token) -> bool:
    if Path(model).exists():
        return (Path(model) / config_name).is_file()

    # Offline mode support: check whether the config file is already cached.
    cached_filepath = try_to_load_from_cache(repo_id=model,
                                             filename=config_name,
                                             revision=revision)
    if isinstance(cached_filepath, str):
        # The config file exists in the cache; we can continue trying to load.
        return True

    # NB: file_exists only checks for the config file on the HF Hub,
    # so it will fail in offline mode.
    try:
        return file_exists(model, config_name, revision=revision, token=token)
    except huggingface_hub.errors.OfflineModeIsEnabled:
        # Don't raise in offline mode; all we know is that the file is not
        # cached locally.
        return False
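# A minimal usage sketch (the repo id is only an example):
#
#     file_or_path_exists("mistralai/Mistral-7B-v0.1", HF_CONFIG_NAME,
#                         revision=None, token=None)
#
# returns True when config.json is on local disk, in the HF cache, or
# reachable on the Hub; in offline mode it degrades to a cache-only check.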
def get_config(
    model: Union[str, Path],
    trust_remote_code: bool,
    revision: Optional[str] = None,
    code_revision: Optional[str] = None,
    rope_scaling: Optional[dict] = None,
    rope_theta: Optional[float] = None,
    config_format: ConfigFormat = ConfigFormat.AUTO,
    **kwargs,
) -> PretrainedConfig:
    # Separate the model folder from the file path for GGUF models.
    is_gguf = check_gguf_file(model)
    if is_gguf:
        kwargs["gguf_file"] = Path(model).name
        model = Path(model).parent

    if config_format == ConfigFormat.AUTO:
        if is_gguf or file_or_path_exists(model,
                                          HF_CONFIG_NAME,
                                          revision=revision,
                                          token=kwargs.get("token")):
            config_format = ConfigFormat.HF
        elif file_or_path_exists(model,
                                 MISTRAL_CONFIG_NAME,
                                 revision=revision,
                                 token=kwargs.get("token")):
            config_format = ConfigFormat.MISTRAL
        else:
            # If we're in offline mode and found no valid config format,
            # raise an offline mode error to indicate to the user that they
            # don't have the files cached and may need to go online.
            # This is conveniently triggered by calling file_exists().
            file_exists(model,
                        HF_CONFIG_NAME,
                        revision=revision,
                        token=kwargs.get("token"))
            raise ValueError(f"No supported config format found in {model}")

    if config_format == ConfigFormat.HF:
        config_dict, _ = PretrainedConfig.get_config_dict(
            model, revision=revision, code_revision=code_revision, **kwargs)

        # Use a custom config class if the model type is in our registry.
        model_type = config_dict.get("model_type")
        if model_type in _CONFIG_REGISTRY:
            config_class = _CONFIG_REGISTRY[model_type]
            config = config_class.from_pretrained(model,
                                                  revision=revision,
                                                  code_revision=code_revision)
        else:
            try:
                config = AutoConfig.from_pretrained(
                    model,
                    trust_remote_code=trust_remote_code,
                    revision=revision,
                    code_revision=code_revision,
                    **kwargs,
                )
            except ValueError as e:
                if (not trust_remote_code
                        and "requires you to execute the configuration file"
                        in str(e)):
                    err_msg = (
                        "Failed to load the model config. If the model "
                        "is a custom model not yet available in the "
                        "HuggingFace transformers library, consider setting "
                        "`trust_remote_code=True` in LLM or using the "
                        "`--trust-remote-code` flag in the CLI.")
                    raise RuntimeError(err_msg) from e
                else:
                    raise e

    elif config_format == ConfigFormat.MISTRAL:
        config = load_params_config(model, revision)
    else:
        raise ValueError(f"Unsupported config format: {config_format}")

    # Special architecture mapping check for GGUF models.
    if is_gguf:
        if config.model_type not in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES:
            raise RuntimeError(
                f"Can't get gguf config for {config.model_type}.")
        model_type = MODEL_FOR_CAUSAL_LM_MAPPING_NAMES[config.model_type]
        config.update({"architectures": [model_type]})

    for key, value in [
        ("rope_scaling", rope_scaling),
        ("rope_theta", rope_theta),
    ]:
        if value is not None:
            # NOTE: loguru uses brace-style formatting, not printf-style
            # "%s" placeholders, so the message must use "{}".
            logger.info("Updating {} from {!r} to {!r}", key,
                        getattr(config, key, None), value)
            config.update({key: value})

    return config
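# A minimal usage sketch, assuming the model files are available locally or
# on the Hugging Face Hub (the repo id below is only an example):
#
#     config = get_config("mistralai/Mistral-7B-v0.1",
#                         trust_remote_code=False,
#                         rope_theta=1_000_000.0)
#
# With ConfigFormat.AUTO, a repo that ships config.json takes the HF path
# above, while one that only ships params.json falls through to the Mistral
# loader below.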
def load_params_config(model, revision) -> PretrainedConfig:
    # This function loads a params.json config, which should be used when
    # loading models in the Mistral format.
    config_file_name = "params.json"

    config_path = Path(model) / config_file_name
    if not config_path.is_file():
        config_path = Path(
            hf_hub_download(model, config_file_name, revision=revision))

    with open(config_path, "r") as file:
        config_dict = json.load(file)

    config_mapping = {
        "dim": "hidden_size",
        "norm_eps": "rms_norm_eps",
        "n_kv_heads": "num_key_value_heads",
        "n_layers": "num_hidden_layers",
        "n_heads": "num_attention_heads",
        "hidden_dim": "intermediate_size",
    }

    def recurse_elems(elem: Any):
        if isinstance(elem, dict):
            config_dict = {}
            for key, value in elem.items():
                key = config_mapping.get(key, key)
                config_dict[key] = recurse_elems(value)
            return PretrainedConfig(**config_dict)
        else:
            return elem

    config_dict["model_type"] = config_dict.get("model_type", "transformer")
    config_dict["hidden_act"] = config_dict.get("activation", "silu")
    config_dict["tie_word_embeddings"] = config_dict.get(
        "tie_embeddings", False)
    config_dict["max_seq_len"] = config_dict.get("max_seq_len", 128_000)
    config_dict["max_position_embeddings"] = config_dict.get(
        "max_position_embeddings", 128_000)

    if config_dict.get("moe") is not None:
        config_dict["architectures"] = ["MixtralForCausalLM"]
    else:
        config_dict["architectures"] = ["MistralForCausalLM"]

    if config_dict.get("vision_encoder") is not None:
        multimodal_config = config_dict.pop("vision_encoder")

        config_dict = {
            "text_config": config_dict,
            "vision_config": multimodal_config
        }
        config_dict["architectures"] = ["PixtralForConditionalGeneration"]
        config_dict["model_type"] = "pixtral"

    config = recurse_elems(config_dict)
    return config
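# For illustration, a hypothetical params.json such as
#
#     {"dim": 4096, "n_layers": 32, "n_heads": 32, "n_kv_heads": 8,
#      "norm_eps": 1e-5, "hidden_dim": 14336}
#
# is renamed field-by-field through config_mapping above, yielding a
# PretrainedConfig with hidden_size=4096, num_hidden_layers=32, and so on;
# nested dicts (e.g. a "vision_encoder" section) become nested configs.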
def get_hf_text_config(config: PretrainedConfig):
    """Get the "sub" config relevant to the LLM for multimodal models.

    No-op for pure text models.
    """
    if hasattr(config, "text_config"):
        # The code operates under the assumption that text_config should have
        # `num_attention_heads` (among others). Assert here to fail early
        # if the transformers config doesn't align with this assumption.
        assert hasattr(config.text_config, "num_attention_heads")
        return config.text_config
    else:
        return config
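# E.g. for a LLaVA-style config with a nested text_config this returns the
# language-model sub-config; for a plain LlamaConfig it returns the config
# unchanged.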
def try_get_generation_config(
    model: str,
    trust_remote_code: bool,
    revision: Optional[str] = None,
) -> Optional[GenerationConfig]:
    try:
        return GenerationConfig.from_pretrained(
            model,
            revision=revision,
        )
    except OSError:  # Not found
        try:
            config = get_config(
                model,
                trust_remote_code=trust_remote_code,
                revision=revision,
            )
            return GenerationConfig.from_model_config(config)
        except OSError:  # Not found
            return None
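if __name__ == "__main__":
    # A minimal smoke-test sketch, assuming network access or a populated
    # HF cache; the "gpt2" repo id is only an example.
    cfg = get_config("gpt2", trust_remote_code=False)
    print(type(cfg).__name__, get_hf_text_config(cfg).num_attention_heads)
    gen_cfg = try_get_generation_config("gpt2", trust_remote_code=False)
    print("generation config found:", gen_cfg is not None)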