hf_downloader.py

  1. """Utilities for downloading and initializing model weights."""
  2. import fnmatch
  3. import glob
  4. import json
  5. import os
  6. from collections import defaultdict
  7. from typing import Any, Iterable, Iterator, List, Optional, Tuple
  8. import filelock
  9. import huggingface_hub.constants
  10. import numpy as np
  11. import torch
  12. from huggingface_hub import HfFileSystem, snapshot_download
  13. from loguru import logger
  14. from safetensors.torch import load_file, safe_open, save_file
  15. from tqdm.auto import tqdm
  16. from transformers import PretrainedConfig, AutoModelForCausalLM
  17. from aphrodite.common.config import ModelConfig
  18. from aphrodite.common.gguf import (GGUFReader, get_tensor_name_map,
  19. MODEL_ARCH_NAMES)
  20. from aphrodite.common.logger import get_loading_progress_bar
  21. from aphrodite.modeling.layers.quantization import (QuantizationConfig,
  22. get_quantization_config)
  23. from aphrodite.modeling.layers.quantization.schema import QuantParamSchema
  24. _xdg_cache_home = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
  25. _aphrodite_filelocks_path = os.path.join(_xdg_cache_home, "aphrodite/locks/")


def enable_hf_transfer():
    """Automatically activate hf_transfer, if it is installed."""
    if "HF_HUB_ENABLE_HF_TRANSFER" not in os.environ:
        try:
            # Enable hf hub transfer if available.
            import hf_transfer  # type: ignore # noqa
            huggingface_hub.constants.HF_HUB_ENABLE_HF_TRANSFER = True
        except ImportError:
            pass


enable_hf_transfer()


class Disabledtqdm(tqdm):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs, disable=True)


def get_lock(model_name_or_path: str, cache_dir: Optional[str] = None):
    lock_dir = cache_dir if cache_dir is not None else _aphrodite_filelocks_path
    # Create the lock directory itself. (The previous
    # os.makedirs(os.path.dirname(lock_dir), ...) only created the parent
    # when a cache_dir without a trailing slash was passed in.)
    os.makedirs(lock_dir, exist_ok=True)
    lock_file_name = model_name_or_path.replace("/", "-") + ".lock"
    lock = filelock.SoftFileLock(os.path.join(lock_dir, lock_file_name))
    return lock
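

# Example usage (illustrative sketch; the repo id and cache dir below are
# hypothetical):
#
#     with get_lock("some-org/some-model-7b", cache_dir="/tmp/weights"):
#         # Only one local process at a time enters this block for the same
#         # model, so concurrent workers do not race on the download.
#         snapshot_download("some-org/some-model-7b", cache_dir="/tmp/weights")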


def _shared_pointers(tensors):
    """Return groups of tensor names that share the same underlying storage."""
    ptrs = defaultdict(list)
    for k, v in tensors.items():
        ptrs[v.data_ptr()].append(k)
    failing = []
    for _, names in ptrs.items():
        if len(names) > 1:
            failing.append(names)
    return failing


def convert_bin_to_safetensor_file(
    pt_filename: str,
    sf_filename: str,
) -> None:
    loaded = torch.load(pt_filename, map_location="cpu")
    if "state_dict" in loaded:
        loaded = loaded["state_dict"]
    # safetensors cannot store aliased tensors, so keep only one name
    # from each group of weights that share storage.
    shared = _shared_pointers(loaded)
    for shared_weights in shared:
        for name in shared_weights[1:]:
            loaded.pop(name)

    # Force tensors to be contiguous.
    loaded = {k: v.contiguous() for k, v in loaded.items()}

    dirname = os.path.dirname(sf_filename)
    os.makedirs(dirname, exist_ok=True)
    save_file(loaded, sf_filename, metadata={"format": "pt"})

    # Check the file size.
    sf_size = os.stat(sf_filename).st_size
    pt_size = os.stat(pt_filename).st_size
    if (sf_size - pt_size) / pt_size > 0.01:
        raise RuntimeError(f"""The file size difference is more than 1%:
        - {sf_filename}: {sf_size}
        - {pt_filename}: {pt_size}
        """)

    # Check that the tensors are the same.
    reloaded = load_file(sf_filename)
    for k in loaded:
        pt_tensor = loaded[k]
        sf_tensor = reloaded[k]
        if not torch.equal(pt_tensor, sf_tensor):
            raise RuntimeError(f"The output tensors do not match for key {k}")
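

# Example usage (illustrative sketch; both paths are hypothetical):
#
#     convert_bin_to_safetensor_file(
#         "/models/foo/pytorch_model.bin",
#         "/models/foo/model.safetensors",
#     )
#
# The conversion fails loudly if the safetensors file differs in size by more
# than 1% or if any reloaded tensor does not match the original.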


# TODO: Move this to another place.
def get_quant_config(model_config: ModelConfig) -> QuantizationConfig:
    quant_cls = get_quantization_config(model_config.quantization)
    # Read the quantization config from the HF model config, if available.
    # If the quantization is "exl2" or "gguf", skip this and return the
    # default quant_cls().
    if model_config.quantization in ["exl2", "gguf"]:
        return quant_cls()
    hf_quant_config = getattr(model_config.hf_config, "quantization_config",
                              None)
    if hf_quant_config is not None:
        return quant_cls.from_config(hf_quant_config)
    model_name_or_path = model_config.model
    is_local = os.path.isdir(model_name_or_path)
    if not is_local:
        # Download the config files.
        with get_lock(model_name_or_path, model_config.download_dir):
            hf_folder = snapshot_download(
                model_name_or_path,
                revision=model_config.revision,
                allow_patterns="*.json",
                cache_dir=model_config.download_dir,
                tqdm_class=Disabledtqdm,
            )
    else:
        hf_folder = model_name_or_path
    config_files = glob.glob(os.path.join(hf_folder, "*.json"))
    quant_config_files = [
        f for f in config_files if any(
            f.endswith(x) for x in quant_cls.get_config_filenames())
    ]
    if len(quant_config_files) == 0:
        raise ValueError(
            f"Cannot find the config file for {model_config.quantization}")
    if len(quant_config_files) > 1:
        raise ValueError(
            f"Found multiple config files for {model_config.quantization}: "
            f"{quant_config_files}")
    quant_config_file = quant_config_files[0]
    with open(quant_config_file, "r") as f:
        config = json.load(f)
    return quant_cls.from_config(config)
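

# Example usage (illustrative sketch; assumes `model_config` is an Aphrodite
# ModelConfig for a quantized model, e.g. one created with
# quantization="gptq"):
#
#     quant_config = get_quant_config(model_config)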


def prepare_hf_model_weights(
    model_name_or_path: str,
    cache_dir: Optional[str] = None,
    load_format: str = "auto",
    fall_back_to_pt: bool = True,
    revision: Optional[str] = None,
) -> Tuple[str, List[str], bool]:
    # Download model weights from huggingface.
    is_local = os.path.isdir(model_name_or_path)
    use_safetensors = False
    # Some quantized models use .pt files for storing the weights.
    if load_format == "auto":
        allow_patterns = ["*.safetensors", "*.bin"]
    elif load_format == "safetensors":
        use_safetensors = True
        allow_patterns = ["*.safetensors"]
    elif load_format == "pt":
        allow_patterns = ["*.pt"]
    elif load_format == "npcache":
        allow_patterns = ["*.bin"]
    else:
        raise ValueError(f"Unknown load_format: {load_format}")

    if fall_back_to_pt:
        allow_patterns += ["*.pt"]

    if not is_local:
        # Before downloading, check which weight files are available.
        fs = HfFileSystem()
        file_list = fs.ls(model_name_or_path, detail=False, revision=revision)

        # Depending on what is available, narrow the download to a single
        # pattern.
        for pattern in allow_patterns:
            matching = fnmatch.filter(file_list, pattern)
            if len(matching) > 0:
                allow_patterns = [pattern]
                break

        logger.info(f"Using model weights format {allow_patterns}")
        # Use file lock to prevent multiple processes from
        # downloading the same model weights at the same time.
        with get_lock(model_name_or_path, cache_dir):
            hf_folder = snapshot_download(
                model_name_or_path,
                allow_patterns=allow_patterns,
                cache_dir=cache_dir,
                tqdm_class=Disabledtqdm,
                revision=revision,
            )
    else:
        hf_folder = model_name_or_path
    hf_weights_files: List[str] = []
    for pattern in allow_patterns:
        hf_weights_files += glob.glob(os.path.join(hf_folder, pattern))
        if len(hf_weights_files) > 0:
            if pattern == "*.safetensors":
                use_safetensors = True
            break
    if not use_safetensors:
        # Exclude files that are not needed for inference.
        # https://github.com/huggingface/transformers/blob/v4.34.0/src/transformers/trainer.py#L227-L233
        blacklist = [
            "training_args.bin",
            "optimizer.bin",
            "optimizer.pt",
            "scheduler.pt",
            "scaler.pt",
            "trainer_state.json",
            "hidden_states.safetensors",  # exllamav2
        ]
        hf_weights_files = [
            f for f in hf_weights_files
            if not any(f.endswith(x) for x in blacklist)
        ]

    if len(hf_weights_files) == 0:
        raise RuntimeError(
            f"Cannot find any model weights with `{model_name_or_path}`")

    return hf_folder, hf_weights_files, use_safetensors
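

# Example usage (illustrative sketch; the repo id is hypothetical). With
# load_format="auto", safetensors files are preferred over .bin when both
# are present:
#
#     hf_folder, weight_files, use_safetensors = prepare_hf_model_weights(
#         "some-org/some-model-7b",
#         cache_dir="/tmp/weights",
#         load_format="auto",
#     )
#     # use_safetensors is True iff *.safetensors files were selected.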


def convert_gguf_to_state_dict(checkpoint, config):
    model_type = config.model_type
    # Hack: GGUF uses a different model type name than transformers for
    # some models.
    if model_type == "cohere":
        model_type = "command-r"
    arch = None
    for key, value in MODEL_ARCH_NAMES.items():
        if value == model_type:
            arch = key
            break
    if arch is None:
        raise RuntimeError(f"Unknown model_type: {model_type}")

    num_layers = config.num_hidden_layers
    name_map = get_tensor_name_map(arch, num_layers)
    with torch.device("meta"):
        dummy_model = AutoModelForCausalLM.from_config(config)
    state_dict = dummy_model.state_dict()

    gguf_to_hf_name_map = {}
    keys_to_remove = []
    for hf_name in state_dict:
        name, suffix = hf_name.rsplit(".", 1)
        gguf_name = name_map.get_name(name)
        if gguf_name:
            gguf_to_hf_name_map[f"{gguf_name}.{suffix}"] = hf_name
        elif name == "lm_head":
            keys_to_remove.append(hf_name)
            logger.warning(
                f"GGUF tensor name for {hf_name} not found, "
                "this is normal if the model uses tied word embeddings.")
        else:
            logger.warning(
                f"GGUF tensor name for {hf_name} in hf state_dict not found.")
    for key in keys_to_remove:
        state_dict.pop(key)

    if os.path.isfile(checkpoint):
        results = [GGUFReader(checkpoint)]
    elif os.path.isdir(checkpoint):
        results = [
            GGUFReader(os.path.join(checkpoint, file))
            for file in os.listdir(checkpoint)
            if os.path.splitext(file)[-1].lower() == ".gguf"
        ]
    else:
        raise RuntimeError(
            f"Cannot find any model weights with `{checkpoint}`")

    with get_loading_progress_bar() as progress:
        task = progress.add_task(
            "[cyan]Converting GGUF tensors to PyTorch...",
            total=sum(len(result.tensors) for result in results),
        )
        for result in results:
            for ts in result.tensors:
                try:
                    hf_name = gguf_to_hf_name_map[ts.name]
                except KeyError:
                    logger.warning(
                        f"hf tensor name for {ts.name} in GGUF not found.")
                    continue
                data = torch.tensor(ts.data)
                if state_dict[hf_name].dim() == 2:
                    data = data.view(state_dict[hf_name].shape[0], -1)
                state_dict[hf_name] = data
                weight_type = torch.tensor(int(ts.tensor_type),
                                           dtype=torch.int)
                if weight_type > 1:
                    state_dict[hf_name.replace("weight",
                                               "weight_type")] = weight_type
                progress.update(task, advance=1)
    return state_dict
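

# Example usage (illustrative sketch; the paths are hypothetical). The config
# must describe the same architecture as the GGUF checkpoint:
#
#     from transformers import AutoConfig
#
#     config = AutoConfig.from_pretrained("/models/foo")
#     state_dict = convert_gguf_to_state_dict("/models/foo/model.gguf", config)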


def hf_model_weights_iterator(
    model_name_or_path: str,
    cache_dir: Optional[str] = None,
    load_format: str = "auto",
    revision: Optional[str] = None,
    config: Optional[PretrainedConfig] = None,
    fall_back_to_pt: Optional[bool] = True,
) -> Iterator[Tuple[str, torch.Tensor]]:
    if model_name_or_path.endswith("gguf"):
        for name, param in convert_gguf_to_state_dict(model_name_or_path,
                                                      config).items():
            yield name, param
        return

    hf_folder, hf_weights_files, use_safetensors = prepare_hf_model_weights(
        model_name_or_path,
        cache_dir=cache_dir,
        load_format=load_format,
        fall_back_to_pt=fall_back_to_pt,
        revision=revision,
    )

    if load_format == "npcache":
        # Currently np_cache only supports *.bin checkpoints.
        assert use_safetensors is False

        # Convert the model weights from torch tensors to numpy arrays for
        # faster loading.
        np_folder = os.path.join(hf_folder, "np")
        os.makedirs(np_folder, exist_ok=True)
        weight_names_file = os.path.join(np_folder, "weight_names.json")
        # Use file lock to prevent multiple processes from
        # dumping the same model weights to numpy at the same time.
        with get_lock(model_name_or_path, cache_dir):
            if not os.path.exists(weight_names_file):
                weight_names = []
                for bin_file in hf_weights_files:
                    state = torch.load(bin_file, map_location="cpu")
                    for name, param in state.items():
                        param_path = os.path.join(np_folder, name)
                        with open(param_path, "wb") as f:
                            np.save(f, param.cpu().detach().numpy())
                        weight_names.append(name)
                with open(weight_names_file, "w") as f:
                    json.dump(weight_names, f)

        with open(weight_names_file, "r") as f:
            weight_names = json.load(f)

        for name in weight_names:
            param_path = os.path.join(np_folder, name)
            with open(param_path, "rb") as f:
                param = np.load(f)
            yield name, torch.from_numpy(param)
    elif use_safetensors:
        for st_file in hf_weights_files:
            with safe_open(st_file, framework="pt") as f:
                for name in f.keys():  # noqa: SIM118
                    param = f.get_tensor(name)
                    yield name, param
    else:
        for bin_file in hf_weights_files:
            state = torch.load(bin_file, map_location="cpu")
            for name, param in state.items():
                yield name, param
            del state
            torch.cuda.empty_cache()
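

# Example usage (illustrative sketch; the repo id is hypothetical and
# `model_state_dict` stands for the target model's state dict):
#
#     for name, loaded_weight in hf_model_weights_iterator(
#             "some-org/some-model-7b", load_format="auto"):
#         param = model_state_dict[name]
#         default_weight_loader(param, loaded_weight)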


def kv_cache_scales_loader(
        filename: str, tp_rank: int, tp_size: int, num_hidden_layers: int,
        model_type: Optional[str]) -> Iterable[Tuple[int, float]]:
    """
    A simple utility to read in KV cache scaling factors that have been
    previously serialized to disk. Used by the model to populate the
    appropriate KV cache scaling factors. The serialization should represent
    a dictionary whose keys are the TP ranks and values are another
    dictionary mapping layers to their KV cache scaling factors.
    Keep this function in sync with the output of
    examples/fp8/extract_scales.py.
    """
    try:
        with open(filename) as f:
            context = {
                "model_type": model_type,
                "num_hidden_layers": num_hidden_layers,
                "tp_rank": tp_rank,
                "tp_size": tp_size,
            }
            schema_dct = json.load(f)
            schema = QuantParamSchema.model_validate(schema_dct,
                                                     context=context)
            layer_scales_map = schema.kv_cache.scaling_factor[tp_rank]
            return layer_scales_map.items()
    except FileNotFoundError:
        logger.error(f"File or directory '{filename}' not found.")
    except json.JSONDecodeError:
        logger.error(f"Error decoding JSON in file '{filename}'.")
    except Exception as e:
        logger.error(f"An error occurred while reading '{filename}': {e}")
    # This section is reached if and only if any of the excepts are hit.
    # Return an empty iterable (list) => no KV cache scales are loaded,
    # which ultimately defaults to 1.0 scales.
    logger.warning("Defaulting to KV cache scaling factors = 1.0 "
                   f"for all layers in TP rank {tp_rank} "
                   "as an error occurred during loading.")
    return []
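

# Example usage (illustrative sketch; the JSON path is hypothetical and is
# expected to follow the schema produced by examples/fp8/extract_scales.py):
#
#     for layer_idx, scale in kv_cache_scales_loader(
#             "/models/foo/kv_cache_scales.json",
#             tp_rank=0, tp_size=1, num_hidden_layers=32, model_type="llama"):
#         ...  # apply `scale` to the KV cache of attention layer `layer_idx`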


def convert_pyslice_to_tensor(x: Any) -> torch.Tensor:
    """Convert a PySafeSlice object from safetensors to a torch.Tensor.

    PySafeSlice objects support indexing, which is applied before the actual
    tensor is loaded and can therefore reduce the amount of data read into
    memory. However, they do not support more advanced functionality such as
    `.view()` or `.t()`. If the loaded tensor needs to be modified with such
    operators, it must be converted to a full tensor first.
    """
    if not isinstance(x, torch.Tensor):
        x = x[:]
    return x
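

# Example usage (illustrative sketch; the file path and tensor name are
# hypothetical):
#
#     with safe_open("/models/foo/model.safetensors", framework="pt") as f:
#         pyslice = f.get_slice("some.weight")  # nothing loaded yet
#         weight = convert_pyslice_to_tensor(pyslice)  # loads the full tensor
#         weight = weight.t()  # advanced ops now work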


def default_weight_loader(param: torch.Tensor,
                          loaded_weight: torch.Tensor) -> None:
    """Default weight loader."""
    if isinstance(param, torch.nn.parameter.UninitializedParameter):
        param.materialize(loaded_weight.shape, dtype=loaded_weight.dtype)
    assert param.size() == loaded_weight.size()
    param.data.copy_(loaded_weight)


def initialize_dummy_weights(
    model: torch.nn.Module,
    low: float = -1e-3,
    high: float = 1e-3,
) -> None:
    """Initialize model weights with random values.

    The model weights must be randomly initialized for accurate performance
    measurements. Additionally, the model weights should not cause NaNs in
    the forward pass. We empirically found that initializing the weights with
    values between -1e-3 and 1e-3 works well for most models.
    """
    for param in model.state_dict().values():
        if torch.is_floating_point(param):
            param.data.uniform_(low, high)
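

# Example usage (illustrative sketch; any torch.nn.Module works):
#
#     model = torch.nn.Linear(16, 16)
#     initialize_dummy_weights(model)
#     assert model.weight.abs().max() <= 1e-3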