config.py

from math import exp, log
from typing import Optional

import torch
from transformers import PretrainedConfig
from transformers.utils.quantization_config import QuantizationMethod

from aphrodite.common.logger import init_logger
from aphrodite.common.utils import get_cpu_memory
from aphrodite.transformers_utils.config import get_config

logger = init_logger(__name__)

_GB = 1 << 30


class ModelConfig:
    """Configuration for the model.

    Args:
        model: Name or path of the huggingface model to use.
        tokenizer: Name or path of the huggingface tokenizer to use.
        tokenizer_mode: Tokenizer mode. "auto" will use the fast tokenizer if
            available, and "slow" will always use the slow tokenizer.
        trust_remote_code: Trust remote code (e.g., from HuggingFace) when
            downloading the model and tokenizer.
        download_dir: Directory to download and load the weights, defaults to
            the default cache directory of huggingface.
        load_format: The format of the model weights to load:
            "auto" will try to load the weights in the safetensors format and
                fall back to the pytorch bin format if safetensors format is
                not available.
            "pt" will load the weights in the pytorch bin format.
            "safetensors" will load the weights in the safetensors format.
            "npcache" will load the weights in pytorch format and store
                a numpy cache to speed up the loading.
            "dummy" will initialize the weights with random values, which is
                mainly for profiling.
        dtype: Data type for model weights and activations. The "auto" option
            will use FP16 precision for FP32 and FP16 models, and BF16
            precision for BF16 models.
        seed: Random seed for reproducibility.
        revision: The specific model version to use. It can be a branch name,
            a tag name, or a commit id. If unspecified, will use the default
            version.
        max_model_len: Maximum length of a sequence (including prompt and
            output). If None, will be derived from the model.
        quantization: Quantization method that was used to quantize the model
            weights. If None, we assume the model weights are not quantized.
    """

    def __init__(
        self,
        model: str,
        tokenizer: str,
        tokenizer_mode: str,
        trust_remote_code: bool,
        download_dir: Optional[str],
        load_format: str,
        dtype: str,
        seed: int,
        revision: Optional[str] = None,
        max_model_len: Optional[int] = None,
        quantization: Optional[str] = None,
    ) -> None:
        self.model = model
        self.tokenizer = tokenizer
        self.tokenizer_mode = tokenizer_mode
        self.trust_remote_code = trust_remote_code
        self.download_dir = download_dir
        self.load_format = load_format
        self.seed = seed
        self.revision = revision
        self.quantization = quantization

        self.hf_config = get_config(model, trust_remote_code, revision)
        self.dtype = _get_and_verify_dtype(self.hf_config, dtype)
        self.max_model_len = _get_and_verify_max_len(self.hf_config,
                                                     max_model_len)
        self._verify_load_format()
        self._verify_tokenizer_mode()
        self._verify_quantization()

    def _verify_load_format(self) -> None:
        load_format = self.load_format.lower()
        if load_format not in [
                "auto", "pt", "safetensors", "npcache", "dummy"
        ]:
            raise ValueError(
                f"Unknown load format: {self.load_format}. Must be one of "
                "'auto', 'pt', 'safetensors', 'npcache', or 'dummy'.")
        self.load_format = load_format

    def _verify_tokenizer_mode(self) -> None:
        tokenizer_mode = self.tokenizer_mode.lower()
        if tokenizer_mode not in ["auto", "slow"]:
            raise ValueError(
                f"Unknown tokenizer mode: {self.tokenizer_mode}. Must be "
                "either 'auto' or 'slow'.")
        self.tokenizer_mode = tokenizer_mode

    def _verify_quantization(self) -> None:
        supported_quantization = ["awq", "gptq"]
        # If the HF config declares a GPTQ quantization_config, adopt it.
        hf_quant_config = getattr(self.hf_config, "quantization_config", None)
        if (hf_quant_config is not None and hf_quant_config.get("quant_method")
                == QuantizationMethod.GPTQ):
            self.quantization = "gptq"
        if self.quantization is None:
            return
        quantization = self.quantization.lower()
        if quantization not in supported_quantization:
            raise ValueError(
                f"Unknown quantization: {self.quantization}. Must be one of "
                f"{supported_quantization}.")
        self.quantization = quantization

    def verify_with_parallel_config(
        self,
        parallel_config: "ParallelConfig",
    ) -> None:
        total_num_attention_heads = self.hf_config.num_attention_heads
        tensor_parallel_size = parallel_config.tensor_parallel_size
        if total_num_attention_heads % tensor_parallel_size != 0:
            raise ValueError(
                f"Total number of attention heads ({total_num_attention_heads})"
                " must be divisible by tensor parallel size "
                f"({tensor_parallel_size}).")

        total_num_hidden_layers = self.hf_config.num_hidden_layers
        pipeline_parallel_size = parallel_config.pipeline_parallel_size
        if total_num_hidden_layers % pipeline_parallel_size != 0:
            raise ValueError(
                f"Total number of hidden layers ({total_num_hidden_layers}) "
                "must be divisible by pipeline parallel size "
                f"({pipeline_parallel_size}).")

    def get_hidden_size(self) -> int:
        return self.hf_config.hidden_size

    def get_head_size(self) -> int:
        # FIXME(woosuk): This may not be true for all models.
        return self.hf_config.hidden_size // self.hf_config.num_attention_heads

    def get_num_kv_heads(self, parallel_config: "ParallelConfig") -> int:
        """Returns the number of KV heads per GPU worker."""
        # For Falcon:
        if getattr(self.hf_config, "n_head_kv", None) is not None:
            return (self.hf_config.n_head_kv //
                    parallel_config.tensor_parallel_size)
        if getattr(self.hf_config, "num_kv_heads", None) is not None:
            return (self.hf_config.num_kv_heads //
                    parallel_config.tensor_parallel_size)
        # For LLaMA-2:
        if getattr(self.hf_config, "num_key_value_heads", None) is not None:
            return (self.hf_config.num_key_value_heads //
                    parallel_config.tensor_parallel_size)
        # Otherwise, assume the number of KV heads equals the number of
        # attention heads (standard multi-head attention).
        total_num_attention_heads = self.hf_config.num_attention_heads
        return (total_num_attention_heads //
                parallel_config.tensor_parallel_size)

    def get_max_model_len(self) -> int:
        return self.max_model_len

    def get_num_layers(self, parallel_config: "ParallelConfig") -> int:
        total_num_hidden_layers = self.hf_config.num_hidden_layers
        return (total_num_hidden_layers //
                parallel_config.pipeline_parallel_size)
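
# Example (illustrative sketch, not part of the original module): constructing
# a ModelConfig by hand. The model name below is a placeholder; in practice
# these values come from the engine's argument parser, and get_config() will
# fetch the HuggingFace config for the given model.
#
#     model_config = ModelConfig(
#         model="facebook/opt-125m",
#         tokenizer="facebook/opt-125m",
#         tokenizer_mode="auto",
#         trust_remote_code=False,
#         download_dir=None,
#         load_format="auto",
#         dtype="auto",
#         seed=0,
#     )
#     print(model_config.get_max_model_len(), model_config.dtype)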


class CacheConfig:
    """Configuration for the KV cache.

    Args:
        block_size: Size of a cache block in number of tokens.
        gpu_memory_utilization: Fraction of GPU memory to use for the
            Aphrodite execution.
        swap_space: Size of the CPU swap space per GPU (in GiB).
        sliding_window: Sliding window size (in tokens) for models that use
            sliding window attention, if any.
    """

    def __init__(
        self,
        block_size: int,
        gpu_memory_utilization: float,
        swap_space: int,
        sliding_window: Optional[int] = None,
    ) -> None:
        self.block_size = block_size
        self.gpu_memory_utilization = gpu_memory_utilization
        self.swap_space_bytes = swap_space * _GB
        self.sliding_window = sliding_window
        self._verify_args()

        # Will be set after profiling.
        self.num_gpu_blocks = None
        self.num_cpu_blocks = None

    def _verify_args(self) -> None:
        if self.gpu_memory_utilization > 1.0:
            raise ValueError(
                "GPU memory utilization must not exceed 1.0. Got "
                f"{self.gpu_memory_utilization}.")

    def verify_with_parallel_config(
        self,
        parallel_config: "ParallelConfig",
    ) -> None:
        total_cpu_memory = get_cpu_memory()
        # FIXME: Here, it is assumed that the GPUs in a tensor parallel
        # group are in the same node. However, the GPUs may span
        # multiple nodes.
        num_gpus_per_node = parallel_config.tensor_parallel_size
        cpu_memory_usage = self.swap_space_bytes * num_gpus_per_node

        msg = (f"{cpu_memory_usage / _GB:.2f} GiB out of "
               f"the {total_cpu_memory / _GB:.2f} GiB total CPU memory is "
               "allocated for the swap space.")
        if cpu_memory_usage > 0.7 * total_cpu_memory:
            raise ValueError("Too large swap space. " + msg)
        elif cpu_memory_usage > 0.4 * total_cpu_memory:
            logger.warning("Possibly too large swap space. " + msg)
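
# Example (illustrative sketch, not part of the original module): with the
# hypothetical values below, a 4 GiB swap space per GPU is reserved as pinned
# CPU memory; verify_with_parallel_config then checks the total (multiplied by
# the tensor parallel size) against 70% of the host's RAM.
#
#     cache_config = CacheConfig(block_size=16,
#                                gpu_memory_utilization=0.9,
#                                swap_space=4)
#     assert cache_config.swap_space_bytes == 4 * (1 << 30)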


class ParallelConfig:
    """Configuration for the distributed execution.

    Args:
        pipeline_parallel_size: Number of pipeline parallel groups.
        tensor_parallel_size: Number of tensor parallel groups.
        worker_use_ray: Whether to use Ray for model workers. Will be set to
            True if either pipeline_parallel_size or tensor_parallel_size is
            greater than 1.
    """

    def __init__(
        self,
        pipeline_parallel_size: int,
        tensor_parallel_size: int,
        worker_use_ray: bool,
    ) -> None:
        self.pipeline_parallel_size = pipeline_parallel_size
        self.tensor_parallel_size = tensor_parallel_size
        self.worker_use_ray = worker_use_ray

        self.world_size = pipeline_parallel_size * tensor_parallel_size
        if self.world_size > 1:
            self.worker_use_ray = True
        self._verify_args()

    def _verify_args(self) -> None:
        if self.pipeline_parallel_size > 1:
            raise NotImplementedError(
                "Pipeline parallelism is not supported yet.")
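
# Example (illustrative sketch, not part of the original module): a world size
# of pipeline_parallel_size * tensor_parallel_size greater than 1 forces Ray
# workers on, regardless of the worker_use_ray argument.
#
#     parallel_config = ParallelConfig(pipeline_parallel_size=1,
#                                      tensor_parallel_size=2,
#                                      worker_use_ray=False)
#     assert parallel_config.world_size == 2
#     assert parallel_config.worker_use_ray is True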


class SchedulerConfig:
    """Scheduler configuration.

    Args:
        max_num_batched_tokens: Maximum number of tokens to be processed in
            a single iteration.
        max_num_seqs: Maximum number of sequences to be processed in a single
            iteration.
        max_model_len: Maximum length of a sequence (including prompt
            and generated text).
        max_paddings: Maximum number of paddings to be added to a batch.
    """

    def __init__(
        self,
        max_num_batched_tokens: Optional[int],
        max_num_seqs: int,
        max_model_len: int,
        max_paddings: int,
    ) -> None:
        if max_num_batched_tokens is not None:
            self.max_num_batched_tokens = max_num_batched_tokens
        else:
            # If unspecified, default to the model's context length, but use
            # at least 2048 tokens for reasonable throughput.
            self.max_num_batched_tokens = max(max_model_len, 2048)
        self.max_num_seqs = max_num_seqs
        self.max_model_len = max_model_len
        self.max_paddings = max_paddings
        self._verify_args()

    def _verify_args(self) -> None:
        if self.max_num_batched_tokens < self.max_model_len:
            raise ValueError(
                f"max_num_batched_tokens ({self.max_num_batched_tokens}) is "
                f"smaller than max_model_len ({self.max_model_len}). "
                "This effectively limits the maximum sequence length to "
                "max_num_batched_tokens and makes Aphrodite reject longer "
                "sequences. Please increase max_num_batched_tokens or "
                "decrease max_model_len.")
        if self.max_num_batched_tokens < self.max_num_seqs:
            raise ValueError(
                f"max_num_batched_tokens ({self.max_num_batched_tokens}) must "
                "be greater than or equal to max_num_seqs "
                f"({self.max_num_seqs}).")
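
# Example (illustrative sketch, not part of the original module): leaving
# max_num_batched_tokens unset makes the token budget default to
# max(max_model_len, 2048), so a 4096-context model gets a 4096-token budget.
#
#     scheduler_config = SchedulerConfig(max_num_batched_tokens=None,
#                                        max_num_seqs=256,
#                                        max_model_len=4096,
#                                        max_paddings=256)
#     assert scheduler_config.max_num_batched_tokens == 4096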


_STR_DTYPE_TO_TORCH_DTYPE = {
    "half": torch.float16,
    "float16": torch.float16,
    "float": torch.float32,
    "float32": torch.float32,
    "bfloat16": torch.bfloat16,
}


def _get_and_verify_dtype(
    config: PretrainedConfig,
    dtype: str,
) -> torch.dtype:
    # NOTE: getattr(config, "torch_dtype", torch.float32) is not correct
    # because config.torch_dtype can be None.
    config_dtype = getattr(config, "torch_dtype", None)
    if config_dtype is None:
        config_dtype = torch.float32

    dtype = dtype.lower()
    if dtype == "auto":
        if config_dtype == torch.float32:
            # Following the common practice, we use float16 for float32
            # models.
            torch_dtype = torch.float16
        else:
            torch_dtype = config_dtype
    else:
        if dtype not in _STR_DTYPE_TO_TORCH_DTYPE:
            raise ValueError(f"Unknown dtype: {dtype}")
        torch_dtype = _STR_DTYPE_TO_TORCH_DTYPE[dtype]

    # Verify the dtype.
    if torch_dtype != config_dtype:
        if torch_dtype == torch.float32:
            # Upcasting to float32 is allowed.
            pass
        elif config_dtype == torch.float32:
            # Downcasting from float32 to float16 or bfloat16 is allowed.
            pass
        else:
            # Casting between float16 and bfloat16 is allowed with a warning.
            logger.warning(f"Casting {config_dtype} to {torch_dtype}.")

    return torch_dtype
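
# Example (illustrative sketch, not part of the original module): a config
# that declares torch_dtype=float32 resolves to float16 under "auto", per the
# downcasting rule above; a bare PretrainedConfig stands in here for a real
# model config.
#
#     cfg = PretrainedConfig(torch_dtype="float32")
#     assert _get_and_verify_dtype(cfg, "auto") == torch.float16
#     assert _get_and_verify_dtype(cfg, "bfloat16") == torch.bfloat16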


def _get_and_verify_max_len(
    hf_config: PretrainedConfig,
    max_model_len: Optional[int],
) -> int:
    """Get and verify the model's maximum length."""
    derived_max_model_len = float("inf")
    possible_keys = [
        "max_position_embeddings",
        "n_positions",
        "max_seq_len",
        "max_sequence_length",
        "max_seq_length",
        "seq_len",
    ]
    for key in possible_keys:
        max_len_key = getattr(hf_config, key, None)
        if max_len_key is not None:
            derived_max_model_len = min(derived_max_model_len, max_len_key)
    if derived_max_model_len == float("inf"):
        if max_model_len is not None:
            # If max_model_len is specified, we use it.
            return max_model_len

        default_max_len = 2048
        logger.warning(
            "The model's config.json does not contain any of the following "
            "keys to determine the original maximum length of the model: "
            f"{possible_keys}. Assuming the model's maximum length is "
            f"{default_max_len}.")
        derived_max_model_len = default_max_len

    rope_scaling = getattr(hf_config, "rope_scaling", None)
    if rope_scaling is not None:
        assert "factor" in rope_scaling
        scaling_factor = rope_scaling["factor"]
        if rope_scaling["type"] == "yarn":
            derived_max_model_len = rope_scaling[
                "original_max_position_embeddings"]
        derived_max_model_len *= scaling_factor

    if max_model_len is None:
        max_model_len = derived_max_model_len
    elif max_model_len > derived_max_model_len:
        # The user requested a longer context than the model natively
        # supports, so enable dynamic RoPE scaling to stretch it.
        if derived_max_model_len == 4096:
            # Heuristic scaling curve for models with a native 4096-token
            # context.
            scaling_factor = exp(
                log((max_model_len - 1150.29) / 2982.33) / .884113)
        else:
            scaling_factor = max_model_len / derived_max_model_len
        hf_config.rope_scaling = {"factor": scaling_factor, "type": "dynamic"}
        logger.warning(
            f"User-specified max_model_len {max_model_len} is higher than "
            f"the original {derived_max_model_len}. "
            "Attempting to use RoPE scaling.")
        derived_max_model_len = max_model_len

    return int(max_model_len)
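
# Example (illustrative sketch, not part of the original module): a config
# advertising 4096 positions combined with a requested max_model_len of 8192
# triggers dynamic RoPE scaling; the factor comes from the exp/log curve above
# rather than the plain 8192 / 4096 ratio. PretrainedConfig is used here as a
# stand-in for a real model config.
#
#     cfg = PretrainedConfig(max_position_embeddings=4096)
#     assert _get_and_verify_max_len(cfg, 8192) == 8192
#     assert cfg.rope_scaling["type"] == "dynamic"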