# config.py
import enum
import json
import os
from dataclasses import dataclass, field, fields
from typing import (TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Tuple,
                    Union)

import torch
from loguru import logger
from transformers import PretrainedConfig

from aphrodite.common.utils import get_cpu_memory, is_cpu, is_hip, is_neuron
from aphrodite.modeling.models import ModelRegistry
from aphrodite.quantization import QUANTIZATION_METHODS
from aphrodite.transformers_utils.config import get_config, get_hf_text_config

if TYPE_CHECKING:
    from ray.util.placement_group import PlacementGroup

    from aphrodite.modeling.model_loader.loader import BaseModelLoader

# If true, will load models from ModelScope instead of Hugging Face Hub.
APHRODITE_USE_MODELSCOPE = os.environ.get("APHRODITE_USE_MODELSCOPE",
                                          "False").lower() == "true"

_GB = 1 << 30
_EMBEDDING_MODEL_MAX_NUM_BATCHED_TOKENS = 32768

class ModelConfig:
    """Configuration for the model.

    Args:
        model: Name or path of the huggingface model to use.
        tokenizer: Name or path of the huggingface tokenizer to use.
        tokenizer_mode: Tokenizer mode. "auto" will use the fast tokenizer if
            available, and "slow" will always use the slow tokenizer.
        trust_remote_code: Trust remote code (e.g., from HuggingFace) when
            downloading the model and tokenizer.
        dtype: Data type for model weights and activations. The "auto" option
            will use FP16 precision for FP32 and FP16 models, and BF16
            precision for BF16 models.
        seed: Random seed for reproducibility.
        revision: The specific model version to use. It can be a branch name,
            a tag name, or a commit id. If unspecified, will use the default
            version.
        code_revision: The specific revision to use for the model code on
            Hugging Face Hub. It can be a branch name, a tag name, or a
            commit id. If unspecified, will use the default version.
        rope_scaling: Dictionary containing the scaling configuration for the
            RoPE embeddings. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum.
        tokenizer_revision: The specific tokenizer version to use. It can be a
            branch name, a tag name, or a commit id. If unspecified, will use
            the default version.
        max_model_len: Maximum length of a sequence (including prompt and
            output). If None, will be derived from the model.
        quantization: Quantization method that was used to quantize the model
            weights. If None, we assume the model weights are not quantized.
        load_in_4bit: Whether to load the FP16 model in AutoQuant 4bit
            format. Works with AWQ models as well as FP16.
        load_in_8bit: Whether to load the FP16 model in 8bit format. Slower
            than load_in_smooth in terms of throughput.
        load_in_smooth: Whether to load the FP16 model in smoothquant format.
        deepspeed_fp_bits: Number of bits to use for DeepSpeed FP quantization.
            Supported number of bits are: 4, 6, 8, 12.
        quantization_param_path: Path to JSON file containing scaling factors.
            Used to load KV cache scaling factors into the model when KV cache
            type is FP8_E4M3 on ROCm (AMD GPU). In the future these will also
            be used to load activation and weight scaling factors when the
            model dtype is FP8_E4M3 on ROCm.
        enforce_eager: Whether to enforce eager execution. If True, we will
            disable CUDA graph and always execute the model in eager mode.
            If False, we will use CUDA graph and eager execution in hybrid.
        max_context_len_to_capture: Maximum context len covered by CUDA graphs.
            When a sequence has context length larger than this, we fall back
            to eager mode (DEPRECATED. Use max_seq_len_to_capture instead).
        max_seq_len_to_capture: Maximum sequence len covered by CUDA graphs.
            When a sequence has context length larger than this, we fall back
            to eager mode.
        disable_sliding_window: Whether to disable sliding window. If True,
            we will disable the sliding window functionality of the model.
            If the model does not support sliding window, this argument is
            ignored.
        skip_tokenizer_init: If true, skip initialization of tokenizer and
            detokenizer.
    """

    def __init__(
        self,
        model: str,
        tokenizer: str,
        tokenizer_mode: str,
        trust_remote_code: bool,
        dtype: Union[str, torch.dtype],
        seed: int,
        revision: Optional[str] = None,
        code_revision: Optional[str] = None,
        rope_scaling: Optional[dict] = None,
        tokenizer_revision: Optional[str] = None,
        max_model_len: Optional[int] = None,
        quantization: Optional[str] = None,
        load_in_4bit: bool = False,
        load_in_8bit: bool = False,
        load_in_smooth: bool = False,
        deepspeed_fp_bits: Optional[int] = None,
        quantization_param_path: Optional[str] = None,
        enforce_eager: bool = True,
        max_context_len_to_capture: Optional[int] = None,
        max_seq_len_to_capture: Optional[int] = None,
        max_logprobs: int = 5,
        disable_sliding_window: bool = False,
        skip_tokenizer_init: bool = False,
    ) -> None:
        self.model = model
        self.tokenizer = tokenizer
        self.tokenizer_mode = tokenizer_mode
        self.trust_remote_code = trust_remote_code
        self.seed = seed
        self.revision = revision
        self.code_revision = code_revision
        self.rope_scaling = rope_scaling
        # The tokenizer version is consistent with the model version by
        # default.
        if tokenizer_revision is None:
            self.tokenizer_revision = revision
        else:
            self.tokenizer_revision = tokenizer_revision
        self.quantization = quantization
        self.load_in_4bit = load_in_4bit
        self.load_in_8bit = load_in_8bit
        self.load_in_smooth = load_in_smooth
        self.deepspeed_fp_bits = deepspeed_fp_bits
        self.quantization_param_path = quantization_param_path
        self.enforce_eager = enforce_eager
        self.max_context_len_to_capture = max_context_len_to_capture
        if self.max_context_len_to_capture is not None:
            raise ValueError("`max_context_len_to_capture` is deprecated. "
                             "Use `max_seq_len_to_capture` instead.")
        self.max_seq_len_to_capture = (max_seq_len_to_capture
                                       or max_context_len_to_capture)
        self.max_logprobs = max_logprobs
        self.disable_sliding_window = disable_sliding_window
        self.skip_tokenizer_init = skip_tokenizer_init

        self.hf_config = get_config(self.model, trust_remote_code, revision,
                                    code_revision, rope_scaling)
        self.hf_text_config = get_hf_text_config(self.hf_config)
        self.dtype = _get_and_verify_dtype(self.hf_text_config, dtype)
        self.max_model_len = _get_and_verify_max_len(
            hf_config=self.hf_text_config,
            max_model_len=max_model_len,
            disable_sliding_window=self.disable_sliding_window,
            sliding_window_len=self.get_hf_config_sliding_window())

        if (getattr(self.hf_config, "max_position_embeddings", 0) == 131072
                and getattr(self.hf_config, "rope_scaling", None) is None):
            self.hf_config.update({"rope_scaling": {
                "type": "extended",
            }})

        if not self.skip_tokenizer_init:
            self._verify_tokenizer_mode()
        self._verify_embedding_mode()
        self._verify_quantization()
        self._verify_cuda_graph()

    def _verify_tokenizer_mode(self) -> None:
        tokenizer_mode = self.tokenizer_mode.lower()
        if tokenizer_mode not in ["auto", "slow"]:
            raise ValueError(
                f"Unknown tokenizer mode: {self.tokenizer_mode}. Must be "
                "either 'auto' or 'slow'.")
        self.tokenizer_mode = tokenizer_mode

    def _verify_embedding_mode(self) -> None:
        architectures = getattr(self.hf_config, "architectures", [])
        self.embedding_mode = any(
            ModelRegistry.is_embedding_model(arch) for arch in architectures)

    def _parse_quant_hf_config(self):
        quant_cfg = getattr(self.hf_config, "quantization_config", None)
        if quant_cfg is None:
            # SparseML uses a "compression_config" with a
            # "quantization_config".
            compression_cfg = getattr(self.hf_config, "compression_config",
                                      None)
            if compression_cfg is not None:
                quant_cfg = compression_cfg.get("quantization_config", None)
        return quant_cfg

    def _verify_quantization(self) -> None:
        supported_quantization = [*QUANTIZATION_METHODS]
        rocm_supported_quantization = ["gptq", "squeezellm"]
        if self.quantization is not None:
            self.quantization = self.quantization.lower()

        # Parse quantization method from the HF model config, if available.
        quant_cfg = self._parse_quant_hf_config()

        if quant_cfg is not None:
            quant_method = quant_cfg.get("quant_method", "").lower()

            # Detect which kind of checkpoint it is.
            for _, method in QUANTIZATION_METHODS.items():
                quantization_override = method.override_quantization_method(
                    quant_cfg, self.quantization)
                if quantization_override:
                    quant_method = quantization_override
                    self.quantization = quantization_override
                    break

            # Verify quantization configurations.
            if self.quantization is None:
                self.quantization = quant_method
            elif self.quantization != quant_method:
                raise ValueError(
                    "Quantization method specified in the model config "
                    f"({quant_method}) does not match the quantization "
                    f"method specified in the `quantization` argument "
                    f"({self.quantization}).")

        if self.load_in_4bit:
            # The kernels seem to not work with 4bit weight_only.
            if torch.cuda.get_device_capability(0)[0] < 8:
                raise ValueError(
                    "load_in_4bit quantization is not supported on GPUs with "
                    "compute capability less than 8.0.")
            if self.quantization is None:
                self.quantization = "autoquant"
                self.hf_config.quantization_config = {
                    "bits": 4,
                    "quant_mode": "weight_only",
                    "quant_method": "autoquant",
                    "group_size": 128,
                    "zero_point": True,
                    "from_float": True
                }
            elif self.quantization == "awq":
                logger.warning("AWQ model is being loaded in 4bit autoquant "
                               "format.")
                self.quantization = "autoquant"
                self.hf_config.quantization_config = {
                    "zero_point": True,
                    "q_group_size": 128,
                    "w_bit": 4,
                    "version": "gemm"
                }
            elif self.quantization != "autoquant":
                raise ValueError("4bit quantization is not supported in "
                                 f"{self.quantization}.")

        if self.load_in_8bit:
            if self.quantization is None:
                self.quantization = "autoquant"
            elif self.quantization != "autoquant":
                raise ValueError("8bit quantization is not supported in "
                                 f"{self.quantization}.")
            self.hf_config.quantization_config = {
                "bits": 8,
                "quant_mode": "llm_int8",
                "quant_method": "autoquant",
                "group_size": 128,
                "zero_point": True,
                "from_float": True
            }
            self.enforce_eager = True

        if self.load_in_smooth:
            if self.quantization is None:
                self.quantization = "autoquant"
            elif self.quantization != "autoquant":
                raise ValueError("Smooth quantization is not supported in "
                                 f"{self.quantization}.")
            self.hf_config.quantization_config = {
                "bits": 8,
                "quant_mode": "smoothquant",
                "quant_method": "autoquant",
                "group_size": 128,
                "zero_point": True,
                "from_float": True
            }
            self.enforce_eager = True

        if self.quantization == "deepspeedfp":
            gs = 32 if self.deepspeed_fp_bits == 4 else 128
            self.hf_config.quantization_config = {
                "bits": self.deepspeed_fp_bits,
                "group_size": int(os.environ.get("DEEPSPEED_GROUP_SIZE", gs)),
                "quant_method": "deepspeedfp"
            }

        if self.quantization is not None:
            if self.quantization not in supported_quantization:
                raise ValueError(
                    f"Unknown quantization method: {self.quantization}. Must "
                    f"be one of {supported_quantization}.")
            if is_hip(
            ) and self.quantization not in rocm_supported_quantization:
                raise ValueError(
                    f"{self.quantization} quantization is currently not "
                    "supported in ROCm.")
            if (self.quantization
                    not in ["marlin", "gptq_marlin_24", "gptq_marlin"]):
                logger.warning(
                    f"{self.quantization} quantization is not fully "
                    "optimized yet. The speed can be slower than "
                    "non-quantized models.")
            if self.quantization == "deepspeedfp" and self.deepspeed_fp_bits \
                    is None:
                raise ValueError(
                    "deepspeed_fp_bits must be specified when using "
                    "deepspeedfp quantization.")

    def _verify_cuda_graph(self) -> None:
        if self.max_seq_len_to_capture is None:
            self.max_seq_len_to_capture = self.max_model_len
        self.max_seq_len_to_capture = min(self.max_seq_len_to_capture,
                                          self.max_model_len)

    def verify_with_parallel_config(
        self,
        parallel_config: "ParallelConfig",
    ) -> None:
        total_num_attention_heads = self.hf_text_config.num_attention_heads
        tensor_parallel_size = parallel_config.tensor_parallel_size
        if total_num_attention_heads % tensor_parallel_size != 0:
            raise ValueError(
                f"Total number of attention heads ({total_num_attention_heads})"
                " must be divisible by tensor parallel size "
                f"({tensor_parallel_size}).")

        total_num_hidden_layers = self.hf_text_config.num_hidden_layers
        pipeline_parallel_size = parallel_config.pipeline_parallel_size
        if total_num_hidden_layers % pipeline_parallel_size != 0:
            raise ValueError(
                f"Total number of hidden layers ({total_num_hidden_layers}) "
                "must be divisible by pipeline parallel size "
                f"({pipeline_parallel_size}).")

        if self.quantization == "bitsandbytes" and (
                parallel_config.tensor_parallel_size > 1
                or parallel_config.pipeline_parallel_size > 1):
            raise ValueError(
                "BitsAndBytes quantization with TP or PP is not supported "
                "yet.")

    def get_hf_config_sliding_window(self) -> Optional[int]:
        """Get the sliding window size from the HF config, or None if
        disabled."""
        # Some models, like Qwen2 and Qwen1.5, use `use_sliding_window` in
        # addition to sliding window size. We check if that field is present
        # and if it's False, return None.
        if (hasattr(self.hf_text_config, "use_sliding_window")
                and not self.hf_text_config.use_sliding_window):
            return None
        return getattr(self.hf_text_config, "sliding_window", None)

    def get_sliding_window(self) -> Optional[int]:
        """Get the sliding window size, or None if disabled."""
        # If user disables sliding window, return None.
        if self.disable_sliding_window:
            return None
        # Otherwise get the value from the hf config.
        return self.get_hf_config_sliding_window()

    def get_vocab_size(self) -> int:
        return self.hf_text_config.vocab_size

    def get_hidden_size(self) -> int:
        return self.hf_text_config.hidden_size

    def get_head_size(self) -> int:
        if hasattr(self.hf_text_config, "head_dim"):
            return self.hf_text_config.head_dim
        # FIXME: This may not be true for all models.
        return (self.hf_text_config.hidden_size //
                self.hf_text_config.num_attention_heads)

    def get_total_num_kv_heads(self) -> int:
        """Returns the total number of KV heads."""
        # For GPTBigCode & Falcon:
        # NOTE: for falcon, when new_decoder_architecture is True, the
        # multi_query flag is ignored and we use n_head_kv for the number of
        # KV heads.
        falcon_model_types = ["falcon", "RefinedWeb", "RefinedWebModel"]
        new_decoder_arch_falcon = (
            self.hf_config.model_type in falcon_model_types
            and getattr(self.hf_config, "new_decoder_architecture", False))
        if not new_decoder_arch_falcon and getattr(self.hf_text_config,
                                                   "multi_query", False):
            # Multi-query attention, only one KV head.
            # Currently, tensor parallelism is not supported in this case.
            return 1

        # For DBRX and MPT
        if self.hf_config.model_type in ["dbrx", "mpt"]:
            return getattr(self.hf_config.attn_config, "kv_n_heads",
                           self.hf_config.num_attention_heads)

        attributes = [
            # For Falcon:
            "n_head_kv",
            "num_kv_heads",
            # For LLaMA-2:
            "num_key_value_heads",
            # For ChatGLM:
            "multi_query_group_num",
        ]
        for attr in attributes:
            num_kv_heads = getattr(self.hf_text_config, attr, None)
            if num_kv_heads is not None:
                return num_kv_heads

        # For non-grouped-query attention models, the number of KV heads is
        # equal to the number of attention heads.
        return self.hf_text_config.num_attention_heads

    def get_num_kv_heads(self, parallel_config: "ParallelConfig") -> int:
        """Returns the number of KV heads per GPU."""
        total_num_kv_heads = self.get_total_num_kv_heads()
        # If tensor parallelism is used, we divide the number of KV heads by
        # the tensor parallel size. We will replicate the KV heads in the
        # case where the number of KV heads is smaller than the tensor
        # parallel size so each GPU has at least one KV head.
        return max(1,
                   total_num_kv_heads // parallel_config.tensor_parallel_size)

    def get_num_attention_heads(self,
                                parallel_config: "ParallelConfig") -> int:
        return self.hf_text_config.num_attention_heads // \
            parallel_config.tensor_parallel_size

    def get_num_layers(self, parallel_config: "ParallelConfig") -> int:
        total_num_hidden_layers = self.hf_text_config.num_hidden_layers
        return total_num_hidden_layers // parallel_config.pipeline_parallel_size
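
# Illustrative sketch (not part of the original file): KV-head accounting for
# a GQA checkpoint. `model_config` is a hypothetical ModelConfig whose HF
# config reports num_attention_heads=32 and num_key_value_heads=8, and
# `parallel_config` is a hypothetical ParallelConfig with
# tensor_parallel_size=4.
#
#     model_config.get_total_num_kv_heads()                   # -> 8
#     model_config.get_num_kv_heads(parallel_config)          # -> 8 // 4 = 2
#     model_config.get_num_attention_heads(parallel_config)   # -> 32 // 4 = 8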

class CacheConfig:
    """Configuration for the KV cache.

    Args:
        block_size: Size of a cache block in number of tokens.
        gpu_memory_utilization: Fraction of GPU memory to use for the
            Aphrodite execution.
        swap_space: Size of the CPU swap space per GPU (in GiB).
        cache_dtype: Data type for kv cache storage.
        num_gpu_blocks_override: Number of GPU blocks to use. This overrides
            the profiled num_gpu_blocks if specified. Does nothing if None.
    """

    def __init__(
        self,
        block_size: int,
        gpu_memory_utilization: float,
        swap_space: int,
        cache_dtype: str,
        num_gpu_blocks_override: Optional[int] = None,
        sliding_window: Optional[int] = None,
        enable_prefix_caching: bool = False,
    ) -> None:
        self.block_size = block_size
        self.gpu_memory_utilization = gpu_memory_utilization
        self.swap_space_bytes = swap_space * _GB
        self.num_gpu_blocks_override = num_gpu_blocks_override
        self.cache_dtype = cache_dtype
        self.sliding_window = sliding_window
        self.enable_prefix_caching = enable_prefix_caching
        self._verify_args()
        self._verify_cache_dtype()
        self._verify_prefix_caching()

        # Will be set after profiling.
        self.num_gpu_blocks = None
        self.num_cpu_blocks = None

    def metrics_info(self):
        # Convert cache_config to dict(key: str, value: str) for prometheus
        # metrics info.
        return {key: str(value) for key, value in self.__dict__.items()}

    def _verify_args(self) -> None:
        if self.gpu_memory_utilization > 1.0:
            raise ValueError(
                "GPU memory utilization must be less than 1.0. Got "
                f"{self.gpu_memory_utilization}.")

    def _verify_cache_dtype(self) -> None:
        if self.cache_dtype == "auto":
            pass
        elif self.cache_dtype in ("fp8", "fp8_e4m3", "fp8_e5m2"):
            logger.info(
                "Using fp8 data type to store kv cache. It reduces the GPU "
                "memory footprint and boosts the performance. "
                "Meanwhile, it may cause accuracy drop without a proper "
                "scaling factor")
        else:
            raise ValueError(f"Unknown kv cache dtype: {self.cache_dtype}")

    def _verify_prefix_caching(self) -> None:
        if not self.enable_prefix_caching:
            return

        if self.sliding_window is not None:
            raise NotImplementedError(
                "Prefix caching is not supported with sliding window. "
                "Run with --disable-sliding-window to use prefix caching.")
        if self.cache_dtype == "fp8":
            raise NotImplementedError(
                "Prefix caching is not supported for fp8 cache_dtype. "
                "Run with --kv-cache-dtype auto to use prefix caching.")

    def verify_with_parallel_config(
        self,
        parallel_config: "ParallelConfig",
    ) -> None:
        total_cpu_memory = get_cpu_memory()
        # FIXME: Here, it is assumed that the GPUs in a tensor parallel
        # group are in the same node. However, the GPUs may span multiple
        # nodes.
        num_gpus_per_node = parallel_config.tensor_parallel_size
        cpu_memory_usage = self.swap_space_bytes * num_gpus_per_node

        msg = (f"{cpu_memory_usage / _GB:.2f} GiB out of "
               f"the {total_cpu_memory / _GB:.2f} GiB total CPU memory is "
               "allocated for the swap space.")
        if cpu_memory_usage > 0.7 * total_cpu_memory:
            raise ValueError("Too large swap space. " + msg)
        elif cpu_memory_usage > 0.4 * total_cpu_memory:
            logger.warning("Possibly too large swap space. " + msg)
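
# Illustrative sketch (not part of the original file): how the swap-space
# check above adds up. The values are hypothetical.
#
#     cache_config = CacheConfig(block_size=16,
#                                gpu_memory_utilization=0.9,
#                                swap_space=4,          # GiB per GPU
#                                cache_dtype="auto")
#     # With tensor_parallel_size=4, verify_with_parallel_config() compares
#     # 4 GiB * 4 = 16 GiB against total host RAM: it warns above 40% of RAM
#     # and raises above 70%.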

@dataclass
class TokenizerPoolConfig:
    """Configuration for the tokenizer pool.

    Args:
        pool_size: Number of tokenizer workers in the pool.
        pool_type: Type of the pool.
        extra_config: Additional config for the pool.
            The way the config will be used depends on the
            pool type.
    """
    pool_size: int
    pool_type: str
    extra_config: dict

    def __post_init__(self):
        if self.pool_type not in ("ray", ):
            raise ValueError(f"Unknown pool type: {self.pool_type}")
        if not isinstance(self.extra_config, dict):
            raise ValueError("extra_config must be a dictionary.")

    @classmethod
    def create_config(
        cls, tokenizer_pool_size: int, tokenizer_pool_type: str,
        tokenizer_pool_extra_config: Optional[Union[str, dict]]
    ) -> Optional["TokenizerPoolConfig"]:
        """Create a TokenizerPoolConfig from the given parameters.

        If tokenizer_pool_size is 0, return None.

        Args:
            tokenizer_pool_size: Number of tokenizer workers in the pool.
            tokenizer_pool_type: Type of the pool.
            tokenizer_pool_extra_config: Additional config for the pool.
                The way the config will be used depends on the
                pool type. This can be a JSON string (will be parsed).
        """
        if tokenizer_pool_size:
            if isinstance(tokenizer_pool_extra_config, str):
                tokenizer_pool_extra_config_parsed = json.loads(
                    tokenizer_pool_extra_config)
            else:
                tokenizer_pool_extra_config_parsed = (
                    tokenizer_pool_extra_config or {})
            tokenizer_pool_config = cls(tokenizer_pool_size,
                                        tokenizer_pool_type,
                                        tokenizer_pool_extra_config_parsed)
        else:
            tokenizer_pool_config = None
        return tokenizer_pool_config
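
# Illustrative sketch (not part of the original file): create_config() accepts
# the extra config either as a dict or as a JSON string. The extra-config key
# below is hypothetical.
#
#     cfg = TokenizerPoolConfig.create_config(
#         tokenizer_pool_size=4,
#         tokenizer_pool_type="ray",
#         tokenizer_pool_extra_config='{"runtime_env": {}}')
#     # -> TokenizerPoolConfig(pool_size=4, pool_type="ray",
#     #                        extra_config={"runtime_env": {}})
#     # A pool size of 0 returns None (synchronous tokenization).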

class LoadFormat(str, enum.Enum):
    AUTO = "auto"
    PT = "pt"
    SAFETENSORS = "safetensors"
    NPCACHE = "npcache"
    DUMMY = "dummy"
    TENSORIZER = "tensorizer"
    SHARDED_STATE = "sharded_state"
    BITSANDBYTES = "bitsandbytes"

@dataclass
class LoadConfig:
    """
    download_dir: Directory to download and load the weights, default to the
        default cache directory of huggingface.
    load_format: The format of the model weights to load:
        "auto" will try to load the weights in the safetensors format and
            fall back to the pytorch bin format if safetensors format is
            not available.
        "pt" will load the weights in the pytorch bin format.
        "safetensors" will load the weights in the safetensors format.
        "npcache" will load the weights in pytorch format and store
            a numpy cache to speed up the loading.
        "dummy" will initialize the weights with random values, which is
            mainly for profiling.
        "tensorizer" will use CoreWeave's tensorizer library for
            fast weight loading.
    """

    load_format: Union[str, LoadFormat, "BaseModelLoader"] = LoadFormat.AUTO
    download_dir: Optional[str] = None
    model_loader_extra_config: Optional[Union[str, dict]] = field(
        default_factory=dict)

    def __post_init__(self):
        model_loader_extra_config = self.model_loader_extra_config or {}
        if isinstance(model_loader_extra_config, str):
            self.model_loader_extra_config = json.loads(
                model_loader_extra_config)
        self._verify_load_format()

    def _verify_load_format(self) -> None:
        if not isinstance(self.load_format, str):
            return

        load_format = self.load_format.lower()
        self.load_format = LoadFormat(load_format)

        rocm_not_supported_load_format: List[str] = []
        if is_hip() and load_format in rocm_not_supported_load_format:
            rocm_supported_load_format = [
                f for f in LoadFormat.__members__
                if (f not in rocm_not_supported_load_format)
            ]
            raise ValueError(
                f"load format '{load_format}' is not supported in ROCm. "
                f"Supported load formats are "
                f"{rocm_supported_load_format}")
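
# Illustrative sketch (not part of the original file): the load-format string
# is normalized to a LoadFormat member, and a JSON string passed as
# model_loader_extra_config is parsed in __post_init__. The extra-config key
# is hypothetical.
#
#     load_config = LoadConfig(load_format="safetensors",
#                              model_loader_extra_config='{"key": "value"}')
#     # load_config.load_format == LoadFormat.SAFETENSORS
#     # load_config.model_loader_extra_config == {"key": "value"}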

class ParallelConfig:
    """Configuration for the distributed execution.

    Args:
        pipeline_parallel_size: Number of pipeline parallel groups.
        tensor_parallel_size: Number of tensor parallel groups.
        worker_use_ray: Deprecated, use distributed_executor_backend instead.
        max_parallel_loading_workers: Maximum number of workers that load the
            model in parallel at a time; the remaining workers load
            sequentially. Helps avoid RAM OOM when using tensor parallelism
            with large models.
        disable_custom_all_reduce: Disable the custom all-reduce kernel and
            fall back to NCCL.
        tokenizer_pool_config: Config for the tokenizer pool.
            If None, will use synchronous tokenization.
        ray_workers_use_nsight: Whether to profile Ray workers with nsight, see
            https://docs.ray.io/en/latest/ray-observability/user-guides/profiling.html#profiling-nsight-profiler.
        placement_group: ray distributed model workers placement group.
        distributed_executor_backend: Backend to use for distributed model
            workers, either "ray" or "mp" (multiprocessing). If either
            pipeline_parallel_size or tensor_parallel_size is greater than 1,
            will default to "ray" if Ray is installed or "mp" otherwise.
    """

    def __init__(
        self,
        pipeline_parallel_size: int,
        tensor_parallel_size: int,
        worker_use_ray: Optional[bool] = None,
        max_parallel_loading_workers: Optional[int] = None,
        disable_custom_all_reduce: bool = False,
        tokenizer_pool_config: Optional[TokenizerPoolConfig] = None,
        ray_workers_use_nsight: bool = False,
        placement_group: Optional["PlacementGroup"] = None,
        distributed_executor_backend: Optional[str] = None,
    ) -> None:
        self.pipeline_parallel_size = pipeline_parallel_size
        self.tensor_parallel_size = tensor_parallel_size
        self.distributed_executor_backend = distributed_executor_backend
        self.max_parallel_loading_workers = max_parallel_loading_workers
        self.disable_custom_all_reduce = disable_custom_all_reduce
        self.tokenizer_pool_config = tokenizer_pool_config
        self.ray_workers_use_nsight = ray_workers_use_nsight
        self.placement_group = placement_group
        self.world_size = pipeline_parallel_size * self.tensor_parallel_size

        if worker_use_ray:
            if self.distributed_executor_backend is None:
                self.distributed_executor_backend = "ray"
            elif self.distributed_executor_backend != "ray":
                raise ValueError(f"worker-use-ray can't be used with "
                                 f"distributed executor backend "
                                 f"'{self.distributed_executor_backend}'.")

        if self.distributed_executor_backend is None and self.world_size > 1:
            from aphrodite.executor import ray_utils
            ray_found = ray_utils.ray is not None
            self.distributed_executor_backend = "ray" if ray_found else "mp"

        self._verify_args()

    def _verify_args(self) -> None:
        if self.pipeline_parallel_size > 1:
            raise NotImplementedError(
                "Pipeline parallelism is not supported yet.")
        if self.distributed_executor_backend not in ("ray", "mp", None):
            raise ValueError(
                "Unrecognized distributed executor backend. Supported values "
                "are 'ray' or 'mp'.")
        if not self.disable_custom_all_reduce and self.world_size > 1:
            if is_hip():
                self.disable_custom_all_reduce = True
                logger.info(
                    "Disabled the custom all-reduce kernel because it is not "
                    "supported on AMD GPUs.")
            elif self.pipeline_parallel_size > 1:
                self.disable_custom_all_reduce = True
                logger.info(
                    "Disabled the custom all-reduce kernel because it is not "
                    "supported with pipeline parallelism.")
        if self.ray_workers_use_nsight and (
                not self.distributed_executor_backend == "ray"):
            raise ValueError("Unable to use nsight profiling unless workers "
                             "run with Ray.")
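
# Illustrative sketch (not part of the original file): backend selection for a
# tensor-parallel run. With world_size > 1 and no explicit backend, "ray" is
# chosen if Ray is importable, otherwise "mp".
#
#     parallel_config = ParallelConfig(pipeline_parallel_size=1,
#                                      tensor_parallel_size=2)
#     # parallel_config.world_size == 2
#     # parallel_config.distributed_executor_backend in ("ray", "mp")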

class SchedulerConfig:
    """Scheduler configuration.

    Args:
        max_num_batched_tokens: Maximum number of tokens to be processed in
            a single iteration.
        max_num_seqs: Maximum number of sequences to be processed in a single
            iteration.
        max_model_len: Maximum length of a sequence (including prompt
            and generated text).
        use_v2_block_manager: Whether to use the BlockSpaceManagerV2 or not.
        num_lookahead_slots: The number of slots to allocate per sequence per
            step, beyond the known token ids. This is used in speculative
            decoding to store KV activations of tokens which may or may not be
            accepted.
        delay_factor: Apply a delay (of delay factor multiplied by previous
            prompt latency) before scheduling next prompt.
        enable_chunked_prefill: If True, prefill requests can be chunked based
            on the remaining max_num_batched_tokens.
        embedding_mode: Whether the running model is for embedding.
        preemption_mode: Whether to perform preemption by swapping or
            recomputation. If not specified, we determine the mode as follows:
            We use recomputation by default since it incurs lower overhead than
            swapping. However, when the sequence group has multiple sequences
            (e.g., beam search), recomputation is not currently supported. In
            such a case, we use swapping instead.
    """

    def __init__(self,
                 max_num_batched_tokens: Optional[int],
                 max_num_seqs: int,
                 max_model_len: int,
                 use_v2_block_manager: bool = False,
                 num_lookahead_slots: int = 0,
                 delay_factor: float = 0.0,
                 enable_chunked_prefill: bool = False,
                 embedding_mode: Optional[bool] = False,
                 preemption_mode: Optional[str] = None) -> None:
        if max_num_batched_tokens is not None:
            self.max_num_batched_tokens = max_num_batched_tokens
        else:
            if enable_chunked_prefill:
                # For chunked prefill, choose the well-tuned batch size.
                self.max_num_batched_tokens = 768
            elif embedding_mode:
                # For embedding, choose specific value for higher throughput.
                self.max_num_batched_tokens = max(
                    max_model_len, _EMBEDDING_MODEL_MAX_NUM_BATCHED_TOKENS)
            else:
                # If max_model_len is too short, use 2048 as the default value
                # for higher throughput.
                self.max_num_batched_tokens = max(max_model_len, 2048)
        if enable_chunked_prefill:
            logger.info("Chunked prefill is enabled (EXPERIMENTAL).")

        self.max_num_seqs = max_num_seqs
        self.max_model_len = max_model_len
        self.use_v2_block_manager = use_v2_block_manager
        self.num_lookahead_slots = num_lookahead_slots
        self.delay_factor = delay_factor
        self.chunked_prefill_enabled = enable_chunked_prefill
        self.embedding_mode = embedding_mode
        self.preemption_mode = preemption_mode
        self._verify_args()

    def _verify_args(self) -> None:
        if (self.max_num_batched_tokens < self.max_model_len
                and not self.chunked_prefill_enabled):
            raise ValueError(
                f"max_num_batched_tokens ({self.max_num_batched_tokens}) is "
                f"smaller than max_model_len ({self.max_model_len}). "
                "This effectively limits the maximum sequence length to "
                "max_num_batched_tokens and makes Aphrodite reject longer "
                "sequences. Please increase max_num_batched_tokens or "
                "decrease max_model_len.")

        if self.max_num_batched_tokens < self.max_num_seqs:
            raise ValueError(
                f"max_num_batched_tokens ({self.max_num_batched_tokens}) must "
                "be greater than or equal to max_num_seqs "
                f"({self.max_num_seqs}).")

        if self.num_lookahead_slots < 0:
            raise ValueError(
                "num_lookahead_slots "
                f"({self.num_lookahead_slots}) must be greater than or "
                "equal to 0.")
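
# Illustrative sketch (not part of the original file): how the default
# max_num_batched_tokens is resolved when it is not given explicitly.
#
#     SchedulerConfig(None, max_num_seqs=256, max_model_len=4096)
#     #   -> max_num_batched_tokens = max(4096, 2048) = 4096
#     SchedulerConfig(None, max_num_seqs=256, max_model_len=4096,
#                     enable_chunked_prefill=True)
#     #   -> max_num_batched_tokens = 768 (chunked prefill budget)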

class DeviceConfig:

    def __init__(self, device: str = "auto") -> None:
        if device == "auto":
            # Automated device type detection
            if is_neuron():
                self.device_type = "neuron"
            elif is_cpu():
                self.device_type = "cpu"
            else:
                # We don't call torch.cuda.is_available() here to
                # avoid initializing CUDA before workers are forked
                self.device_type = "cuda"
        else:
            # Device type is assigned explicitly
            self.device_type = device

        # Some device types require processing inputs on CPU
        if self.device_type in ["neuron"]:
            self.device = torch.device("cpu")
        else:
            # Set device with device type
            self.device = torch.device(self.device_type)

class SpeculativeConfig:
    """Configuration for speculative decoding.

    The configuration is currently specialized to draft-model speculative
    decoding with top-1 proposals.
    """

    @staticmethod
    def maybe_create_spec_config(
        target_model_config: ModelConfig,
        target_parallel_config: ParallelConfig,
        target_dtype: str,
        speculative_model: Optional[str],
        num_speculative_tokens: Optional[int],
        speculative_max_model_len: Optional[int],
        enable_chunked_prefill: bool,
        use_v2_block_manager: bool,
        speculative_disable_by_batch_size: Optional[int],
        ngram_prompt_lookup_max: Optional[int],
        ngram_prompt_lookup_min: Optional[int],
    ) -> Optional["SpeculativeConfig"]:
        """Create a SpeculativeConfig if possible, else return None.

        This function attempts to create a SpeculativeConfig object based on
        the provided parameters. If the necessary conditions are met, it
        returns an instance of SpeculativeConfig. Otherwise, it returns None.

        Args:
            target_model_config (ModelConfig): The configuration of the target
                model.
            target_parallel_config (ParallelConfig): The parallel configuration
                for the target model.
            target_dtype (str): The data type used for the target model.
            speculative_model (Optional[str]): The name of the speculative
                model, if provided.
            num_speculative_tokens (Optional[int]): The number of speculative
                tokens, if provided.
            speculative_max_model_len (Optional[int]): The maximum model len of
                the speculative model. Used when testing the ability to skip
                speculation for some sequences.
            enable_chunked_prefill (bool): Whether Aphrodite is configured to
                use chunked prefill or not. Used for raising an error since it
                is not yet compatible with spec decode.
            use_v2_block_manager (bool): Whether Aphrodite is configured to
                use the v2 block manager or not. Used for raising an error
                since the v2 block manager is required with spec decode.
            speculative_disable_by_batch_size (Optional[int]): Disable
                speculative decoding for new incoming requests when the number
                of enqueued requests is larger than this value, if provided.
            ngram_prompt_lookup_max (Optional[int]): Max size of ngram token
                window, if provided.
            ngram_prompt_lookup_min (Optional[int]): Min size of ngram token
                window, if provided.

        Returns:
            Optional["SpeculativeConfig"]: An instance of SpeculativeConfig if
                the necessary conditions are met, else None.
        """
        if speculative_model is None and num_speculative_tokens is None:
            return None

        if speculative_model is not None and num_speculative_tokens is None:
            raise ValueError(
                "Expected both speculative_model and "
                "num_speculative_tokens to be provided, but found "
                f"{speculative_model=} and {num_speculative_tokens=}.")

        if (speculative_disable_by_batch_size is not None
                and speculative_disable_by_batch_size < 2):
            raise ValueError("Expected the batch size threshold for disabling "
                             "speculative decoding to be > 1, but got "
                             f"{speculative_disable_by_batch_size=}")

        assert (speculative_model is not None
                and num_speculative_tokens is not None)

        if enable_chunked_prefill:
            raise ValueError(
                "Speculative decoding and chunked prefill are "
                f"currently mutually exclusive ({enable_chunked_prefill=}).")

        if not use_v2_block_manager:
            raise ValueError(
                "Speculative decoding requires usage of the V2 "
                "block manager. Enable it with --use-v2-block-manager.")

        # TODO: The user should be able to specify revision/quantization/max
        # model len for the draft model. It is not currently supported.
        draft_revision = None
        draft_code_revision = None
        draft_quantization = None

        if speculative_model == "[ngram]":
            if ngram_prompt_lookup_min is None:
                ngram_prompt_lookup_min = 1
            if ngram_prompt_lookup_max is None or ngram_prompt_lookup_max < 1:
                raise ValueError(f"{ngram_prompt_lookup_max=} must be > 0")
            if ngram_prompt_lookup_min < 1:
                raise ValueError(f"{ngram_prompt_lookup_min=} must be > 0")
            if ngram_prompt_lookup_min > ngram_prompt_lookup_max:
                raise ValueError(f"{ngram_prompt_lookup_min=} cannot be "
                                 f"larger than {ngram_prompt_lookup_max=}")

            # TODO: Currently we still need to extract vocab_size from the
            # target model config. In the future, we may try refactoring it
            # out and set the draft-related config to None here.
            draft_model_config = target_model_config
            draft_parallel_config = target_parallel_config
        else:
            ngram_prompt_lookup_max = 0
            ngram_prompt_lookup_min = 0
            draft_model_config = ModelConfig(
                model=speculative_model,
                tokenizer=target_model_config.tokenizer,
                tokenizer_mode=target_model_config.tokenizer_mode,
                trust_remote_code=target_model_config.trust_remote_code,
                dtype=target_model_config.dtype,
                seed=target_model_config.seed,
                revision=draft_revision,
                code_revision=draft_code_revision,
                tokenizer_revision=target_model_config.tokenizer_revision,
                max_model_len=None,
                quantization=draft_quantization,
                enforce_eager=target_model_config.enforce_eager,
                max_seq_len_to_capture=target_model_config.
                max_seq_len_to_capture,
                max_logprobs=target_model_config.max_logprobs,
            )

            draft_model_config.max_model_len = (
                SpeculativeConfig._maybe_override_draft_max_model_len(
                    speculative_max_model_len,
                    draft_model_config.max_model_len,
                    target_model_config.max_model_len,
                ))

            draft_parallel_config = (
                SpeculativeConfig.create_draft_parallel_config(
                    target_parallel_config))

        return SpeculativeConfig(draft_model_config, draft_parallel_config,
                                 num_speculative_tokens,
                                 speculative_disable_by_batch_size,
                                 ngram_prompt_lookup_max,
                                 ngram_prompt_lookup_min)

    @staticmethod
    def _maybe_override_draft_max_model_len(
        speculative_max_model_len: Optional[int],
        draft_max_model_len: int,
        target_max_model_len: int,
    ) -> int:
        """Determine the max sequence len for the draft model. This is usually
        the draft_max_model_len, but may be the target_max_model_len if it is
        less than the draft_max_model_len, or may be speculative_max_model_len
        if it is specified.

        This is necessary so that sequences do not exceed the capacity of the
        draft model or the target model.

        speculative_max_model_len is mainly used for testing that sequences can
        skip speculation.
        """
        if speculative_max_model_len is not None:
            if speculative_max_model_len > draft_max_model_len:
                raise ValueError(f"{speculative_max_model_len=} cannot be "
                                 f"larger than {draft_max_model_len=}")
            if speculative_max_model_len > target_max_model_len:
                raise ValueError(f"{speculative_max_model_len=} cannot be "
                                 f"larger than {target_max_model_len=}")
            return speculative_max_model_len

        return min(
            draft_max_model_len,
            target_max_model_len,
        )

    @staticmethod
    def create_draft_parallel_config(
            target_parallel_config: ParallelConfig) -> ParallelConfig:
        """Create a parallel config for use by the draft worker.

        This is mostly a copy of the target parallel config. In the future the
        draft worker can have a different parallel strategy, e.g. TP=1.
        """
        draft_parallel_config = ParallelConfig(
            pipeline_parallel_size=target_parallel_config.
            pipeline_parallel_size,
            tensor_parallel_size=target_parallel_config.tensor_parallel_size,
            distributed_executor_backend=target_parallel_config.
            distributed_executor_backend,
            max_parallel_loading_workers=target_parallel_config.
            max_parallel_loading_workers,
            disable_custom_all_reduce=target_parallel_config.
            disable_custom_all_reduce,
            tokenizer_pool_config=target_parallel_config.tokenizer_pool_config,
            ray_workers_use_nsight=target_parallel_config.
            ray_workers_use_nsight,
            placement_group=target_parallel_config.placement_group,
        )
        return draft_parallel_config

    def __init__(
        self,
        draft_model_config: ModelConfig,
        draft_parallel_config: ParallelConfig,
        num_speculative_tokens: int,
        speculative_disable_by_batch_size: Optional[int],
        ngram_prompt_lookup_max: Optional[int],
        ngram_prompt_lookup_min: Optional[int],
    ):
        """Create a SpeculativeConfig object.

        Args:
            draft_model_config: ModelConfig for the draft model.
            draft_parallel_config: ParallelConfig for the draft model.
            num_speculative_tokens: The number of tokens to sample from the
                draft model before scoring with the target model.
            speculative_disable_by_batch_size: Disable speculative
                decoding for new incoming requests when the number of
                enqueued requests is larger than this value.
            ngram_prompt_lookup_max: Max size of ngram token window.
            ngram_prompt_lookup_min: Min size of ngram token window.
        """
        self.draft_model_config = draft_model_config
        self.draft_parallel_config = draft_parallel_config
        self.num_speculative_tokens = num_speculative_tokens
        self.speculative_disable_by_batch_size = \
            speculative_disable_by_batch_size
        self.ngram_prompt_lookup_max = ngram_prompt_lookup_max or 0
        self.ngram_prompt_lookup_min = ngram_prompt_lookup_min or 0

        self._verify_args()

    def _verify_args(self) -> None:
        if self.num_speculative_tokens <= 0:
            raise ValueError("Expected num_speculative_tokens to be greater "
                             f"than zero ({self.num_speculative_tokens}).")

        if self.draft_model_config:
            self.draft_model_config.verify_with_parallel_config(
                self.draft_parallel_config)

    @property
    def num_lookahead_slots(self) -> int:
        """The number of additional slots the scheduler should allocate per
        step, in addition to the slots allocated for each known token.

        This is equal to the number of speculative tokens, as each speculative
        token must be scored.
        """
        return self.num_speculative_tokens

    def __repr__(self) -> str:
        if self.ngram_prompt_lookup_max > 0:
            draft_model = "[ngram]"
        else:
            draft_model = self.draft_model_config.model
        num_spec_tokens = self.num_speculative_tokens
        return f"SpeculativeConfig({draft_model=}, {num_spec_tokens=})"
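
# Illustrative sketch (not part of the original file): enabling prompt-lookup
# ("[ngram]") speculation reuses the target model/parallel configs for the
# draft side. `target_model_config` and `target_parallel_config` are
# hypothetical, pre-built objects.
#
#     spec_config = SpeculativeConfig.maybe_create_spec_config(
#         target_model_config=target_model_config,
#         target_parallel_config=target_parallel_config,
#         target_dtype="auto",
#         speculative_model="[ngram]",
#         num_speculative_tokens=5,
#         speculative_max_model_len=None,
#         enable_chunked_prefill=False,
#         use_v2_block_manager=True,
#         speculative_disable_by_batch_size=None,
#         ngram_prompt_lookup_max=4,
#         ngram_prompt_lookup_min=None)
#     # repr(spec_config) ->
#     #     "SpeculativeConfig(draft_model='[ngram]', num_spec_tokens=5)"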

@dataclass
class LoRAConfig:
    max_lora_rank: int
    max_loras: int
    fully_sharded_loras: bool = False
    max_cpu_loras: Optional[int] = None
    lora_dtype: Optional[torch.dtype] = None
    lora_extra_vocab_size: int = 256
    # This is a constant.
    lora_vocab_padding_size: ClassVar[int] = 256
    long_lora_scaling_factors: Optional[Tuple[float]] = None

    def __post_init__(self):
        # Keep this in sync with kernels/punica/bgmv/bgmv_config.h
        possible_max_ranks = (8, 16, 32, 64)
        possible_lora_extra_vocab_size = (0, 256, 512)
        if self.max_lora_rank not in possible_max_ranks:
            raise ValueError(
                f"max_lora_rank ({self.max_lora_rank}) must be one of "
                f"{possible_max_ranks}.")
        if self.lora_extra_vocab_size not in possible_lora_extra_vocab_size:
            raise ValueError(
                f"lora_extra_vocab_size ({self.lora_extra_vocab_size}) "
                f"must be one of {possible_lora_extra_vocab_size}.")
        if self.max_loras < 1:
            raise ValueError(f"max_loras ({self.max_loras}) must be >= 1.")
        if self.max_cpu_loras is None:
            self.max_cpu_loras = self.max_loras
        elif self.max_cpu_loras < self.max_loras:
            raise ValueError(
                f"max_cpu_loras ({self.max_cpu_loras}) must be >= "
                f"max_loras ({self.max_loras})")

    def verify_with_model_config(self, model_config: ModelConfig):
        if self.lora_dtype in (None, "auto"):
            self.lora_dtype = model_config.dtype
        elif isinstance(self.lora_dtype, str):
            self.lora_dtype = getattr(torch, self.lora_dtype)
        if model_config.quantization and model_config.quantization not in [
                "awq", "gptq"
        ]:
            # TODO: support all other quants
            logger.warning(f"{model_config.quantization} quantization is not "
                           "tested with LoRA yet.")

    def verify_with_scheduler_config(self, scheduler_config: SchedulerConfig):
        if scheduler_config.max_num_batched_tokens > 65528:
            raise ValueError(
                "Due to limitations of the custom LoRA CUDA kernel, "
                "max_num_batched_tokens must be <= 65528 when "
                "LoRA is enabled.")

@dataclass
class VisionLanguageConfig:
    """Configs the input data format and how models should run for
    vision language models."""

    class ImageInputType(enum.Enum):
        """Image input type into the vision language model.

        An image roughly goes through the following transformation:
        Raw image --> pixel values --> image features --> image embeddings.

        The difference between different image input types is where the
        image encoder (pixel values --> image features) is run.
        Different image input types also correspond to different tensor
        shapes. For example, for Llava, PIXEL_VALUES: (1, 3, 336, 336).
        IMAGE_FEATURES: (1, 576, 1024).
        """
        PIXEL_VALUES = enum.auto()
        IMAGE_FEATURES = enum.auto()

    image_input_type: ImageInputType
    # The input id corresponding to image token.
    image_token_id: int
    # Used for running `run_prefill_max_token`.
    # For models that support varying resolution, this corresponds to
    # worst case scenario (biggest supported resolution).
    image_input_shape: tuple
    image_feature_size: int
    # The image processor to load from HuggingFace
    image_processor: Optional[str]
    image_processor_revision: Optional[str]

    @classmethod
    def get_image_input_enum_type(
            cls, value: str) -> "VisionLanguageConfig.ImageInputType":
        """Get the image input type from a string."""
        try:
            return cls.ImageInputType[value.upper()]
        except KeyError as e:
            raise ValueError(f"{value} is not a valid choice. "
                             f"Expecting to choose from "
                             f"{[x.name for x in cls.ImageInputType]}.") from e

    def as_cli_args_dict(self) -> Dict[str, Any]:
        """Flatten vision language config to pure args.

        Compatible with what llm entrypoint expects.
        """
        result: Dict[str, Any] = {}
        for f in fields(self):
            value = getattr(self, f.name)
            if isinstance(value, enum.Enum):
                result[f.name] = value.name.lower()
            elif isinstance(value, tuple):
                result[f.name] = ",".join([str(item) for item in value])
            else:
                result[f.name] = value

        result["disable_image_processor"] = self.image_processor is None

        return result

_STR_DTYPE_TO_TORCH_DTYPE = {
    "half": torch.float16,
    "float16": torch.float16,
    "float": torch.float32,
    "float32": torch.float32,
    "bfloat16": torch.bfloat16,
}

_ROCM_NOT_SUPPORTED_DTYPE = ["float", "float32"]


def _get_and_verify_dtype(
    config: PretrainedConfig,
    dtype: Union[str, torch.dtype],
) -> torch.dtype:
    # NOTE: getattr(config, "torch_dtype", torch.float32) is not correct
    # because config.torch_dtype can be None.
    config_dtype = getattr(config, "torch_dtype", None)
    if config_dtype is None:
        config_dtype = torch.float32

    if isinstance(dtype, str):
        dtype = dtype.lower()
        if dtype == "auto":
            if config_dtype == torch.float32:
                # Following the common practice, we use float16 for float32
                # models.
                torch_dtype = torch.float16
            else:
                torch_dtype = config_dtype
        else:
            if dtype not in _STR_DTYPE_TO_TORCH_DTYPE:
                raise ValueError(f"Unknown dtype: {dtype}")
            torch_dtype = _STR_DTYPE_TO_TORCH_DTYPE[dtype]
    elif isinstance(dtype, torch.dtype):
        torch_dtype = dtype
    else:
        raise ValueError(f"Unknown dtype: {dtype}")

    if is_hip() and torch_dtype == torch.float32:
        rocm_supported_dtypes = [
            k for k, v in _STR_DTYPE_TO_TORCH_DTYPE.items()
            if (k not in _ROCM_NOT_SUPPORTED_DTYPE)
        ]
        raise ValueError(f"dtype '{dtype}' is not supported in ROCm. "
                         f"Supported dtypes are {rocm_supported_dtypes}")

    # Verify the dtype.
    if torch_dtype != config_dtype:
        if torch_dtype == torch.float32:
            # Upcasting to float32 is allowed.
            pass
        elif config_dtype == torch.float32:
            # Downcasting from float32 to float16 or bfloat16 is allowed.
            pass
        else:
            # Casting between float16 and bfloat16 is allowed with a warning.
            logger.warning(f"Casting {config_dtype} to {torch_dtype}.")

    return torch_dtype
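
# Illustrative sketch (not part of the original file): "auto" maps a float32
# checkpoint to float16, while explicit strings are looked up in
# _STR_DTYPE_TO_TORCH_DTYPE. `cfg` is a hypothetical PretrainedConfig whose
# torch_dtype is torch.float32.
#
#     _get_and_verify_dtype(cfg, "auto")      # -> torch.float16
#     _get_and_verify_dtype(cfg, "bfloat16")  # -> torch.bfloat16 (a warning
#                                             #    is only logged for
#                                             #    fp16 <-> bf16 casts)
#     _get_and_verify_dtype(cfg, "float64")   # -> ValueError: Unknown dtype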

def _get_and_verify_max_len(
    hf_config: PretrainedConfig,
    max_model_len: Optional[int],
    disable_sliding_window: bool,
    sliding_window_len: Optional[int],
) -> int:
    """Get and verify the model's maximum length."""
    derived_max_model_len = float("inf")
    possible_keys = [
        # Cohere: needs to prioritize this over "max_position_embeddings"
        "model_max_length",
        # OPT
        "max_position_embeddings",
        # GPT-2
        "n_positions",
        # MPT
        "max_seq_len",
        # ChatGLM2
        "seq_length",
        # Command-R
        "model_max_length",
        # Others
        "max_sequence_length",
        "max_seq_length",
        "seq_len",
    ]
    # Choose the smallest "max_length" from the possible keys.
    max_len_key = None
    for key in possible_keys:
        max_len = getattr(hf_config, key, None)
        if max_len is not None:
            max_len_key = key if max_len < derived_max_model_len \
                else max_len_key
            derived_max_model_len = min(derived_max_model_len, max_len)

    # If sliding window is manually disabled, max_length should be less
    # than the sliding window length in the model config.
    if disable_sliding_window and sliding_window_len is not None:
        max_len_key = "sliding_window" \
            if sliding_window_len < derived_max_model_len else max_len_key
        derived_max_model_len = min(derived_max_model_len, sliding_window_len)

    # If none of the keys were found in the config, use a default and
    # log a warning.
    if derived_max_model_len == float("inf"):
        if max_model_len is not None:
            # If max_model_len is specified, we use it.
            return max_model_len

        default_max_len = 2048
        logger.warning(
            "The model's config.json does not contain any of the following "
            "keys to determine the original maximum length of the model: "
            f"{possible_keys}. Assuming the model's maximum length is "
            f"{default_max_len}.")
        derived_max_model_len = default_max_len

    rope_scaling = getattr(hf_config, "rope_scaling", None)
    if rope_scaling is not None:
        rope_type = rope_scaling.get("type", rope_scaling.get("rope_type"))
        if rope_type not in {"su", "longrope", "llama3"}:
            if disable_sliding_window:
                # TODO: Find a model that supports rope_scaling
                # with sliding window to see if this case should be allowed.
                raise NotImplementedError(
                    "Disabling sliding window is not supported for models "
                    "with rope_scaling. Please raise an issue so we can "
                    "investigate.")
            assert "factor" in rope_scaling
            scaling_factor = rope_scaling["factor"]
            if rope_type == "yarn":
                derived_max_model_len = rope_scaling[
                    "original_max_position_embeddings"]
            derived_max_model_len *= scaling_factor

    if max_model_len is None:
        max_model_len = derived_max_model_len
    elif max_model_len > derived_max_model_len:
        # The user asked for more context than the model natively supports;
        # attempt to extend it with dynamic RoPE scaling.
        scaling_factor = max_model_len / derived_max_model_len
        hf_config.rope_scaling = {"factor": scaling_factor, "type": "dynamic"}
        logger.warning(
            f"User-specified max_model_len {max_model_len} is higher than "
            f"the original {derived_max_model_len}. "
            "Attempting to use RoPE scaling.")
        derived_max_model_len = max_model_len

    return int(max_model_len)
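
# Illustrative sketch (not part of the original file): derivation of the
# context length. `cfg` is a hypothetical PretrainedConfig with
# max_position_embeddings == 4096 and no rope_scaling.
#
#     _get_and_verify_max_len(cfg, None, False, None)   # -> 4096
#     # Asking for more than the derived maximum installs dynamic RoPE
#     # scaling on the config and returns the requested value:
#     _get_and_verify_max_len(cfg, 8192, False, None)   # -> 8192,
#     # cfg.rope_scaling == {"factor": 2.0, "type": "dynamic"}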

@dataclass
class DecodingConfig:
    """Dataclass which contains the decoding strategy of the engine"""

    # Which guided decoding algo to use. 'outlines' / 'lm-format-enforcer'
    guided_decoding_backend: str = 'outlines'

    def __post_init__(self):
        valid_guided_backends = ['outlines', 'lm-format-enforcer']
        backend = self.guided_decoding_backend
        if backend not in valid_guided_backends:
            raise ValueError(f"Invalid guided_decoding_backend '{backend}', "
                             f"must be one of {valid_guided_backends}")

@dataclass(frozen=True)
class EngineConfig:
    """Dataclass which contains all engine-related configuration. This
    simplifies passing around the distinct configurations in the codebase.
    """
    model_config: ModelConfig
    cache_config: CacheConfig
    parallel_config: ParallelConfig
    scheduler_config: SchedulerConfig
    device_config: DeviceConfig
    load_config: LoadConfig
    lora_config: Optional[LoRAConfig]
    vision_language_config: Optional[VisionLanguageConfig]
    speculative_config: Optional[SpeculativeConfig]
    decoding_config: Optional[DecodingConfig]

    def __post_init__(self):
        """Verify configs are valid & consistent with each other."""
        self.model_config.verify_with_parallel_config(self.parallel_config)
        self.cache_config.verify_with_parallel_config(self.parallel_config)

        if self.lora_config:
            self.lora_config.verify_with_model_config(self.model_config)
            self.lora_config.verify_with_scheduler_config(
                self.scheduler_config)

    def to_dict(self):
        """Return the configs as a dictionary, for use in **kwargs."""
        return dict(
            (field.name, getattr(self, field.name)) for field in fields(self))
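
# Illustrative sketch (not part of the original file): to_dict() lets the
# bundled configs be splatted into a consumer that accepts them as keyword
# arguments. `engine_config` is a hypothetical, fully built EngineConfig and
# `SomeExecutor` a hypothetical consumer class.
#
#     executor = SomeExecutor(**engine_config.to_dict())
#     # equivalent to passing model_config=..., cache_config=..., etc.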