# config.py

import enum
import json
import os
from dataclasses import dataclass, field, fields
from typing import (TYPE_CHECKING, Any, ClassVar, Dict, List, Mapping,
                    Optional, Tuple, Type, Union)

import torch
from loguru import logger
from transformers import PretrainedConfig

import aphrodite.common.envs as envs
from aphrodite.common.utils import (GiB_bytes, cuda_device_count_stateless,
                                    get_cpu_memory, is_cpu, is_hip, is_neuron,
                                    is_openvino, is_xpu, print_warning_once)
from aphrodite.distributed import get_current_tp_rank_partition_size
from aphrodite.modeling.models import ModelRegistry
from aphrodite.platforms import current_platform
from aphrodite.quantization import QUANTIZATION_METHODS
from aphrodite.transformers_utils.config import (ConfigFormat, get_config,
                                                 get_hf_image_processor_config,
                                                 get_hf_text_config)
from aphrodite.triton_utils import HAS_TRITON

if TYPE_CHECKING:
    from ray.util.placement_group import PlacementGroup

    from aphrodite.executor.executor_base import ExecutorBase
    from aphrodite.modeling.model_loader.loader import BaseModelLoader
    from aphrodite.transformers_utils.tokenizer_group.base_tokenizer_group import (  # noqa: E501
        BaseTokenizerGroup)

# If true, will load models from ModelScope instead of Hugging Face Hub.
APHRODITE_USE_MODELSCOPE = envs.APHRODITE_USE_MODELSCOPE

_EMBEDDING_MODEL_MAX_NUM_BATCHED_TOKENS = 32768
_MULTIMODAL_MODEL_MAX_NUM_BATCHED_TOKENS = 4096

_PP_SUPPORTED_MODELS = [
    "AquilaModel",
    "AquilaForCausalLM",
    "InternLMForCausalLM",
    "LlamaForCausalLM",
    "LLaMAForCausalLM",
    "MistralForCausalLM",
    "Phi3ForCausalLM",
    "MixtralForCausalLM",
    "NemotronForCausalLM",
    "Qwen2ForCausalLM",
    "Qwen2MoeForCausalLM",
    "QWenLMHeadModel",
    "InternLM2ForCausalLM",
    "InternVLChatModel",
    "Qwen2VLForConditionalGeneration",
]

_OPTIMIZED_QUANTS = [
    "awq_marlin",
    "compressed-tensors",
    "compressed_tensors",
    "experts_int8",
    "fbgemm_fp8",
    "fp2",
    "fp3",
    "fp4",
    "fp5",
    "fp6",
    "fp7",
    "fp8",
    "gptq_marlin",
    "gptq_marlin_24",
    "marlin",
    "modelopt",
    "quant_llm",
]


class ModelConfig:
    """Configuration for the model.

    Args:
        model: Name or path of the huggingface model to use.
            It is also used as the content for `model_name` tag in metrics
            output when `served_model_name` is not specified.
        tokenizer: Name or path of the huggingface tokenizer to use.
        tokenizer_mode: Tokenizer mode. "auto" will use the fast tokenizer if
            available, "slow" will always use the slow tokenizer, and
            "mistral" will always use the tokenizer from `mistral_common`.
        trust_remote_code: Trust remote code (e.g., from HuggingFace) when
            downloading the model and tokenizer.
        dtype: Data type for model weights and activations. The "auto" option
            will use FP16 precision for FP32 and FP16 models, and BF16
            precision for BF16 models.
        seed: Random seed for reproducibility.
        revision: The specific model version to use. It can be a branch name,
            a tag name, or a commit id. If unspecified, will use the default
            version.
        code_revision: The specific revision to use for the model code on
            Hugging Face Hub. It can be a branch name, a tag name, or a
            commit id. If unspecified, will use the default version.
        rope_scaling: Dictionary containing the scaling configuration for the
            RoPE embeddings. When using this flag, don't update
            `max_position_embeddings` to the expected new maximum.
        tokenizer_revision: The specific tokenizer version to use. It can be a
            branch name, a tag name, or a commit id. If unspecified, will use
            the default version.
        max_model_len: Maximum length of a sequence (including prompt and
            output). If None, will be derived from the model.
        quantization: Quantization method that was used to quantize the model
            weights. If None, we assume the model weights are not quantized.
        deepspeed_fp_bits: Number of bits to use for DeepSpeed FP quantization.
            Supported number of bits are: 4, 6, 8, 12.
        quant_llm_fp_bits: Number of bits to use for QuantLLM FP quantization.
            Supported number of bits are: 5, 6, 7.
        quantization_param_path: Path to JSON file containing scaling factors.
            Used to load KV cache scaling factors into the model when KV cache
            type is FP8_E4M3 on ROCm (AMD GPU). In the future these will also
            be used to load activation and weight scaling factors when the
            model dtype is FP8_E4M3 on ROCm.
        enforce_eager: Whether to enforce eager execution. If True, we will
            disable CUDA graph and always execute the model in eager mode.
            If False, we will use CUDA graph and eager execution in hybrid.
            If None, the user did not specify, so default to False.
        max_context_len_to_capture: Maximum context len covered by CUDA graphs.
            When a sequence has context length larger than this, we fall back
            to eager mode (DEPRECATED. Use max_seq_len_to_capture instead).
        max_seq_len_to_capture: Maximum sequence len covered by CUDA graphs.
            When a sequence has context length larger than this, we fall back
            to eager mode. Additionally for encoder-decoder models, if the
            sequence length of the encoder input is larger than this, we fall
            back to the eager mode.
        disable_sliding_window: Whether to disable sliding window. If True,
            we will disable the sliding window functionality of the model.
            If the model does not support sliding window, this argument is
            ignored.
        skip_tokenizer_init: If true, skip initialization of tokenizer and
            detokenizer.
        served_model_name: The model name used in metrics tag `model_name`,
            matches the model name exposed via the APIs. If multiple model
            names provided, the first name will be used. If not specified,
            the model name will be the same as `model`.
        limit_mm_per_prompt: Maximum number of data instances per modality
            per prompt. Only applicable for multimodal models.
        config_format: The config format which will be loaded. Defaults to
            'auto' which defaults to 'hf'.
        mm_processor_kwargs: Arguments to be forwarded to the model's processor
            for multi-modal data, e.g., image processor.
        override_neuron_config: Initialize non-default neuron config or
            override default neuron config that are specific to Neuron devices;
            this argument will be used to configure the neuron config that
            can not be gathered from the Aphrodite arguments.
    """

    def __init__(
        self,
        model: str,
        tokenizer: str,
        tokenizer_mode: str,
        trust_remote_code: bool,
        dtype: Union[str, torch.dtype],
        seed: int,
        revision: Optional[str] = None,
        code_revision: Optional[str] = None,
        rope_scaling: Optional[dict] = None,
        rope_theta: Optional[float] = None,
        tokenizer_revision: Optional[str] = None,
        max_model_len: Optional[int] = None,
        spec_target_max_model_len: Optional[int] = None,
        quantization: Optional[str] = None,
        deepspeed_fp_bits: Optional[int] = None,
        quant_llm_fp_bits: Optional[int] = None,
        quant_llm_exp_bits: Optional[int] = None,
        quantization_param_path: Optional[str] = None,
        enforce_eager: Optional[bool] = None,
        max_context_len_to_capture: Optional[int] = None,
        max_seq_len_to_capture: Optional[int] = None,
        max_logprobs: int = 5,
        disable_sliding_window: bool = False,
        skip_tokenizer_init: bool = False,
        served_model_name: Optional[Union[str, List[str]]] = None,
        limit_mm_per_prompt: Optional[Mapping[str, int]] = None,
        use_async_output_proc: bool = True,
        config_format: ConfigFormat = ConfigFormat.AUTO,
        mm_processor_kwargs: Optional[Dict[str, Any]] = None,
        override_neuron_config: Optional[Dict[str, Any]] = None,
    ) -> None:
        self.model = model
        self.tokenizer = tokenizer
        self.tokenizer_mode = tokenizer_mode
        self.trust_remote_code = trust_remote_code
        self.seed = seed
        self.revision = revision
        self.code_revision = code_revision
        self.rope_scaling = rope_scaling
        self.rope_theta = rope_theta
        # The tokenizer version is consistent with the model version by
        # default.
        if tokenizer_revision is None:
            self.tokenizer_revision = revision
        else:
            self.tokenizer_revision = tokenizer_revision
        self.quantization = quantization
        self.deepspeed_fp_bits = deepspeed_fp_bits
        self.quant_llm_fp_bits = quant_llm_fp_bits
        self.quant_llm_exp_bits = quant_llm_exp_bits
        self.quantization_param_path = quantization_param_path
        self.enforce_eager = enforce_eager
        self.max_context_len_to_capture = max_context_len_to_capture
        if self.max_context_len_to_capture is not None:
            raise ValueError("`max_context_len_to_capture` is deprecated. "
                             "Use `max_seq_len_to_capture` instead.")
        self.max_seq_len_to_capture = (max_seq_len_to_capture
                                       or max_context_len_to_capture)
        self.max_logprobs = max_logprobs
        self.disable_sliding_window = disable_sliding_window
        self.skip_tokenizer_init = skip_tokenizer_init

        self.hf_config = get_config(self.model, trust_remote_code, revision,
                                    code_revision, rope_scaling, rope_theta,
                                    config_format)
        self.hf_text_config = get_hf_text_config(self.hf_config)
        self.hf_image_processor_config = get_hf_image_processor_config(
            self.model, revision)
        self.dtype = _get_and_verify_dtype(self.hf_text_config, dtype)
        self.use_async_output_proc = use_async_output_proc
        self.mm_processor_kwargs = mm_processor_kwargs

        # Set enforce_eager to False if the value is unset.
        if self.enforce_eager is None:
            self.enforce_eager = False

        sliding_window = getattr(self.hf_text_config, "sliding_window", None)
        has_interleaved_attention = (sliding_window is not None) and (
            isinstance(sliding_window, list) or
            (self.hf_text_config.model_type in ["gemma2"]))
        if (not self.disable_sliding_window and has_interleaved_attention):
            sliding_window_len_min = get_min_sliding_window(
                self.hf_text_config.sliding_window)
            print_warning_once(
                f"{self.hf_text_config.model_type} has interleaved attention, "
                "which is currently not supported by vLLM. Disabling sliding "
                "window and capping the max length to the sliding window size "
                f"({sliding_window_len_min}).")
            self.disable_sliding_window = True

        self.max_model_len = _get_and_verify_max_len(
            hf_config=self.hf_text_config,
            max_model_len=max_model_len,
            disable_sliding_window=self.disable_sliding_window,
            sliding_window_len=self.get_hf_config_sliding_window(),
            spec_target_max_model_len=spec_target_max_model_len,
            rope_scaling_arg=self.rope_scaling)
        self.served_model_name = get_served_model_name(model,
                                                       served_model_name)
        self.multimodal_config = self._init_multimodal_config(
            limit_mm_per_prompt)
        if not self.skip_tokenizer_init:
            self._verify_tokenizer_mode()
        self.override_neuron_config = override_neuron_config if is_neuron(
        ) else None
        self._verify_embedding_mode()
        self._verify_quantization()
        self._verify_cuda_graph()

    def _init_multimodal_config(
        self, limit_mm_per_prompt: Optional[Mapping[str, int]]
    ) -> Optional["MultiModalConfig"]:
        architectures = getattr(self.hf_config, "architectures", [])
        if any(
                ModelRegistry.is_multimodal_model(arch)
                for arch in architectures):
            return MultiModalConfig(limit_per_prompt=limit_mm_per_prompt or {})
        else:
            if limit_mm_per_prompt:
                raise ValueError(
                    "limit_mm_per_prompt is only supported for multimodal "
                    "models.")
            return None

    def _verify_tokenizer_mode(self) -> None:
        tokenizer_mode = self.tokenizer_mode.lower()
        if tokenizer_mode not in ["auto", "slow", "mistral"]:
            raise ValueError(
                f"Unknown tokenizer mode: {self.tokenizer_mode}. Must be "
                "either 'auto', 'slow' or 'mistral'.")
        self.tokenizer_mode = tokenizer_mode

    def _verify_embedding_mode(self) -> None:
        architectures = getattr(self.hf_config, "architectures", [])
        self.embedding_mode = any(
            ModelRegistry.is_embedding_model(arch) for arch in architectures)

    def _parse_quant_hf_config(self):
        quant_cfg = getattr(self.hf_config, "quantization_config", None)
        if quant_cfg is None:
            # compressed-tensors uses a "compression_config" key
            quant_cfg = getattr(self.hf_config, "compression_config", None)
        return quant_cfg

    def _verify_quantization(self) -> None:
        supported_quantization = [*QUANTIZATION_METHODS]
        rocm_supported_quantization = [
            "awq", "gptq", "squeezellm", "fp8",
            "compressed_tensors", "compressed-tensors",
            "fbgemm_fp8"
        ]
        tpu_supported_quantization = ["tpu_int8"]
        neuron_supported_quantization = ["neuron_quant"]
        if self.quantization is not None:
            self.quantization = self.quantization.lower()

        # Parse quantization method from the HF model config, if available.
        quant_cfg = self._parse_quant_hf_config()

        if quant_cfg is not None:
            quant_method = quant_cfg.get("quant_method", "").lower()

            # Detect which checkpoint is it
            for _, method in QUANTIZATION_METHODS.items():
                quantization_override = method.override_quantization_method(
                    quant_cfg, self.quantization)
                if quantization_override:
                    if quantization_override == "awq_marlin":
                        quant_method = quant_method
                        logger.warning(
                            "awq_marlin kernels are temporarily disabled, "
                            "they will be re-enabled with a future release. "
                            "Falling back to AWQ kernels.")
                    else:
                        quant_method = quantization_override
                        self.quantization = quantization_override
                        break

            # Verify quantization configurations.
            if self.quantization is None:
                self.quantization = quant_method
            elif self.quantization != quant_method:
                raise ValueError(
                    "Quantization method specified in the model config "
                    f"({quant_method}) does not match the quantization "
                    f"method specified in the `quantization` argument "
                    f"({self.quantization}).")
        if self.quantization == "deepspeedfp":
            gs = 32 if self.deepspeed_fp_bits == 4 else 128
            self.hf_config.quantization_config = {
                "bits": self.deepspeed_fp_bits,
                "group_size": int(os.environ.get("DEEPSPEED_GROUP_SIZE", gs)),
                "quant_method": "deepspeedfp"
            }

        VALID_QUANT_LLM_FP_BITS = [2, 3, 4, 5, 6, 7]
        VALID_QUANT_LLM_EXPONENTS = [1, 2, 3, 4, 5]
        # The formula is mantissa_bits = fp_bits - exp_bits - 1
        # The default exp_bits for each fp_bits are as follows:
        DEFAULT_EXP_BITS = {
            2: 1,
            3: 2,
            4: 2,
            5: 2,
            6: 2,
            7: 3,
        }
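        # Worked example of the formula above (illustrative): for quant_llm
        # with fp_bits=6 and the default exp_bits=2, mantissa_bits is
        # 6 - 2 - 1 = 3 (an E2M3 format); fp_bits=7 defaults to exp_bits=3,
        # i.e. E3M3.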
        if self.quantization == "quant_llm":
            if self.quant_llm_fp_bits is None:
                raise ValueError(
                    "quant_llm_fp_bits must be specified when using "
                    "quant_llm quantization."
                )
            if self.quant_llm_fp_bits not in VALID_QUANT_LLM_FP_BITS:
                raise ValueError(
                    f"Invalid quant_llm_fp_bits: {self.quant_llm_fp_bits}. "
                    f"Must be one of {VALID_QUANT_LLM_FP_BITS}."
                )
            if self.quant_llm_exp_bits is None:
                self.quant_llm_exp_bits = DEFAULT_EXP_BITS[
                    self.quant_llm_fp_bits]
            else:
                if self.quant_llm_exp_bits not in VALID_QUANT_LLM_EXPONENTS:
                    raise ValueError(
                        f"Invalid exponent bits: {self.quant_llm_exp_bits}. "
                        f"Must be one of {VALID_QUANT_LLM_EXPONENTS}."
                    )

            self.hf_config.quantization_config = {
                "bits": self.quant_llm_fp_bits,
                "exp_bits": self.quant_llm_exp_bits,
                "quant_method": "quant_llm"
            }

        online_quant_methods = ["fp2", "fp3", "fp4", "fp5", "fp6", "fp7"]
        if self.quantization is not None and self.quantization in \
                online_quant_methods:
            fp_bits = int(self.quantization[2])
            if fp_bits not in VALID_QUANT_LLM_FP_BITS:
                raise ValueError(
                    f"Invalid quant_llm_fp_bits: {fp_bits}. "
                    f"Must be one of {VALID_QUANT_LLM_FP_BITS}."
                )
            if fp_bits in [2, 3]:
                logger.warning("FP2 and FP3 quantization methods lead to "
                               "significant accuracy loss. Use them with "
                               "caution. Model may be incoherent.")
            exp_bits = DEFAULT_EXP_BITS[fp_bits]
            self.hf_config.quantization_config = {
                "bits": fp_bits,
                "exp_bits": exp_bits,
                "quant_method": self.quantization
            }
            self.dtype = torch.float16
            self.enforce_eager = True

        if self.quantization is not None:
            if self.quantization not in supported_quantization:
                raise ValueError(
                    f"Unknown quantization method: {self.quantization}. Must "
                    f"be one of {supported_quantization}.")
            if is_hip(
            ) and self.quantization not in rocm_supported_quantization:
                raise ValueError(
                    f"{self.quantization} quantization is currently not "
                    "supported in ROCm.")
            if current_platform.is_tpu(
            ) and self.quantization not in tpu_supported_quantization:
                raise ValueError(
                    f"{self.quantization} quantization is currently not "
                    f"supported in TPU Backend.")
            if self.quantization not in _OPTIMIZED_QUANTS:
                logger.warning(
                    f"{self.quantization} quantization is not fully "
                    "optimized yet. The speed can be slower than "
                    "non-quantized models.")
            if self.quantization == "deepspeedfp" and self.deepspeed_fp_bits \
                    is None:
                raise ValueError(
                    "deepspeed_fp_bits must be specified when using "
                    "deepspeedfp quantization.")
            if (self.quantization == "awq" and is_hip()
                    and not envs.APHRODITE_USE_TRITON_AWQ):
                logger.warning(
                    "Using AWQ quantization with ROCm, but "
                    "APHRODITE_USE_TRITON_AWQ is not set, enabling "
                    "APHRODITE_USE_TRITON_AWQ.")
                envs.APHRODITE_USE_TRITON_AWQ = True
            if is_neuron(
            ) and self.quantization not in neuron_supported_quantization:
                raise ValueError(
                    f"{self.quantization} quantization is currently not "
                    f"supported in Neuron Backend.")

    def _verify_cuda_graph(self) -> None:
        if self.max_seq_len_to_capture is None:
            self.max_seq_len_to_capture = self.max_model_len
        self.max_seq_len_to_capture = min(self.max_seq_len_to_capture,
                                          self.max_model_len)

    def verify_async_output_proc(self, parallel_config, speculative_config,
                                 device_config) -> None:
        if not self.use_async_output_proc:
            # Nothing to check
            return

        if parallel_config.pipeline_parallel_size > 1:
            logger.warning("Async output processing can not be enabled "
                           "with pipeline parallel")
            self.use_async_output_proc = False
            return

        if device_config.device_type not in ("cuda", "tpu"):
            logger.warning(
                "Async output processing is only supported for CUDA or TPU. "
                "Disabling it for other platforms.")
            self.use_async_output_proc = False
            return

        if envs.APHRODITE_USE_RAY_SPMD_WORKER:
            logger.warning(
                "Async output processing can not be enabled with ray spmd")
            self.use_async_output_proc = False
            return

        if device_config.device_type == "cuda" and self.enforce_eager:
            logger.warning(
                "To see benefits of async output processing, enable CUDA "
                "graph. Since, enforce-eager is enabled, async output "
                "processor cannot be used")
            self.use_async_output_proc = not self.enforce_eager
            return

        # Async postprocessor is not necessary with embedding mode
        # since there is no token generation
        if self.embedding_mode:
            self.use_async_output_proc = False

        if speculative_config:
            logger.warning("Async output processing is not supported with"
                           " speculative decoding currently.")
            self.use_async_output_proc = False

    def verify_with_parallel_config(
        self,
        parallel_config: "ParallelConfig",
    ) -> None:
        total_num_attention_heads = getattr(self.hf_text_config,
                                            "num_attention_heads", 0)
        tensor_parallel_size = parallel_config.tensor_parallel_size
        if (total_num_attention_heads % tensor_parallel_size != 0
                and self.quantization is not None):
            raise ValueError(
                f"Total number of attention heads "
                f"({total_num_attention_heads})"
                " must be divisible by tensor parallel size "
                f"({tensor_parallel_size}) when quantization is used.")

        pipeline_parallel_size = parallel_config.pipeline_parallel_size
        architectures = getattr(self.hf_config, "architectures", [])
        if not all(arch in _PP_SUPPORTED_MODELS
                   for arch in architectures) and pipeline_parallel_size > 1:
            raise NotImplementedError(
                "Pipeline parallelism is only supported for the following "
                f" architectures: {_PP_SUPPORTED_MODELS}. You are using "
                f"the following architecture: {architectures}.")
        if self.quantization == "bitsandbytes" and self.enforce_eager is False:
            logger.warning("CUDA graph is not supported on BitsAndBytes yet, "
                           "falling back to eager mode.")
            self.enforce_eager = True
        if pipeline_parallel_size > 1 and self.use_async_output_proc:
            logger.warning("Async output processor is not supported with "
                           "pipeline parallelism currently. Disabling it.")
            self.use_async_output_proc = False

    def is_attention_free(self) -> bool:
        """Returns True if the model has no attention, i.e. the model has no
        state that grows with the size of the context.
        """
        # Return true if the model is mamba.
        # This check should be augmented with more models in the future,
        # and made more robust if possible.
        if hasattr(self.hf_text_config,
                   "model_type") and self.hf_text_config.model_type == 'mamba':
            return True
        return False

    def get_hf_config_sliding_window(
            self) -> Union[Optional[int], List[Optional[int]]]:
        """Get the sliding window size, or None if disabled."""
        # Some models, like Qwen2 and Qwen1.5, use `use_sliding_window` in
        # addition to sliding window size. We check if that field is present
        # and if it's False, return None.
        if (hasattr(self.hf_text_config, "use_sliding_window")
                and not self.hf_text_config.use_sliding_window):
            return None
        return getattr(self.hf_text_config, "sliding_window", None)

    def get_sliding_window(self) -> Optional[Union[int, List[Optional[int]]]]:
        """Get the sliding window size, or None if disabled."""
        # If user disables sliding window, return None.
        if self.disable_sliding_window:
            return None
        # Otherwise get the value from the hf config.
        return self.get_hf_config_sliding_window()

    def get_vocab_size(self) -> int:
        return self.hf_text_config.vocab_size

    def get_hidden_size(self) -> int:
        return self.hf_text_config.hidden_size

    def get_head_size(self) -> int:
        # TODO remove hard code
        spec_model_types = ["medusa", "mlp_speculator"]
        if hasattr(self.hf_text_config, "model_type"
                   ) and self.hf_text_config.model_type == 'deepseek_v2':
            # FlashAttention supports only head_size 32, 64, 128, 256,
            # we need to pad head_size 192 to 256
            return 256
        if self.is_attention_free() or \
                self.hf_text_config.model_type in spec_model_types:
            return 0
        if hasattr(self.hf_text_config, "head_dim"):
            return self.hf_text_config.head_dim
        # FIXME: This may not be true for all models.
        return (self.hf_text_config.hidden_size //
                self.hf_text_config.num_attention_heads)
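
    # Example of the fallback arithmetic above (illustrative): a Llama-style
    # config with hidden_size=4096, num_attention_heads=32 and no explicit
    # `head_dim` attribute yields a head size of 4096 // 32 = 128.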

    def get_total_num_kv_heads(self) -> int:
        """Returns the total number of KV heads."""
        # For GPTBigCode & Falcon:
        # NOTE: for falcon, when new_decoder_architecture is True, the
        # multi_query flag is ignored and we use n_head_kv for the number of
        # KV heads.
        falcon_model_types = ["falcon", "RefinedWeb", "RefinedWebModel"]
        new_decoder_arch_falcon = (
            self.hf_config.model_type in falcon_model_types
            and getattr(self.hf_config, "new_decoder_architecture", False))
        if not new_decoder_arch_falcon and getattr(self.hf_text_config,
                                                   "multi_query", False):
            # Multi-query attention, only one KV head.
            # Currently, tensor parallelism is not supported in this case.
            return 1

        # For DBRX and MPT
        if self.hf_config.model_type == "mpt":
            if "kv_n_heads" in self.hf_config.attn_config:
                return self.hf_config.attn_config["kv_n_heads"]
            return self.hf_config.num_attention_heads
        if self.hf_config.model_type == "dbrx":
            return getattr(self.hf_config.attn_config, "kv_n_heads",
                           self.hf_config.num_attention_heads)

        if self.is_attention_free():
            return 0

        attributes = [
            # For Falcon:
            "n_head_kv",
            "num_kv_heads",
            # For LLaMA-2:
            "num_key_value_heads",
            # For ChatGLM:
            "multi_query_group_num",
        ]
        for attr in attributes:
            num_kv_heads = getattr(self.hf_text_config, attr, None)
            if num_kv_heads is not None:
                return num_kv_heads

        # For non-grouped-query attention models, the number of KV heads is
        # equal to the number of attention heads.
        return self.hf_text_config.num_attention_heads

    def get_num_kv_heads(self,
                         parallel_config: "ParallelConfig",
                         tp_rank: int = 0) -> int:
        """Returns the number of KV heads per GPU."""
        total_num_kv_heads = self.get_total_num_kv_heads()
        # If tensor parallelism is used, we divide the number of KV heads by
        # the tensor parallel size. We will replicate the KV heads in the
        # case where the number of KV heads is smaller than the tensor
        # parallel size so each GPU has at least one KV head.
        result = get_current_tp_rank_partition_size(
            total_num_kv_heads, tp_rank, parallel_config.tensor_parallel_size)
        return max(1, result)

    def get_num_attention_heads(self,
                                parallel_config: "ParallelConfig",
                                tp_rank: int = 0) -> int:
        if getattr(self.hf_text_config, "num_attention_heads", None) is None:
            return 0

        num_total_kv_heads = self.get_total_num_kv_heads()
        num_kv_heads = self.get_num_kv_heads(parallel_config, tp_rank)
        num_total_attention_heads = self.hf_text_config.num_attention_heads
        num_heads_per_kv_head = num_total_attention_heads // num_total_kv_heads
        # For GQA attention we make sure the whole attention head group is
        # together on the same GPU.
        return num_kv_heads * num_heads_per_kv_head
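
    # Illustrative GQA example for the two methods above: with 32 total
    # attention heads, 8 total KV heads and tensor_parallel_size=4, each rank
    # gets 8 // 4 = 2 KV heads and 2 * (32 // 8) = 8 attention heads, so a
    # whole query-head group stays on one GPU.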

    def get_num_layers(self, parallel_config: "ParallelConfig") -> int:
        from aphrodite.distributed.utils import get_pp_indices
        total_num_hidden_layers = getattr(self.hf_text_config,
                                          "num_hidden_layers", 0)
        pp_rank = parallel_config.rank // parallel_config.tensor_parallel_size
        pp_size = parallel_config.pipeline_parallel_size
        start, end = get_pp_indices(total_num_hidden_layers, pp_rank, pp_size)
        return end - start

    def contains_seqlen_agnostic_layers(
            self, parallel_config: "ParallelConfig") -> bool:
        """True for Mamba/SSM models (Jamba)"""
        return self._get_num_seqlen_agnostic_layers(parallel_config) > 0

    def get_layers_block_type(self,
                              parallel_config: "ParallelConfig") -> List[str]:
        num_layers = self.get_num_layers(parallel_config)
        if self.is_attention_free():
            assert (self.hf_config.model_type == "mamba")
            return ["mamba"] * num_layers
        # Transformers supports layers_block_type @property
        return getattr(self.hf_config, "layers_block_type",
                       ["attention"] * num_layers)

    def get_num_attention_layers(self,
                                 parallel_config: "ParallelConfig") -> int:
        return len([
            t for t in self.get_layers_block_type(parallel_config)
            if t == "attention"
        ])

    def _get_num_seqlen_agnostic_layers(
            self, parallel_config: "ParallelConfig") -> int:
        return len([
            t for t in self.get_layers_block_type(parallel_config)
            if t != "attention"
        ])

    def get_multimodal_config(self) -> "MultiModalConfig":
        """
        Get the multimodal configuration of the model.

        Raises:
            ValueError: If the model is not multimodal.
        """
        if self.multimodal_config is None:
            raise ValueError("The model is not multimodal.")

        return self.multimodal_config

    @property
    def is_encoder_decoder_model(self) -> bool:
        """Extract the HF encoder/decoder model flag."""
        return getattr(self.hf_config, "is_encoder_decoder", False)

    @property
    def is_embedding_model(self) -> bool:
        """Extract the embedding model flag."""
        return self.embedding_mode

    @property
    def is_multimodal_model(self) -> bool:
        return self.multimodal_config is not None


class CacheConfig:
    """Configuration for the KV cache.

    Args:
        block_size: Size of a cache block in number of tokens.
        gpu_memory_utilization: Fraction of GPU memory to use for the
            Aphrodite execution.
        swap_space: Size of the CPU swap space per GPU (in GiB).
        cache_dtype: Data type for kv cache storage.
        num_gpu_blocks_override: Number of GPU blocks to use. This overrides
            the profiled num_gpu_blocks if specified. Does nothing if None.
    """

    def __init__(
        self,
        block_size: int,
        gpu_memory_utilization: float,
        swap_space: float,
        cache_dtype: str,
        is_attention_free: bool = False,
        num_gpu_blocks_override: Optional[int] = None,
        sliding_window: Optional[int] = None,
        enable_prefix_caching: bool = False,
        cpu_offload_gb: float = 0.0,
    ) -> None:
        self.block_size = block_size
        self.gpu_memory_utilization = gpu_memory_utilization
        self.swap_space_bytes = swap_space * GiB_bytes
        self.num_gpu_blocks_override = num_gpu_blocks_override
        self.cache_dtype = cache_dtype
        self.is_attention_free = is_attention_free
        self.sliding_window = sliding_window
        self.enable_prefix_caching = enable_prefix_caching
        self.cpu_offload_gb = cpu_offload_gb
        self._verify_args()
        self._verify_cache_dtype()
        self._verify_prefix_caching()

        # Will be set after profiling.
        self.num_gpu_blocks = None
        self.num_cpu_blocks = None

    def metrics_info(self):
        # convert cache_config to dict(key: str, value: str) for prometheus
        # metrics info
        return {key: str(value) for key, value in self.__dict__.items()}

    def _verify_args(self) -> None:
        if self.gpu_memory_utilization > 1.0:
            raise ValueError(
                "GPU memory utilization must be less than 1.0. Got "
                f"{self.gpu_memory_utilization}.")

    def _verify_cache_dtype(self) -> None:
        if self.cache_dtype == "auto":
            pass
        elif self.cache_dtype in ("fp8", "fp8_e4m3", "fp8_e5m2"):
            logger.info(
                "Using fp8 data type to store kv cache. It reduces the GPU "
                "memory footprint and boosts the performance. "
                "Meanwhile, it may cause accuracy drop without a proper "
                "scaling factor")
        else:
            raise ValueError(f"Unknown kv cache dtype: {self.cache_dtype}")

    def _verify_prefix_caching(self) -> None:
        if not self.enable_prefix_caching:
            return

        if self.sliding_window is not None:
            raise NotImplementedError(
                "Prefix caching is not supported with sliding window. "
                "Run with --disable-sliding-window to use prefix caching.")
        if self.cache_dtype == "fp8":
            capability = current_platform.get_device_capability()
            capability = capability[0] * 10 + capability[1]
            if capability < 89:
                raise NotImplementedError(
                    "FP8 KV cache with prefix caching is only supported on "
                    "GPUs with compute capability 8.9 or higher (e.g., "
                    "4090, H100). Your GPU has compute capability "
                    f"{capability}")
        if not HAS_TRITON and self.enable_prefix_caching:
            raise ValueError("Triton is not installed, "
                             "prefix caching will not work.")

    def verify_with_parallel_config(
        self,
        parallel_config: "ParallelConfig",
    ) -> None:
        total_cpu_memory = get_cpu_memory()
        # FIXME: Here, it is assumed that the GPUs in a tensor parallel
        # group are in the same node. However, the GPUs may span multiple
        # nodes.
        num_gpus_per_node = parallel_config.tensor_parallel_size
        cpu_memory_usage = self.swap_space_bytes * num_gpus_per_node

        msg = (f"{cpu_memory_usage / GiB_bytes:.2f} GiB out of the "
               f"{total_cpu_memory / GiB_bytes:.2f} GiB total CPU memory "
               "is allocated for the swap space.")
        if cpu_memory_usage > 0.7 * total_cpu_memory:
            raise ValueError("Too large swap space. " + msg)
        elif cpu_memory_usage > 0.4 * total_cpu_memory:
            logger.warning("Possibly too large swap space. " + msg)
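
    # Worked example of the check above (illustrative): with swap_space=4 GiB
    # and tensor_parallel_size=4, cpu_memory_usage is 16 GiB; on a host with
    # 32 GiB of CPU RAM that exceeds the 40% threshold (12.8 GiB) and logs a
    # warning, while anything above 70% (22.4 GiB) raises an error.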


@dataclass
class TokenizerPoolConfig:
    """Configuration for the tokenizer pool.

    Args:
        pool_size: Number of tokenizer workers in the pool.
        pool_type: Type of the pool.
        extra_config: Additional config for the pool.
            The way the config will be used depends on the
            pool type.
    """
    pool_size: int
    pool_type: Union[str, Type["BaseTokenizerGroup"]]
    extra_config: dict

    def __post_init__(self):
        if self.pool_type not in ("ray", ) and not isinstance(
                self.pool_type, type):
            raise ValueError(f"Unknown pool type: {self.pool_type}")
        if not isinstance(self.extra_config, dict):
            raise ValueError("extra_config must be a dictionary.")

    @classmethod
    def create_config(
        cls, tokenizer_pool_size: int, tokenizer_pool_type: str,
        tokenizer_pool_extra_config: Optional[Union[str, dict]]
    ) -> Optional["TokenizerPoolConfig"]:
        """Create a TokenizerPoolConfig from the given parameters.

        If tokenizer_pool_size is 0, return None.

        Args:
            tokenizer_pool_size: Number of tokenizer workers in the pool.
            tokenizer_pool_type: Type of the pool.
            tokenizer_pool_extra_config: Additional config for the pool.
                The way the config will be used depends on the
                pool type. This can be a JSON string (will be parsed).
        """
        if tokenizer_pool_size:
            if isinstance(tokenizer_pool_extra_config, str):
                tokenizer_pool_extra_config_parsed = json.loads(
                    tokenizer_pool_extra_config)
            else:
                tokenizer_pool_extra_config_parsed = (
                    tokenizer_pool_extra_config or {})
            tokenizer_pool_config = cls(tokenizer_pool_size,
                                        tokenizer_pool_type,
                                        tokenizer_pool_extra_config_parsed)
        else:
            tokenizer_pool_config = None
        return tokenizer_pool_config
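
    # Illustrative usage sketch for create_config (argument values are
    # assumptions): extra config may be passed as a dict or a JSON string.
    #
    #     pool_config = TokenizerPoolConfig.create_config(
    #         tokenizer_pool_size=4,
    #         tokenizer_pool_type="ray",
    #         tokenizer_pool_extra_config='{"some_extra_key": 1}',
    #     )
    #     # With tokenizer_pool_size=0 the method returns None.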


class LoadFormat(str, enum.Enum):
    AUTO = "auto"
    PT = "pt"
    SAFETENSORS = "safetensors"
    NPCACHE = "npcache"
    DUMMY = "dummy"
    TENSORIZER = "tensorizer"
    SHARDED_STATE = "sharded_state"
    GGUF = "gguf"
    BITSANDBYTES = "bitsandbytes"
    MISTRAL = "mistral"


@dataclass
class LoadConfig:
    """
    download_dir: Directory to download and load the weights, default to the
        default cache directory of huggingface.
    load_format: The format of the model weights to load:
        "auto" will try to load the weights in the safetensors format and
            fall back to the pytorch bin format if safetensors format is
            not available.
        "pt" will load the weights in the pytorch bin format.
        "safetensors" will load the weights in the safetensors format.
        "npcache" will load the weights in pytorch format and store
            a numpy cache to speed up the loading.
        "dummy" will initialize the weights with random values, which is
            mainly for profiling.
        "tensorizer" will use CoreWeave's tensorizer library for
            fast weight loading.
    ignore_patterns: The list of patterns to ignore when loading the model.
        Default to "original/**/*" to avoid repeated loading of llama's
        checkpoints.
    """

    load_format: Union[str, LoadFormat, "BaseModelLoader"] = LoadFormat.AUTO
    download_dir: Optional[str] = None
    model_loader_extra_config: Optional[Union[str, dict]] = field(
        default_factory=dict)
    ignore_patterns: Optional[Union[List[str], str]] = None

    def __post_init__(self):
        model_loader_extra_config = self.model_loader_extra_config or {}
        if isinstance(model_loader_extra_config, str):
            self.model_loader_extra_config = json.loads(
                model_loader_extra_config)
        self._verify_load_format()

        if self.ignore_patterns is not None and len(self.ignore_patterns) > 0:
            logger.info(
                "Ignoring the following patterns when downloading weights: "
                f"{self.ignore_patterns}")
        else:
            self.ignore_patterns = ["original/**/*"]

    def _verify_load_format(self) -> None:
        if not isinstance(self.load_format, str):
            return

        load_format = self.load_format.lower()
        self.load_format = LoadFormat(load_format)

        rocm_not_supported_load_format: List[str] = []
        if is_hip() and load_format in rocm_not_supported_load_format:
            rocm_supported_load_format = [
                f for f in LoadFormat.__members__
                if (f not in rocm_not_supported_load_format)
            ]
            raise ValueError(
                f"load format '{load_format}' is not supported in ROCm. "
                f"Supported load formats are "
                f"{rocm_supported_load_format}")


class ParallelConfig:
    """Configuration for the distributed execution.

    Args:
        pipeline_parallel_size: Number of pipeline parallel groups.
        tensor_parallel_size: Number of tensor parallel groups.
        worker_use_ray: Deprecated, use distributed_executor_backend instead.
        max_parallel_loading_workers: Maximum number of multiple batches
            when load model sequentially. To avoid RAM OOM when using tensor
            parallel and large models.
        disable_custom_all_reduce: Disable the custom all-reduce kernel and
            fall back to NCCL.
        tokenizer_pool_config: Config for the tokenizer pool.
            If None, will use synchronous tokenization.
        ray_workers_use_nsight: Whether to profile Ray workers with nsight, see
            https://docs.ray.io/en/latest/ray-observability/user-guides/profiling.html#profiling-nsight-profiler.
        placement_group: ray distributed model workers placement group.
        distributed_executor_backend: Backend to use for distributed model
            workers, either "ray" or "mp" (multiprocessing). If either
            pipeline_parallel_size or tensor_parallel_size is greater than 1,
            will default to "ray" if Ray is installed or "mp" otherwise.
    """

    def __init__(
        self,
        pipeline_parallel_size: int,
        tensor_parallel_size: int,
        worker_use_ray: Optional[bool] = None,
        max_parallel_loading_workers: Optional[int] = None,
        disable_custom_all_reduce: bool = False,
        tokenizer_pool_config: Optional[TokenizerPoolConfig] = None,
        ray_workers_use_nsight: bool = False,
        placement_group: Optional["PlacementGroup"] = None,
        distributed_executor_backend: Optional[Union[
            str, Type["ExecutorBase"]]] = None,
    ) -> None:
        self.pipeline_parallel_size = pipeline_parallel_size
        self.tensor_parallel_size = tensor_parallel_size
        self.distributed_executor_backend = distributed_executor_backend
        self.max_parallel_loading_workers = max_parallel_loading_workers
        self.disable_custom_all_reduce = disable_custom_all_reduce
        self.tokenizer_pool_config = tokenizer_pool_config
        self.ray_workers_use_nsight = ray_workers_use_nsight
        self.placement_group = placement_group
        self.world_size = pipeline_parallel_size * self.tensor_parallel_size

        if worker_use_ray:
            if self.distributed_executor_backend is None:
                self.distributed_executor_backend = "ray"
            elif not self.use_ray:
                raise ValueError(f"worker-use-ray can't be used with "
                                 f"distributed executor backend "
                                 f"'{self.distributed_executor_backend}'.")

        if current_platform.is_tpu() and self.world_size > 1:
            if self.distributed_executor_backend is None:
                self.distributed_executor_backend = "ray"
            if self.distributed_executor_backend != "ray":
                raise ValueError(
                    "TPU backend only supports Ray for distributed inference.")

        if self.distributed_executor_backend is None and self.world_size > 1:
            # We use multiprocessing by default if world_size fits on the
            # current node and we aren't in a ray placement group.
            from aphrodite.executor import ray_utils
            backend = "mp"
            ray_found = ray_utils.ray_is_available()
            if not is_cpu() and cuda_device_count_stateless() < self.world_size:
                if not ray_found:
                    raise ValueError("Unable to load Ray which is "
                                     "required for multi-node inference, "
                                     "please install Ray with `pip install "
                                     "ray`.") from ray_utils.ray_import_err
                backend = "ray"
            elif ray_found:
                if self.placement_group:
                    backend = "ray"
                else:
                    from ray import is_initialized as ray_is_initialized
                    if ray_is_initialized():
                        from ray.util import get_current_placement_group
                        if get_current_placement_group():
                            backend = "ray"
            self.distributed_executor_backend = backend
            logger.info(
                f"Defaulting to use {backend} for distributed inference.")

        self._verify_args()
        self.rank = 0

    @property
    def use_ray(self) -> bool:
        return self.distributed_executor_backend == "ray" or (
            isinstance(self.distributed_executor_backend, type)
            and self.distributed_executor_backend.uses_ray)

    def _verify_args(self) -> None:
        # Lazy import to avoid circular import
        from aphrodite.executor.executor_base import ExecutorBase

        if self.distributed_executor_backend not in (
                "ray", "mp", None) and not (isinstance(
                    self.distributed_executor_backend, type) and issubclass(
                        self.distributed_executor_backend, ExecutorBase)):
            raise ValueError(
                "Unrecognized distributed executor backend "
                f"{self.distributed_executor_backend}. Supported "
                "values are 'ray', 'mp' or custom ExecutorBase subclass.")
        if self.use_ray:
            from aphrodite.executor import ray_utils
            ray_utils.assert_ray_available()
        if is_hip():
            self.disable_custom_all_reduce = True
            logger.info(
                "Disabled the custom all-reduce kernel because it is not "
                "supported on AMD GPUs.")
        if self.ray_workers_use_nsight and not self.use_ray:
            raise ValueError("Unable to use nsight profiling unless workers "
                             "run with Ray.")


class SchedulerConfig:
    """Scheduler configuration.

    Args:
        max_num_batched_tokens: Maximum number of tokens to be processed in
            a single iteration.
        max_num_seqs: Maximum number of sequences to be processed in a single
            iteration.
        max_model_len: Maximum length of a sequence (including prompt
            and generated text).
        is_attention_free: True if the running model does not have state that
            grows as the context size increases.
        use_v2_block_manager: Whether to use the BlockSpaceManagerV2 or not.
        num_lookahead_slots: The number of slots to allocate per sequence per
            step, beyond the known token ids. This is used in speculative
            decoding to store KV activations of tokens which may or may not be
            accepted.
        delay_factor: Apply a delay (of delay factor multiplied by previous
            prompt latency) before scheduling next prompt.
        enable_chunked_prefill: If True, prefill requests can be chunked based
            on the remaining max_num_batched_tokens.
        embedding_mode: Whether the running model is for embedding.
        preemption_mode: Whether to perform preemption by swapping or
            recomputation. If not specified, we determine the mode as follows:
            We use recomputation by default since it incurs lower overhead than
            swapping. However, when the sequence group has multiple sequences
            (e.g., beam search), recomputation is not currently supported. In
            such a case, we use swapping instead.
        send_delta_data: Private API. If used, scheduler sends delta data to
            workers instead of an entire data. It should be enabled only
            when SPMD worker architecture is enabled. I.e.,
            APHRODITE_USE_RAY_SPMD_WORKER=1
        single_user_mode: If True, we only allocate blocks for one sequence
            and use the maximum sequence length as the number of tokens.
    """

    def __init__(self,
                 max_num_batched_tokens: Optional[int],
                 max_num_seqs: int,
                 max_model_len: int,
                 cache_config: Optional["CacheConfig"] = None,
                 is_attention_free: bool = False,
                 use_v2_block_manager: bool = False,
                 num_lookahead_slots: int = 0,
                 delay_factor: float = 0.0,
                 enable_chunked_prefill: bool = False,
                 embedding_mode: bool = False,
                 is_multimodal_model: bool = False,
                 preemption_mode: Optional[str] = None,
                 num_scheduler_steps: int = 1,
                 multi_step_stream_outputs: bool = False,
                 send_delta_data: bool = False,
                 single_user_mode: bool = False) -> None:
        if max_num_batched_tokens is None:
            if enable_chunked_prefill:
                # This is the value that gives the best balance between ITL
                # and TTFT on A100. Note it is not optimized for throughput.
  1026. max_num_batched_tokens = 512
  1027. else:
  1028. # If max_model_len is too short, use 2048 as the default value
  1029. # for higher throughput.
  1030. max_num_batched_tokens = max(max_model_len, 2048)
  1031. if embedding_mode:
  1032. # For embedding, choose specific value for higher throughput
  1033. max_num_batched_tokens = max(
  1034. max_num_batched_tokens,
  1035. _EMBEDDING_MODEL_MAX_NUM_BATCHED_TOKENS,
  1036. )
  1037. if is_multimodal_model:
  1038. # The value needs to be at least the number of multimodal tokens
  1039. max_num_batched_tokens = max(
  1040. max_num_batched_tokens,
  1041. _MULTIMODAL_MODEL_MAX_NUM_BATCHED_TOKENS,
  1042. )
  1043. self.max_num_batched_tokens = max_num_batched_tokens
  1044. if enable_chunked_prefill:
  1045. logger.info(
  1046. "Chunked prefill is enabled with "
  1047. f"max_num_batched_tokens={self.max_num_batched_tokens}.")
  1048. if single_user_mode:
  1049. max_num_seqs = 1
  1050. if cache_config and cache_config.enable_prefix_caching:
  1051. if not envs.APHRODITE_FORCE_SINGLE_USER_PREFIX_CACHE:
  1052. logger.warning(
  1053. "Prefix caching is not supported in single user mode, "
  1054. "this is not recommended and may lead to memory "
  1055. "issues. Set APHRODITE_FORCE_SINGLE_USER_PREFIX_CACHE=1"
  1056. " to force prefix caching.")
  1057. cache_config.enable_prefix_caching = False
  1058. else:
  1059. logger.warning(
  1060. "Prefix caching is enabled in single user mode, "
  1061. "this is not recommended and may lead to memory "
  1062. "issues.")
  1063. self.max_num_seqs = max_num_seqs
  1064. self.max_model_len = max_model_len
  1065. self.cache_config = cache_config
  1066. self.is_attention_free = is_attention_free
  1067. self.use_v2_block_manager = use_v2_block_manager
  1068. self.num_lookahead_slots = num_lookahead_slots
  1069. self.delay_factor = delay_factor
  1070. self.chunked_prefill_enabled = enable_chunked_prefill
  1071. self.embedding_mode = embedding_mode
  1072. self.preemption_mode = preemption_mode
  1073. self.num_scheduler_steps = num_scheduler_steps
  1074. self.multi_step_stream_outputs = multi_step_stream_outputs
  1075. self.send_delta_data = send_delta_data
  1076. self.single_user_mode = single_user_mode
  1077. self._verify_args()

    def _verify_args(self) -> None:
        if (self.max_num_batched_tokens < self.max_model_len
                and not self.chunked_prefill_enabled):
            raise ValueError(
                f"max_num_batched_tokens ({self.max_num_batched_tokens}) is "
                f"smaller than max_model_len ({self.max_model_len}). "
                "This effectively limits the maximum sequence length to "
                "max_num_batched_tokens and makes Aphrodite reject longer "
                "sequences. Please increase max_num_batched_tokens or "
                "decrease max_model_len.")

        if self.max_num_batched_tokens < self.max_num_seqs:
            raise ValueError(
                f"max_num_batched_tokens ({self.max_num_batched_tokens}) must "
                "be greater than or equal to max_num_seqs "
                f"({self.max_num_seqs}).")

        if self.num_lookahead_slots < 0:
            raise ValueError(
                "num_lookahead_slots "
                f"({self.num_lookahead_slots}) must be greater than or "
                "equal to 0.")

        if self.num_scheduler_steps < 1:
            raise ValueError(
                "num_scheduler_steps "
                f"({self.num_scheduler_steps}) must be greater than or "
                "equal to 1.")

    @property
    def is_multi_step(self) -> bool:
        return self.num_scheduler_steps > 1


class DeviceConfig:

    def __init__(self, device: str = "auto") -> None:
        if device == "auto":
            # Automated device type detection
            if is_neuron():
                self.device_type = "neuron"
            elif is_openvino():
                self.device_type = "openvino"
            elif current_platform.is_tpu():
                self.device_type = "tpu"
            elif is_cpu():
                self.device_type = "cpu"
            elif is_xpu():
                self.device_type = "xpu"
            else:
                # We don't call torch.cuda.is_available() here to
                # avoid initializing CUDA before workers are forked
                self.device_type = "cuda"
        else:
            # Device type is assigned explicitly
            self.device_type = device

        # Some device types require processing inputs on CPU
        if self.device_type in ["neuron", "openvino"]:
            self.device = torch.device("cpu")
        elif self.device_type in ["tpu"]:
            self.device = None
        else:
            # Set device with device type
            self.device = torch.device(self.device_type)
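
# Illustrative sketch (comment added for clarity; not part of the original
# module): with the detection above, DeviceConfig() on a machine with none of
# the special backends resolves to device_type "cuda", while an explicit
# DeviceConfig("cpu") yields device_type "cpu" and torch.device("cpu").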


class SpeculativeConfig:
    """Configuration for speculative decoding.

    The configuration is currently specialized to draft-model speculative
    decoding with top-1 proposals.
    """

    @staticmethod
    def maybe_create_spec_config(
        target_model_config: ModelConfig,
        target_parallel_config: ParallelConfig,
        target_dtype: str,
        speculative_model: Optional[str],
        speculative_model_quantization: Optional[str],
        speculative_draft_tensor_parallel_size: Optional[int],
        num_speculative_tokens: Optional[int],
        speculative_max_model_len: Optional[int],
        enable_chunked_prefill: bool,
        use_v2_block_manager: bool,
        disable_log_stats: bool,
        speculative_disable_by_batch_size: Optional[int],
        ngram_prompt_lookup_max: Optional[int],
        ngram_prompt_lookup_min: Optional[int],
        draft_token_acceptance_method: str,
        typical_acceptance_sampler_posterior_threshold: Optional[float],
        typical_acceptance_sampler_posterior_alpha: Optional[float],
        disable_logprobs: Optional[bool],
    ) -> Optional["SpeculativeConfig"]:
        """Create a SpeculativeConfig if possible, else return None.

        This function attempts to create a SpeculativeConfig object based on
        the provided parameters. If the necessary conditions are met, it
        returns an instance of SpeculativeConfig. Otherwise, it returns None.

        Args:
            target_model_config (ModelConfig): The configuration of the target
                model.
            target_parallel_config (ParallelConfig): The parallel configuration
                for the target model.
            target_dtype (str): The data type used for the target model.
            speculative_model (Optional[str]): The name of the speculative
                model, if provided.
            num_speculative_tokens (Optional[int]): The number of speculative
                tokens, if provided. Will default to the number in the draft
                model config if present, otherwise is required.
            speculative_model_quantization (Optional[str]): Quantization method
                that was used to quantize the speculative model weights. If
                None, we assume the model weights are not quantized.
            speculative_draft_tensor_parallel_size (Optional[int]): The degree
                of the tensor parallelism for the draft model.
            speculative_max_model_len (Optional[int]): The maximum model len of
                the speculative model. Used when testing the ability to skip
                speculation for some sequences.
            enable_chunked_prefill (bool): Whether Aphrodite is configured to
                use chunked prefill or not. Used for raising an error since
                it's not yet compatible with spec decode.
            use_v2_block_manager (bool): Whether Aphrodite is configured to
                use the v2 block manager or not. Used for raising an error
                since the v2 block manager is required with spec decode.
            speculative_disable_by_batch_size (Optional[int]): Disable
                speculative decoding for new incoming requests when the number
                of enqueued requests is larger than this value, if provided.
            ngram_prompt_lookup_max (Optional[int]): Max size of ngram token
                window, if provided.
            ngram_prompt_lookup_min (Optional[int]): Min size of ngram token
                window, if provided.
            draft_token_acceptance_method (str): The method to use for
                accepting draft tokens. This can take two possible
                values, 'rejection_sampler' and 'typical_acceptance_sampler',
                for RejectionSampler and TypicalAcceptanceSampler
                respectively.
            typical_acceptance_sampler_posterior_threshold (Optional[float]):
                A threshold value that sets a lower bound on the posterior
                probability of a token in the target model for it to be
                accepted. This threshold is used only when we use the
                TypicalAcceptanceSampler for token acceptance.
            typical_acceptance_sampler_posterior_alpha (Optional[float]):
                A scaling factor for the entropy-based threshold in the
                TypicalAcceptanceSampler.
            disable_logprobs (Optional[bool]): If set to True, token log
                probabilities are not returned during speculative decoding.
                If set to False, token log probabilities are returned
                according to the log probability settings in SamplingParams.
                If not specified, it defaults to True.

        Returns:
            Optional["SpeculativeConfig"]: An instance of SpeculativeConfig if
                the necessary conditions are met, else None.
        """
        if speculative_model is None:
            if num_speculative_tokens is not None:
                raise ValueError("num_speculative_tokens was provided without "
                                 "speculative_model.")
            return None

        if (speculative_disable_by_batch_size is not None
                and speculative_disable_by_batch_size < 2):
            raise ValueError("Expected the batch size threshold for disabling "
                             "speculative decoding to be > 1, but got "
                             f"{speculative_disable_by_batch_size=}")

        if enable_chunked_prefill:
            raise ValueError(
                "Speculative decoding and chunked prefill are "
                f"currently mutually exclusive ({enable_chunked_prefill=}).")

        if not use_v2_block_manager:
            raise ValueError(
                "Speculative decoding requires usage of the V2 "
                "block manager. Enable it with --use-v2-block-manager.")

        # TODO: The user should be able to specify revision/max model len
        # for the draft model. It is not currently supported.
        draft_revision = None
        draft_code_revision = None
        draft_quantization = speculative_model_quantization

        if speculative_model == "[ngram]":
            if ngram_prompt_lookup_min is None:
                ngram_prompt_lookup_min = 1
            if ngram_prompt_lookup_max is None or ngram_prompt_lookup_max < 1:
                raise ValueError(f"{ngram_prompt_lookup_max=} must be > 0")
            if ngram_prompt_lookup_min < 1:
                raise ValueError(f"{ngram_prompt_lookup_min=} must be > 0")
            if ngram_prompt_lookup_min > ngram_prompt_lookup_max:
                raise ValueError(f"{ngram_prompt_lookup_min=} cannot be "
                                 f"larger than {ngram_prompt_lookup_max=}")

            # TODO: Currently we still need to extract vocab_size from the
            # target model config; in the future we may refactor this out and
            # set the draft-related configs to None here.
            draft_model_config = target_model_config
            draft_parallel_config = target_parallel_config
        else:
            ngram_prompt_lookup_max = 0
            ngram_prompt_lookup_min = 0
            draft_model_config = ModelConfig(
                model=speculative_model,
                tokenizer=target_model_config.tokenizer,
                tokenizer_mode=target_model_config.tokenizer_mode,
                trust_remote_code=target_model_config.trust_remote_code,
                dtype=target_model_config.dtype,
                seed=target_model_config.seed,
                revision=draft_revision,
                code_revision=draft_code_revision,
                tokenizer_revision=target_model_config.tokenizer_revision,
                max_model_len=None,
                spec_target_max_model_len=target_model_config.max_model_len,
                quantization=draft_quantization,
                enforce_eager=target_model_config.enforce_eager,
                max_seq_len_to_capture=target_model_config.
                max_seq_len_to_capture,
                max_logprobs=target_model_config.max_logprobs,
            )

            draft_hf_config = draft_model_config.hf_config
            if (num_speculative_tokens is not None
                    and hasattr(draft_hf_config, "num_lookahead_tokens")):
                draft_hf_config.num_lookahead_tokens = num_speculative_tokens

            n_predict = getattr(draft_hf_config, "n_predict", None)
            if n_predict is not None:
                if num_speculative_tokens is None:
                    # Default to max value defined in draft model config.
                    num_speculative_tokens = n_predict
                elif num_speculative_tokens > n_predict:
                    # Verify provided value doesn't exceed the maximum
                    # supported by the draft model.
                    raise ValueError(
                        "This speculative model supports a maximum of "
                        f"num_speculative_tokens={n_predict}, but "
                        f"{num_speculative_tokens=} was provided.")

            draft_model_config.max_model_len = (
                SpeculativeConfig._maybe_override_draft_max_model_len(
                    speculative_max_model_len,
                    draft_model_config.max_model_len,
                    target_model_config.max_model_len,
                ))

            draft_parallel_config = (
                SpeculativeConfig.create_draft_parallel_config(
                    target_parallel_config,
                    speculative_draft_tensor_parallel_size))

        if num_speculative_tokens is None:
            raise ValueError(
                "num_speculative_tokens must be provided with "
                "speculative_model unless the draft model config contains an "
                "n_predict parameter.")

        if typical_acceptance_sampler_posterior_threshold is None:
            typical_acceptance_sampler_posterior_threshold = 0.09
        if typical_acceptance_sampler_posterior_alpha is None:
            typical_acceptance_sampler_posterior_alpha = 0.3
        if disable_logprobs is None:
            disable_logprobs = True

        return SpeculativeConfig(
            draft_model_config,
            draft_parallel_config,
            num_speculative_tokens,
            speculative_disable_by_batch_size,
            ngram_prompt_lookup_max,
            ngram_prompt_lookup_min,
            draft_token_acceptance_method=draft_token_acceptance_method,
            typical_acceptance_sampler_posterior_threshold=\
                typical_acceptance_sampler_posterior_threshold,
            typical_acceptance_sampler_posterior_alpha=\
                typical_acceptance_sampler_posterior_alpha,
            disable_logprobs=disable_logprobs,
            disable_log_stats=disable_log_stats,
        )
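
    # Note (comment added for clarity; not part of the original source):
    # maybe_create_spec_config returns None when no speculative_model is
    # given; when one is given, it fills in defaults of 0.09 for the typical
    # acceptance sampler's posterior threshold, 0.3 for its alpha, and True
    # for disable_logprobs before constructing the SpeculativeConfig.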

    @staticmethod
    def _maybe_override_draft_max_model_len(
        speculative_max_model_len: Optional[int],
        draft_max_model_len: int,
        target_max_model_len: int,
    ) -> int:
        """Determine the max sequence len for the draft model. This is usually
        the draft_max_model_len, but may be the target_max_model_len if it is
        less than the draft_max_model_len, or may be speculative_max_model_len
        if it is specified.

        This is necessary so that sequences do not exceed the capacity of the
        draft model or the target model.

        speculative_max_model_len is mainly used for testing that sequences
        can skip speculation.
        """
        if speculative_max_model_len is not None:
            if speculative_max_model_len > draft_max_model_len:
                raise ValueError(f"{speculative_max_model_len=} cannot be "
                                 f"larger than {draft_max_model_len=}")
            if speculative_max_model_len > target_max_model_len:
                raise ValueError(f"{speculative_max_model_len=} cannot be "
                                 f"larger than {target_max_model_len=}")
            return speculative_max_model_len

        return min(
            draft_max_model_len,
            target_max_model_len,
        )
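
    # Worked example (comment added for clarity; not part of the original
    # source): with speculative_max_model_len=None, a draft limit of 4096 and
    # a target limit of 2048, the helper above returns min(4096, 2048) == 2048,
    # so draft sequences can never outgrow the target model.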

    @staticmethod
    def create_draft_parallel_config(
        target_parallel_config: ParallelConfig,
        speculative_draft_tensor_parallel_size: Optional[int]
    ) -> ParallelConfig:
        """Create a parallel config for use by the draft worker.

        This is mostly a copy of the target parallel config, except the
        tp_size.
        """
        if speculative_draft_tensor_parallel_size is None:
            speculative_draft_tensor_parallel_size = \
                target_parallel_config.tensor_parallel_size
        elif speculative_draft_tensor_parallel_size != 1:
            # TODO: allow tp values larger than 1
            raise ValueError(
                f"{speculative_draft_tensor_parallel_size=} cannot be "
                f"any value other than 1")

        draft_parallel_config = ParallelConfig(
            pipeline_parallel_size=target_parallel_config.
            pipeline_parallel_size,
            tensor_parallel_size=speculative_draft_tensor_parallel_size,
            distributed_executor_backend=target_parallel_config.
            distributed_executor_backend,
            max_parallel_loading_workers=target_parallel_config.
            max_parallel_loading_workers,
            disable_custom_all_reduce=target_parallel_config.
            disable_custom_all_reduce,
            tokenizer_pool_config=target_parallel_config.tokenizer_pool_config,
            ray_workers_use_nsight=target_parallel_config.
            ray_workers_use_nsight,
            placement_group=target_parallel_config.placement_group,
        )

        return draft_parallel_config
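
    # Note (comment added for clarity; not part of the original source): the
    # draft worker therefore runs either with the target model's tensor
    # parallel size (when no explicit value is given) or with tp_size=1; any
    # other explicit value is rejected above until larger draft tp sizes are
    # supported.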

    def __init__(
        self,
        draft_model_config: ModelConfig,
        draft_parallel_config: ParallelConfig,
        num_speculative_tokens: int,
        speculative_disable_by_batch_size: Optional[int],
        ngram_prompt_lookup_max: Optional[int],
        ngram_prompt_lookup_min: Optional[int],
        draft_token_acceptance_method: str,
        typical_acceptance_sampler_posterior_threshold: float,
        typical_acceptance_sampler_posterior_alpha: float,
        disable_logprobs: bool,
        disable_log_stats: bool,
    ):
        """Create a SpeculativeConfig object.

        Args:
            draft_model_config: ModelConfig for the draft model.
            draft_parallel_config: ParallelConfig for the draft model.
            num_speculative_tokens: The number of tokens to sample from the
                draft model before scoring with the target model.
            speculative_disable_by_batch_size: Disable speculative
                decoding for new incoming requests when the number of
                enqueued requests is larger than this value.
            ngram_prompt_lookup_max: Max size of ngram token window.
            ngram_prompt_lookup_min: Min size of ngram token window.
            draft_token_acceptance_method (str): The method to use for
                accepting draft tokens. This can take two possible
                values, 'rejection_sampler' and 'typical_acceptance_sampler',
                for RejectionSampler and TypicalAcceptanceSampler
                respectively.
            typical_acceptance_sampler_posterior_threshold (float):
                A threshold value that sets a lower bound on the posterior
                probability of a token in the target model for it to be
                accepted. This threshold is used only when we use the
                TypicalAcceptanceSampler for token acceptance.
            typical_acceptance_sampler_posterior_alpha (float):
                A scaling factor for the entropy-based threshold in the
                TypicalAcceptanceSampler.
            disable_logprobs: If set to True, token log probabilities will not
                be returned even if requested by sampling parameters. This
                reduces latency by skipping logprob calculation in proposal
                sampling, target sampling, and after accepted tokens are
                determined. If set to False, log probabilities will be
                returned.
            disable_log_stats: Whether to disable periodic printing of stage
                times in speculative decoding.
        """
        self.draft_model_config = draft_model_config
        self.draft_parallel_config = draft_parallel_config
        self.num_speculative_tokens = num_speculative_tokens
        self.speculative_disable_by_batch_size = \
            speculative_disable_by_batch_size
        self.ngram_prompt_lookup_max = ngram_prompt_lookup_max or 0
        self.ngram_prompt_lookup_min = ngram_prompt_lookup_min or 0
        self.draft_token_acceptance_method = draft_token_acceptance_method
        self.typical_acceptance_sampler_posterior_threshold = \
            typical_acceptance_sampler_posterior_threshold
        self.typical_acceptance_sampler_posterior_alpha = \
            typical_acceptance_sampler_posterior_alpha
        self.disable_logprobs = disable_logprobs
        self.disable_log_stats = disable_log_stats

        self._verify_args()

    def _verify_args(self) -> None:
        if self.num_speculative_tokens <= 0:
            raise ValueError("Expected num_speculative_tokens to be greater "
                             f"than zero ({self.num_speculative_tokens}).")

        if self.draft_model_config:
            self.draft_model_config.verify_with_parallel_config(
                self.draft_parallel_config)

        # Validate and set draft token acceptance related settings.
        if self.draft_token_acceptance_method is None:
            raise ValueError("draft_token_acceptance_method is not set. "
                             "Expected values are rejection_sampler or "
                             "typical_acceptance_sampler.")

        if (self.draft_token_acceptance_method != 'rejection_sampler'
                and self.draft_token_acceptance_method !=
                'typical_acceptance_sampler'):
            raise ValueError(
                "Expected draft_token_acceptance_method to be either "
                "rejection_sampler or typical_acceptance_sampler. Instead it "
                f"is {self.draft_token_acceptance_method}")

        if (self.typical_acceptance_sampler_posterior_threshold < 0
                or self.typical_acceptance_sampler_posterior_alpha < 0):
            raise ValueError(
                "Expected typical_acceptance_sampler_posterior_threshold "
                "and typical_acceptance_sampler_posterior_alpha to be >= 0. "
                "Instead found "
                f"typical_acceptance_sampler_posterior_threshold = "
                f"{self.typical_acceptance_sampler_posterior_threshold} and "
                f"typical_acceptance_sampler_posterior_alpha = "
                f"{self.typical_acceptance_sampler_posterior_alpha}")

    @property
    def num_lookahead_slots(self) -> int:
        """The number of additional slots the scheduler should allocate per
        step, in addition to the slots allocated for each known token.

        This is equal to the number of speculative tokens, as each speculative
        token must be scored.
        """
        return self.num_speculative_tokens

    def __repr__(self) -> str:
        if self.ngram_prompt_lookup_max > 0:
            draft_model = "[ngram]"
        else:
            draft_model = self.draft_model_config.model
        num_spec_tokens = self.num_speculative_tokens
        return f"SpeculativeConfig({draft_model=}, {num_spec_tokens=})"


@dataclass
class LoRAConfig:
    max_lora_rank: int
    max_loras: int
    fully_sharded_loras: bool = False
    max_cpu_loras: Optional[int] = None
    lora_dtype: Optional[torch.dtype] = None
    lora_extra_vocab_size: int = 256
    # This is a constant.
    lora_vocab_padding_size: ClassVar[int] = 256
    long_lora_scaling_factors: Optional[Tuple[float]] = None

    def __post_init__(self):
        # Setting the maximum rank to 256 should be able to satisfy the vast
        # majority of applications.
        possible_max_ranks = (8, 16, 32, 64, 128, 256)
        possible_lora_extra_vocab_size = (0, 256, 512)
        if self.max_lora_rank not in possible_max_ranks:
            raise ValueError(
                f"max_lora_rank ({self.max_lora_rank}) must be one of "
                f"{possible_max_ranks}.")
        if self.lora_extra_vocab_size not in possible_lora_extra_vocab_size:
            raise ValueError(
                f"lora_extra_vocab_size ({self.lora_extra_vocab_size}) "
                f"must be one of {possible_lora_extra_vocab_size}.")
        if self.max_loras < 1:
            raise ValueError(f"max_loras ({self.max_loras}) must be >= 1.")
        if self.max_cpu_loras is None:
            self.max_cpu_loras = self.max_loras
        elif self.max_cpu_loras < self.max_loras:
            raise ValueError(
                f"max_cpu_loras ({self.max_cpu_loras}) must be >= "
                f"max_loras ({self.max_loras})")

    def verify_with_model_config(self, model_config: ModelConfig):
        if self.lora_dtype in (None, "auto"):
            self.lora_dtype = model_config.dtype
        elif isinstance(self.lora_dtype, str):
            self.lora_dtype = getattr(torch, self.lora_dtype)
        if model_config.quantization and model_config.quantization not in [
                "awq", "gptq"
        ]:
            # TODO: support all other quants
            logger.warning(f"{model_config.quantization} quantization is not "
                           "tested with LoRA yet.")

    def verify_with_scheduler_config(self, scheduler_config: SchedulerConfig):
        if scheduler_config.chunked_prefill_enabled:
            logger.warning(
                "Chunked Prefill with LoRA is not rigorously tested.")

    def verify_with_parallel_config(self, parallel_config: ParallelConfig):
        if self.lora_vocab_padding_size % parallel_config.world_size != 0:
            raise ValueError("LoRA vocab padding size must be divisible "
                             "by world size.")


@dataclass
class PromptAdapterConfig:
    max_prompt_adapters: int
    max_prompt_adapter_token: int
    max_cpu_prompt_adapters: Optional[int] = None
    prompt_adapter_dtype: Optional[torch.dtype] = None

    def __post_init__(self):
        if self.max_prompt_adapters < 1:
            raise ValueError(f"max_prompt_adapters "
                             f"({self.max_prompt_adapters}) must be >= 1.")
        if self.max_prompt_adapter_token == 0:
            raise ValueError("max_prompt_adapter_token must be set.")
        if self.max_cpu_prompt_adapters is None:
            self.max_cpu_prompt_adapters = self.max_prompt_adapters

    def verify_with_model_config(self, model_config: ModelConfig):
        if self.prompt_adapter_dtype in (None, "auto"):
            self.prompt_adapter_dtype = model_config.dtype
        elif isinstance(self.prompt_adapter_dtype, str):
            self.prompt_adapter_dtype = getattr(torch,
                                                self.prompt_adapter_dtype)
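
# Illustrative sketch (comment added for clarity; not part of the original
# module): PromptAdapterConfig(max_prompt_adapters=2,
# max_prompt_adapter_token=8) passes __post_init__ and defaults
# max_cpu_prompt_adapters to 2; leaving max_prompt_adapter_token at 0 would
# raise "max_prompt_adapter_token must be set."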


@dataclass
class MultiModalConfig:
    """Controls the behavior of multimodal models."""

    limit_per_prompt: Mapping[str, int] = field(default_factory=dict)
    """
    The maximum number of multi-modal input instances allowed per prompt
    for each :class:`~aphrodite.multimodal.MultiModalPlugin`.
    """

    # TODO: Add configs to init vision tower or not.


_STR_DTYPE_TO_TORCH_DTYPE = {
    "half": torch.float16,
    "float16": torch.float16,
    "float": torch.float32,
    "float32": torch.float32,
    "bfloat16": torch.bfloat16,
}

_ROCM_NOT_SUPPORTED_DTYPE = ["float", "float32"]


def _get_and_verify_dtype(
    config: PretrainedConfig,
    dtype: Union[str, torch.dtype],
) -> torch.dtype:
    # NOTE: getattr(config, "torch_dtype", torch.float32) is not correct
    # because config.torch_dtype can be None.
    config_dtype = getattr(config, "torch_dtype", None)
    if config_dtype is None:
        config_dtype = torch.float32

    if isinstance(dtype, str):
        dtype = dtype.lower()
        if dtype == "auto":
            if config_dtype == torch.float32:
                if config.model_type == "gemma2":
                    logger.info(
                        "For Gemma 2, we downcast float32 to bfloat16 instead "
                        "of float16 by default. Please specify `dtype` if you "
                        "want to use float16.")
                    torch_dtype = torch.bfloat16
                else:
                    # Following the common practice, we use float16 for
                    # float32 models.
                    torch_dtype = torch.float16
            else:
                torch_dtype = config_dtype
        else:
            if dtype not in _STR_DTYPE_TO_TORCH_DTYPE:
                raise ValueError(f"Unknown dtype: {dtype}")
            torch_dtype = _STR_DTYPE_TO_TORCH_DTYPE[dtype]
    elif isinstance(dtype, torch.dtype):
        torch_dtype = dtype
    else:
        raise ValueError(f"Unknown dtype: {dtype}")

    if is_hip() and torch_dtype == torch.float32:
        rocm_supported_dtypes = [
            k for k, v in _STR_DTYPE_TO_TORCH_DTYPE.items()
            if (k not in _ROCM_NOT_SUPPORTED_DTYPE)
        ]
        raise ValueError(f"dtype '{dtype}' is not supported in ROCm. "
                         f"Supported dtypes are {rocm_supported_dtypes}")

    # Verify the dtype.
    if torch_dtype != config_dtype:
        if torch_dtype == torch.float32:
            # Upcasting to float32 is allowed.
            pass
        elif config_dtype == torch.float32:
            # Downcasting from float32 to float16 or bfloat16 is allowed.
            pass
        else:
            # Casting between float16 and bfloat16 is allowed with a warning.
            logger.warning(f"Casting {config_dtype} to {torch_dtype}.")

    return torch_dtype
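
# Worked example (comment added for clarity; not part of the original source):
# for a non-Gemma-2 config whose torch_dtype is float32, dtype="auto" resolves
# to torch.float16 above, while an explicit dtype="bfloat16" is looked up in
# _STR_DTYPE_TO_TORCH_DTYPE and returns torch.bfloat16.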


def _get_and_verify_max_len(
    hf_config: PretrainedConfig,
    max_model_len: Optional[int],
    disable_sliding_window: bool,
    sliding_window_len: Optional[Union[int, List[Optional[int]]]],
    rope_scaling_arg: Optional[Dict[str, Any]],
    spec_target_max_model_len: Optional[int] = None,
) -> int:
    """Get and verify the model's maximum length."""
    derived_max_model_len = float("inf")
    possible_keys = [
        # Cohere: needs to prioritize this over "max_position_embeddings"
        "model_max_length",
        # OPT
        "max_position_embeddings",
        # GPT-2
        "n_positions",
        # MPT
        "max_seq_len",
        # ChatGLM2
        "seq_length",
        # Command-R
        "model_max_length",
        # Others
        "max_sequence_length",
        "max_seq_length",
        "seq_len",
    ]
    # Choose the smallest "max_length" from the possible keys.
    max_len_key = None
    for key in possible_keys:
        max_len = getattr(hf_config, key, None)
        if max_len is not None:
            max_len_key = key if max_len < derived_max_model_len \
                else max_len_key
            derived_max_model_len = min(derived_max_model_len, max_len)

    # If sliding window is manually disabled, max_length should be less
    # than the sliding window length in the model config.
    if disable_sliding_window and sliding_window_len is not None:
        sliding_window_len_min = get_min_sliding_window(sliding_window_len)
        max_len_key = "sliding_window" \
            if sliding_window_len_min < derived_max_model_len else max_len_key
        derived_max_model_len = min(derived_max_model_len,
                                    sliding_window_len_min)

    # If none of the keys were found in the config, use a default and
    # log a warning.
    if derived_max_model_len == float("inf"):
        if max_model_len is not None:
            # If max_model_len is specified, we use it.
            return max_model_len

        if spec_target_max_model_len is not None:
            # If this is a speculative draft model, we use the max model len
            # from the target model.
            return spec_target_max_model_len

        default_max_len = 2048
        logger.warning(
            "The model's config.json does not contain any of the following "
            "keys to determine the original maximum length of the model: "
            f"{possible_keys}. Assuming the model's maximum length is "
            f"{default_max_len}.")
        derived_max_model_len = default_max_len

    rope_scaling = getattr(hf_config, "rope_scaling", None)
    if rope_scaling is not None:
        rope_type = rope_scaling.get("type", rope_scaling.get("rope_type"))
        if rope_type not in {"su", "longrope", "llama3"}:
            if disable_sliding_window:
                # TODO: Find a model that supports rope_scaling
                # with sliding window to see if this case should be allowed.
                raise NotImplementedError(
                    "Disabling sliding window is not supported for models "
                    "with rope_scaling. Please raise an issue so we can "
                    "investigate.")

            if rope_type == "mrope":
                scaling_factor = 1
            else:
                assert "factor" in rope_scaling
                scaling_factor = rope_scaling["factor"]
            if rope_type == "yarn":
                derived_max_model_len = rope_scaling[
                    "original_max_position_embeddings"]
            derived_max_model_len *= scaling_factor

    # If the user specified a max length, make sure it is smaller than the
    # derived length from the HF model config.
    if max_model_len is None:
        max_model_len = int(derived_max_model_len)
    elif max_model_len > derived_max_model_len:
        # Some models might have a separate key for specifying model_max_length
        # that will be bigger than derived_max_model_len. We compare user input
        # with model_max_length and allow this override when it's smaller.
        model_max_length = getattr(hf_config, "model_max_length", None)
        if envs.APHRODITE_DYNAMIC_ROPE_SCALING:
            scaling_factor = max_model_len / derived_max_model_len
            hf_config.rope_scaling = {"factor": scaling_factor,
                                      "type": "dynamic"}
            logger.info(
                "Using dynamic RoPE scaling to extend the model's max context "
                f"length from {derived_max_model_len} to {max_model_len}.")
            derived_max_model_len = max_model_len
        elif model_max_length is not None and max_model_len <= model_max_length:
            if disable_sliding_window:
                # TODO: Find a model that has model_max_length
                # with sliding window to see if this case should be allowed.
                raise NotImplementedError(
                    "Disabling sliding window is not supported for models "
                    "with model_max_length in the config. Please raise an "
                    "issue so we can investigate.")
        else:
            raise ValueError(
                f"User-specified max_model_len ({max_model_len}) is greater "
                f"than the derived max_model_len ({max_len_key}="
                f"{derived_max_model_len} or model_max_length="
                f"{model_max_length} in model's config.json). To allow "
                "greater lengths, please set the env var "
                "APHRODITE_DYNAMIC_ROPE_SCALING=1")

    return int(max_model_len)
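
# Worked example (comment added for clarity; not part of the original source):
# if the HF config derives a 4096-token limit but the user asks for
# max_model_len=8192 with APHRODITE_DYNAMIC_ROPE_SCALING enabled, the branch
# above installs rope_scaling = {"factor": 2.0, "type": "dynamic"} and accepts
# the longer context; without that env var, the request is rejected unless the
# config's model_max_length already allows it.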


def get_min_sliding_window(
        sliding_window: Union[int, List[Optional[int]]]) -> int:
    if isinstance(sliding_window, list):
        return min(s for s in sliding_window if s is not None)

    return sliding_window


def get_served_model_name(model: str,
                          served_model_name: Optional[Union[str, List[str]]]):
    """
    If the input is a non-empty list, the first model_name in
    `served_model_name` is taken.
    If the input is a non-empty string, it is used directly.
    For cases where the input is either an empty string or an
    empty list, the fallback is to use `model`.
    """
    if not served_model_name:
        return model
    if isinstance(served_model_name, list):
        return served_model_name[0]
    return served_model_name
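
# Illustrative sketch (comment added for clarity; not part of the original
# module): get_served_model_name("org/model-7b", None) falls back to
# "org/model-7b", while get_served_model_name("org/model-7b",
# ["alias-a", "alias-b"]) returns "alias-a".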


@dataclass
class DecodingConfig:
    """Dataclass which contains the decoding strategy of the engine"""

    # Which guided decoding algo to use. 'outlines' / 'lm-format-enforcer'
    guided_decoding_backend: str = 'lm-format-enforcer'

    def __post_init__(self):
        valid_guided_backends = ['outlines', 'lm-format-enforcer']
        backend = self.guided_decoding_backend
        if backend not in valid_guided_backends:
            raise ValueError(f"Invalid guided_decoding_backend '{backend}', "
                             f"must be one of {valid_guided_backends}")


@dataclass(frozen=True)
class EngineConfig:
    """Dataclass which contains all engine-related configuration. This
    simplifies passing around the distinct configurations in the codebase.
    """

    model_config: ModelConfig
    cache_config: CacheConfig
    parallel_config: ParallelConfig
    scheduler_config: SchedulerConfig
    device_config: DeviceConfig
    load_config: LoadConfig
    lora_config: Optional[LoRAConfig]
    speculative_config: Optional[SpeculativeConfig]
    decoding_config: Optional[DecodingConfig]
    prompt_adapter_config: Optional[PromptAdapterConfig]

    def __post_init__(self):
        """Verify configs are valid & consistent with each other.
        """
        self.model_config.verify_async_output_proc(self.parallel_config,
                                                   self.speculative_config,
                                                   self.device_config)
        self.model_config.verify_with_parallel_config(self.parallel_config)
        self.cache_config.verify_with_parallel_config(self.parallel_config)

        if self.lora_config:
            self.lora_config.verify_with_model_config(self.model_config)
            self.lora_config.verify_with_scheduler_config(
                self.scheduler_config)
            self.lora_config.verify_with_parallel_config(self.parallel_config)
        if self.prompt_adapter_config:
            self.prompt_adapter_config.verify_with_model_config(
                self.model_config)

    def to_dict(self):
        """Return the configs as a dictionary, for use in **kwargs.
        """
        return dict(
            (field.name, getattr(self, field.name)) for field in fields(self))
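
# Illustrative sketch (comment added for clarity; not part of the original
# module): EngineConfig is frozen, so callers typically unpack it rather than
# mutate it, e.g. engine_config.to_dict() yields
# {"model_config": ..., "cache_config": ..., ...}, which can be splatted as
# **kwargs into a component that accepts the individual config objects.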