args_tools.py

  1. import argparse
  2. import dataclasses
  3. import json
  4. from dataclasses import dataclass
  5. from typing import (TYPE_CHECKING, Any, Dict, List, Mapping, Optional, Tuple,
  6. Type, Union)
  7. from loguru import logger
  8. import aphrodite.common.envs as envs
  9. from aphrodite.common.config import (CacheConfig, ConfigFormat, DecodingConfig,
  10. DeviceConfig, EngineConfig, LoadConfig,
  11. LoadFormat, LoRAConfig, ModelConfig,
  12. ParallelConfig, PromptAdapterConfig,
  13. SchedulerConfig, SpeculativeConfig,
  14. TokenizerPoolConfig)
  15. from aphrodite.common.utils import FlexibleArgumentParser, is_cpu
  16. from aphrodite.executor.executor_base import ExecutorBase
  17. from aphrodite.quantization import QUANTIZATION_METHODS
  18. from aphrodite.transformers_utils.utils import check_gguf_file
  19. from aphrodite.triton_utils import HAS_TRITON
  20. if TYPE_CHECKING:
  21. from aphrodite.transformers_utils.tokenizer_group import BaseTokenizerGroup
  22. APHRODITE_USE_RAY_SPMD_WORKER = envs.APHRODITE_USE_RAY_SPMD_WORKER
  23. DEVICE_OPTIONS = [
  24. "auto",
  25. "cuda",
  26. "neuron",
  27. "cpu",
  28. "openvino",
  29. "tpu",
  30. "xpu",
  31. ]
  32. def nullable_kvs(val: str) -> Optional[Mapping[str, int]]:
  33. if len(val) == 0:
  34. return None
  35. out_dict: Dict[str, int] = {}
  36. for item in val.split(","):
  37. try:
  38. key, value = item.split("=")
39. except ValueError as exc:
  40. msg = "Each item should be in the form KEY=VALUE"
  41. raise ValueError(msg) from exc
  42. try:
  43. out_dict[key] = int(value)
  44. except ValueError as exc:
  45. msg = f"Failed to parse value of item {key}={value}"
  46. raise ValueError(msg) from exc
  47. return out_dict
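# Illustration (not part of the original file): nullable_kvs parses the
# comma-separated KEY=VALUE syntax used by --limit-mm-per-prompt, e.g.
#   nullable_kvs("image=16,video=2")  -> {"image": 16, "video": 2}
#   nullable_kvs("")                  -> None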
  48. @dataclass
  49. class EngineArgs:
  50. """Arguments for Aphrodite engine."""
  51. # Model Options
  52. model: str
  53. seed: int = 0
  54. served_model_name: Optional[Union[str, List[str]]] = None
  55. tokenizer: Optional[str] = None
  56. revision: Optional[str] = None
  57. code_revision: Optional[str] = None
  58. tokenizer_revision: Optional[str] = None
  59. tokenizer_mode: str = "auto"
  60. trust_remote_code: bool = False
  61. download_dir: Optional[str] = None
  62. max_model_len: Optional[int] = None
  63. max_context_len_to_capture: Optional[int] = None
  64. max_seq_len_to_capture: Optional[int] = None
  65. rope_scaling: Optional[dict] = None
  66. rope_theta: Optional[float] = None
  67. model_loader_extra_config: Optional[dict] = None
  68. enforce_eager: Optional[bool] = None
  69. skip_tokenizer_init: bool = False
  70. tokenizer_pool_size: int = 0
  71. # Note: Specifying a tokenizer pool by passing a class
  72. # is intended for expert use only. The API may change without
  73. # notice.
  74. tokenizer_pool_type: Union[str, Type["BaseTokenizerGroup"]] = "ray"
  75. tokenizer_pool_extra_config: Optional[dict] = None
  76. limit_mm_per_prompt: Optional[Mapping[str, int]] = None
  77. max_logprobs: int = 10 # OpenAI default is 5, setting to 10 because ST
  78. # Device Options
  79. device: str = "auto"
  80. # Load Options
  81. load_format: str = "auto"
  82. config_format: str = "auto"
  83. dtype: str = "auto"
  84. ignore_patterns: Optional[Union[str, List[str]]] = None
  85. # Parallel Options
  86. worker_use_ray: Optional[bool] = False
  87. tensor_parallel_size: int = 1
  88. pipeline_parallel_size: int = 1
  89. ray_workers_use_nsight: bool = False
  90. disable_custom_all_reduce: bool = False
  91. # Note: Specifying a custom executor backend by passing a class
  92. # is intended for expert use only. The API may change without
  93. # notice.
  94. distributed_executor_backend: Optional[Union[str,
  95. Type[ExecutorBase]]] = None
  96. max_parallel_loading_workers: Optional[int] = None
  97. # Quantization Options
  98. quantization: Optional[str] = None
  99. quantization_param_path: Optional[str] = None
  100. preemption_mode: Optional[str] = None
  101. deepspeed_fp_bits: Optional[int] = None
  102. quant_llm_fp_bits: Optional[int] = None
  103. quant_llm_exp_bits: Optional[int] = None
  104. # Cache Options
  105. kv_cache_dtype: str = "auto"
  106. block_size: int = 16
  107. enable_prefix_caching: Optional[bool] = False
  108. num_gpu_blocks_override: Optional[int] = None
  109. disable_sliding_window: bool = False
  110. gpu_memory_utilization: float = 0.90
  111. swap_space: float = 4 # GiB
  112. cpu_offload_gb: float = 0 # GiB
  113. # Scheduler Options
  114. use_v2_block_manager: bool = False
  115. scheduler_delay_factor: float = 0.0
  116. enable_chunked_prefill: Optional[bool] = None
  117. guided_decoding_backend: str = 'lm-format-enforcer'
  118. max_num_batched_tokens: Optional[int] = None
  119. max_num_seqs: int = 256
  120. num_scheduler_steps: int = 1
  121. single_user_mode: bool = False
  122. # Speculative Decoding Options
  123. num_lookahead_slots: int = 0
  124. speculative_model: Optional[str] = None
  125. speculative_model_quantization: Optional[str] = None
  126. num_speculative_tokens: Optional[int] = None
  127. speculative_max_model_len: Optional[int] = None
  128. ngram_prompt_lookup_max: Optional[int] = None
  129. ngram_prompt_lookup_min: Optional[int] = None
  130. speculative_draft_tensor_parallel_size: Optional[int] = None
  131. speculative_disable_by_batch_size: Optional[int] = None
  132. spec_decoding_acceptance_method: str = 'rejection_sampler'
  133. typical_acceptance_sampler_posterior_threshold: Optional[float] = None
  134. typical_acceptance_sampler_posterior_alpha: Optional[float] = None
  135. disable_logprobs_during_spec_decoding: Optional[bool] = None
  136. # Adapter Options
  137. enable_lora: bool = False
  138. max_loras: int = 1
  139. max_lora_rank: int = 16
  140. lora_extra_vocab_size: int = 256
  141. lora_dtype: str = "auto"
  142. max_cpu_loras: Optional[int] = None
  143. long_lora_scaling_factors: Optional[Tuple[float]] = None
  144. fully_sharded_loras: bool = False
  145. qlora_adapter_name_or_path: Optional[str] = None
  146. enable_prompt_adapter: bool = False
  147. max_prompt_adapters: int = 1
  148. max_prompt_adapter_token: int = 0
  149. # Log Options
  150. disable_log_stats: bool = False
  151. disable_async_output_proc: bool = False
  152. override_neuron_config: Optional[Dict[str, Any]] = None
  153. def __post_init__(self):
  154. if self.tokenizer is None:
  155. self.tokenizer = self.model
  156. if is_cpu():
  157. self.distributed_executor_backend = None
  158. @staticmethod
  159. def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser:
  160. """Shared CLI arguments for the Aphrodite engine."""
  161. # Model Options
  162. parser.add_argument(
  163. "--model",
  164. type=str,
  165. default="EleutherAI/pythia-70m-deduped",
  166. help="Category: Model Options\n"
  167. "name or path of the huggingface model to use",
  168. )
  169. parser.add_argument("--seed",
  170. type=int,
  171. default=EngineArgs.seed,
  172. help="Category: Model Options\n"
  173. "random seed")
  174. parser.add_argument(
  175. "--served-model-name",
  176. nargs="+",
  177. type=str,
  178. default=None,
  179. help="Category: API Options\n"
  180. "The model name(s) used in the API. If multiple "
  181. "names are provided, the server will respond to any "
  182. "of the provided names. The model name in the model "
  183. "field of a response will be the first name in this "
  184. "list. If not specified, the model name will be the "
  185. "same as the `--model` argument. Noted that this name(s)"
  186. "will also be used in `model_name` tag content of "
  187. "prometheus metrics, if multiple names provided, metrics"
  188. "tag will take the first one.")
  189. parser.add_argument(
  190. "--tokenizer",
  191. type=str,
  192. default=EngineArgs.tokenizer,
  193. help="Category: Model Options\n"
  194. "name or path of the huggingface tokenizer to use",
  195. )
  196. parser.add_argument(
  197. "--revision",
  198. type=str,
  199. default=None,
  200. help="Category: Model Options\n"
  201. "the specific model version to use. It can be a branch "
  202. "name, a tag name, or a commit id. If unspecified, will use "
  203. "the default version.",
  204. )
  205. parser.add_argument(
  206. "--code-revision",
  207. type=str,
  208. default=None,
  209. help="Category: Model Options\n"
  210. "the specific revision to use for the model code on "
  211. "Hugging Face Hub. It can be a branch name, a tag name, or a "
  212. "commit id. If unspecified, will use the default version.",
  213. )
  214. parser.add_argument(
  215. "--tokenizer-revision",
  216. type=str,
  217. default=None,
  218. help="Category: Model Options\n"
  219. "the specific tokenizer version to use. It can be a branch "
  220. "name, a tag name, or a commit id. If unspecified, will use "
  221. "the default version.",
  222. )
  223. parser.add_argument(
  224. "--tokenizer-mode",
  225. type=str,
  226. default=EngineArgs.tokenizer_mode,
  227. choices=['auto', 'slow', 'mistral'],
  228. help='The tokenizer mode.\n\n* "auto" will use the '
  229. 'fast tokenizer if available.\n* "slow" will '
  230. 'always use the slow tokenizer. \n* '
  231. '"mistral" will always use the `mistral_common` tokenizer.')
  232. parser.add_argument(
  233. "--trust-remote-code",
  234. action="store_true",
  235. help="Category: Model Options\n"
  236. "trust remote code from huggingface",
  237. )
  238. parser.add_argument(
  239. "--download-dir",
  240. type=str,
  241. default=EngineArgs.download_dir,
  242. help="Category: Model Options\n"
  243. "directory to download and load the weights, "
  244. "default to the default cache dir of "
  245. "huggingface",
  246. )
  247. parser.add_argument(
  248. "--max-model-len",
  249. type=int,
  250. default=EngineArgs.max_model_len,
  251. help="Category: Model Options\n"
  252. "model context length. If unspecified, "
  253. "will be automatically derived from the model.",
  254. )
  255. parser.add_argument("--max-context-len-to-capture",
  256. type=int,
  257. default=EngineArgs.max_context_len_to_capture,
  258. help="Category: Model Options\n"
  259. "Maximum context length covered by CUDA "
  260. "graphs. When a sequence has context length "
  261. "larger than this, we fall back to eager mode. "
  262. "(DEPRECATED. Use --max-seq_len-to-capture instead"
  263. ")")
  264. parser.add_argument("--max-seq-len-to-capture",
  265. type=int,
  266. default=EngineArgs.max_seq_len_to_capture,
  267. help='Maximum sequence length covered by CUDA '
  268. 'graphs. When a sequence has context length '
  269. 'larger than this, we fall back to eager mode. '
  270. 'Additionally for encoder-decoder models, if the '
  271. 'sequence length of the encoder input is larger '
  272. 'than this, we fall back to the eager mode.')
  273. parser.add_argument('--rope-scaling',
  274. default=None,
  275. type=json.loads,
  276. help='Category: Model Options\n'
  277. 'RoPE scaling configuration in JSON format. '
  278. 'For example, {"type":"dynamic","factor":2.0}')
  279. parser.add_argument('--rope-theta',
  280. default=None,
  281. type=float,
  282. help='Category: Model Options\n'
  283. 'RoPE theta. Use with `rope_scaling`. In '
  284. 'some cases, changing the RoPE theta improves the '
  285. 'performance of the scaled model.')
  286. parser.add_argument("--model-loader-extra-config",
  287. type=str,
  288. default=EngineArgs.model_loader_extra_config,
  289. help="Category: Model Options\n"
  290. "Extra config for model loader. "
  291. "This will be passed to the model loader "
  292. "corresponding to the chosen load_format. "
  293. "This should be a JSON string that will be "
  294. "parsed into a dictionary.")
  295. parser.add_argument(
  296. "--enforce-eager",
  297. action=StoreBoolean,
  298. default=EngineArgs.enforce_eager,
  299. nargs="?",
  300. const="True",
  301. help="Category: Model Options\n"
  302. "Always use eager-mode PyTorch. If False, "
  303. "will use eager mode and CUDA graph in hybrid "
  304. "for maximal performance and flexibility.",
  305. )
  306. parser.add_argument("--skip-tokenizer-init",
  307. action="store_true",
  308. help="Category: Model Options\n"
  309. "Skip initialization of tokenizer and detokenizer")
  310. parser.add_argument("--tokenizer-pool-size",
  311. type=int,
  312. default=EngineArgs.tokenizer_pool_size,
  313. help="Category: Model Options\n"
  314. "Size of tokenizer pool to use for "
  315. "asynchronous tokenization. If 0, will "
  316. "use synchronous tokenization.")
  317. parser.add_argument("--tokenizer-pool-type",
  318. type=str,
  319. default=EngineArgs.tokenizer_pool_type,
  320. help="Category: Model Options\n"
  321. "The type of tokenizer pool to use for "
  322. "asynchronous tokenization. Ignored if "
  323. "tokenizer_pool_size is 0.")
  324. parser.add_argument("--tokenizer-pool-extra-config",
  325. type=str,
  326. default=EngineArgs.tokenizer_pool_extra_config,
  327. help="Category: Model Options\n"
  328. "Extra config for tokenizer pool. "
  329. "This should be a JSON string that will be "
  330. "parsed into a dictionary. Ignored if "
  331. "tokenizer_pool_size is 0.")
  332. # Multimodal related configs
  333. parser.add_argument(
  334. '--limit-mm-per-prompt',
  335. type=nullable_kvs,
  336. default=EngineArgs.limit_mm_per_prompt,
  337. # The default value is given in
  338. # MultiModalRegistry.init_mm_limits_per_prompt
  339. help=('For each multimodal plugin, limit how many '
  340. 'input instances to allow for each prompt. '
  341. 'Expects a comma-separated list of items, '
  342. 'e.g.: `image=16,video=2` allows a maximum of 16 '
  343. 'images and 2 videos per prompt. Defaults to 1 for '
  344. 'each modality.'))
  345. parser.add_argument(
  346. "--max-logprobs",
  347. type=int,
  348. default=EngineArgs.max_logprobs,
  349. help="Category: Model Options\n"
  350. "maximum number of log probabilities to "
  351. "return.",
  352. )
  353. # Device Options
  354. parser.add_argument(
  355. "--device",
  356. type=str,
  357. default=EngineArgs.device,
  358. choices=DEVICE_OPTIONS,
  359. help=("Category: Model Options\n"
  360. "Device to use for model execution."),
  361. )
  362. # Load Options
  363. parser.add_argument(
  364. '--load-format',
  365. type=str,
  366. default=EngineArgs.load_format,
  367. choices=[f.value for f in LoadFormat],
  368. help='Category: Model Options\n'
  369. 'The format of the model weights to load.\n\n'
  370. '* "auto" will try to load the weights in the safetensors format '
  371. 'and fall back to the pytorch bin format if safetensors format '
  372. 'is not available.\n'
  373. '* "pt" will load the weights in the pytorch bin format.\n'
  374. '* "safetensors" will load the weights in the safetensors format.\n'
  375. '* "npcache" will load the weights in pytorch format and store '
  376. 'a numpy cache to speed up the loading.\n'
  377. '* "dummy" will initialize the weights with random values, '
  378. 'which is mainly for profiling.\n'
  379. '* "tensorizer" will load the weights using tensorizer from '
  380. 'CoreWeave. See the Tensorize Aphrodite Model script in the '
  381. 'Examples section for more information.\n'
  382. '* "bitsandbytes" will load the weights using bitsandbytes '
  383. 'quantization.\n')
  384. parser.add_argument(
  385. '--config-format',
  386. default=EngineArgs.config_format,
  387. choices=[f.value for f in ConfigFormat],
  388. help='The format of the model config to load.\n\n'
  389. '* "auto" will try to load the config in hf format '
  390. 'if available else it will try to load in mistral format. '
  391. 'Mistral format is specific to mistral models and is not '
  392. 'compatible with other models.')
  393. parser.add_argument(
  394. '--dtype',
  395. type=str,
  396. default=EngineArgs.dtype,
  397. choices=[
  398. 'auto', 'half', 'float16', 'bfloat16', 'float', 'float32'
  399. ],
  400. help='Category: Model Options\n'
  401. 'Data type for model weights and activations.\n\n'
  402. '* "auto" will use FP16 precision for FP32 and FP16 models, and '
  403. 'BF16 precision for BF16 models.\n'
  404. '* "half" for FP16. Recommended for AWQ quantization.\n'
  405. '* "float16" is the same as "half".\n'
  406. '* "bfloat16" for a balance between precision and range.\n'
  407. '* "float" is shorthand for FP32 precision.\n'
  408. '* "float32" for FP32 precision.')
  409. parser.add_argument(
  410. '--ignore-patterns',
  411. action="append",
  412. type=str,
  413. default=[],
  414. help="Category: Model Options\n"
  415. "The pattern(s) to ignore when loading the model."
  416. "Defaults to 'original/**/*' to avoid repeated loading of llama's "
  417. "checkpoints.")
  418. # Parallel Options
  419. parser.add_argument(
  420. '--worker-use-ray',
  421. action='store_true',
  422. help='Category: Parallel Options\n'
  423. 'Deprecated, use --distributed-executor-backend=ray.')
  424. parser.add_argument(
  425. "--tensor-parallel-size",
  426. "-tp",
  427. type=int,
  428. default=EngineArgs.tensor_parallel_size,
  429. help="Category: Parallel Options\n"
  430. "number of tensor parallel replicas, i.e. the number of GPUs "
  431. "to use.")
  432. parser.add_argument(
  433. "--pipeline-parallel-size",
  434. "-pp",
  435. type=int,
  436. default=EngineArgs.pipeline_parallel_size,
  437. help="Category: Parallel Options\n"
  438. "number of pipeline stages. Currently not supported.")
  439. parser.add_argument(
  440. "--ray-workers-use-nsight",
  441. action="store_true",
  442. help="Category: Parallel Options\n"
  443. "If specified, use nsight to profile ray workers",
  444. )
  445. parser.add_argument(
  446. "--disable-custom-all-reduce",
  447. action="store_true",
  448. default=EngineArgs.disable_custom_all_reduce,
  449. help="Category: Model Options\n"
  450. "See ParallelConfig",
  451. )
  452. parser.add_argument(
  453. '--distributed-executor-backend',
  454. choices=['ray', 'mp'],
  455. default=EngineArgs.distributed_executor_backend,
  456. help='Category: Parallel Options\n'
  457. 'Backend to use for distributed serving. When more than 1 GPU '
  458. 'is used, will be automatically set to "ray" if installed '
  459. 'or "mp" (multiprocessing) otherwise.')
  460. parser.add_argument(
  461. "--max-parallel-loading-workers",
  462. type=int,
  463. default=EngineArgs.max_parallel_loading_workers,
  464. help="Category: Parallel Options\n"
  465. "load model sequentially in multiple batches, "
  466. "to avoid RAM OOM when using tensor "
  467. "parallel and large models",
  468. )
  469. # Quantization Options
  470. parser.add_argument(
  471. "--quantization",
  472. "-q",
  473. type=str,
  474. choices=[*QUANTIZATION_METHODS, None],
  475. default=EngineArgs.quantization,
  476. help="Category: Quantization Options\n"
  477. "Method used to quantize the weights. If "
  478. "None, we first check the `quantization_config` "
  479. "attribute in the model config file. If that is "
  480. "None, we assume the model weights are not "
  481. "quantized and use `dtype` to determine the data "
  482. "type of the weights.",
  483. )
  484. parser.add_argument(
  485. '--quantization-param-path',
  486. type=str,
  487. default=None,
  488. help='Category: Quantization Options\n'
  489. 'Path to the JSON file containing the KV cache '
  490. 'scaling factors. This should generally be supplied, when '
  491. 'KV cache dtype is FP8. Otherwise, KV cache scaling factors '
  492. 'default to 1.0, which may cause accuracy issues. '
493. 'FP8_E5M2 (without scaling) is only supported on CUDA versions '
  494. 'greater than 11.8. On ROCm (AMD GPU), FP8_E4M3 is instead '
  495. 'supported for common inference criteria. ')
  496. parser.add_argument(
  497. '--preemption-mode',
  498. type=str,
  499. default=None,
  500. help='Category: Scheduler Options\n'
501. 'If \'recompute\', the engine performs preemption by '
502. 'recomputing; if \'swap\', the engine performs preemption by '
503. 'block swapping.')
  504. parser.add_argument("--deepspeed-fp-bits",
  505. type=int,
  506. default=None,
  507. help="Category: Quantization Options\n"
  508. "Number of floating bits to use for the deepspeed "
  509. "quantization. Supported bits are: 4, 6, 8, 12.")
  510. parser.add_argument("--quant-llm-fp-bits",
  511. type=int,
  512. default=None,
  513. help="Category: Quantization Options\n"
  514. "Number of floating bits to use for the quant_llm "
  515. "quantization. Supported bits are: 4 to 15.")
  516. parser.add_argument("--quant-llm-exp-bits",
  517. type=int,
  518. default=None,
  519. help="Category: Quantization Options\n"
  520. "Number of exponent bits to use for the quant_llm "
  521. "quantization. Supported bits are: 1 to 5.")
  522. # Cache Options
  523. parser.add_argument(
  524. '--kv-cache-dtype',
  525. type=str,
  526. choices=['auto', 'fp8', 'fp8_e5m2', 'fp8_e4m3'],
  527. default=EngineArgs.kv_cache_dtype,
  528. help='Category: Cache Options\n'
  529. 'Data type for kv cache storage. If "auto", will use model '
  530. 'data type. CUDA 11.8+ supports fp8 (=fp8_e4m3) and fp8_e5m2. '
  531. 'ROCm (AMD GPU) supports fp8 (=fp8_e4m3)')
  532. parser.add_argument(
  533. "--block-size",
  534. type=int,
  535. default=EngineArgs.block_size,
  536. choices=[8, 16, 32],
  537. help="Category: Cache Options\n"
  538. "token block size for contiguous chunks of "
  539. "tokens. This is ignored on neuron devices and "
  540. "set to max-model-len."
  541. )
  542. parser.add_argument(
  543. "--enable-prefix-caching",
  544. "--context-shift",
  545. action="store_true",
  546. help="Category: Cache Options\n"
  547. "Enable automatic prefix caching.",
  548. )
  549. parser.add_argument(
  550. "--num-gpu-blocks-override",
  551. type=int,
  552. default=None,
  553. help="Category: Cache Options Options\n"
  554. "If specified, ignore GPU profiling result and use this "
  555. "number of GPU blocks. Used for testing preemption.")
  556. parser.add_argument('--disable-sliding-window',
  557. action='store_true',
558. help='Category: Cache Options\n'
  559. 'Disables sliding window, '
  560. 'capping to sliding window size')
  561. parser.add_argument(
  562. "--gpu-memory-utilization",
  563. "-gmu",
  564. type=float,
  565. default=EngineArgs.gpu_memory_utilization,
  566. help="Category: Cache Options\n"
  567. "The fraction of GPU memory to be used for "
  568. "the model executor, which can range from 0 to 1."
  569. "If unspecified, will use the default value of 0.9.",
  570. )
  571. parser.add_argument(
  572. "--swap-space",
  573. type=float,
  574. default=EngineArgs.swap_space,
  575. help="Category: Cache Options\n"
  576. "CPU swap space size (GiB) per GPU",
  577. )
  578. parser.add_argument(
  579. '--cpu-offload-gb',
  580. type=float,
  581. default=0,
  582. help='Category: Cache Options\n'
  583. 'The space in GiB to offload to CPU, per GPU. '
  584. 'Default is 0, which means no offloading. Intuitively, '
  585. 'this argument can be seen as a virtual way to increase '
  586. 'the GPU memory size. For example, if you have one 24 GB '
  587. 'GPU and set this to 10, virtually you can think of it as '
588. 'a 34 GB GPU. Then you can load a 13B model with BF16 weights, '
589. 'which requires at least 26 GB of GPU memory. Note that this '
590. 'requires a fast CPU-GPU interconnect, as part of the model is '
  591. 'loaded from CPU memory to GPU memory on the fly in each '
  592. 'model forward pass.')
  593. # Scheduler Options
  594. parser.add_argument("--use-v2-block-manager",
  595. action="store_true",
  596. help="Category: Scheduler Options\n"
  597. "Use the v2 block manager.")
  598. parser.add_argument(
  599. "--scheduler-delay-factor",
  600. "-sdf",
  601. type=float,
  602. default=EngineArgs.scheduler_delay_factor,
  603. help="Category: Scheduler Options\n"
  604. "Apply a delay (of delay factor multiplied by previous "
  605. "prompt latency) before scheduling next prompt.")
  606. parser.add_argument(
  607. "--enable-chunked-prefill",
  608. action=StoreBoolean,
  609. default=EngineArgs.enable_chunked_prefill,
  610. nargs="?",
  611. const="True",
  612. help="Category: Scheduler Options\n"
  613. "If True, the prefill requests can be chunked based on the "
  614. "max_num_batched_tokens.")
  615. parser.add_argument(
  616. '--guided-decoding-backend',
  617. type=str,
  618. default='lm-format-enforcer',
  619. choices=['outlines', 'lm-format-enforcer'],
  620. help='Category: Scheduler Options\n'
  621. 'Which engine will be used for guided decoding'
622. ' (JSON schema / regex etc.) by default. Currently supports '
  623. 'https://github.com/outlines-dev/outlines and '
  624. 'https://github.com/noamgat/lm-format-enforcer.'
  625. ' Can be overridden per request via guided_decoding_backend'
  626. ' parameter.')
  627. parser.add_argument(
  628. "--max-num-batched-tokens",
  629. type=int,
  630. default=EngineArgs.max_num_batched_tokens,
  631. help="Category: KV Cache Options\n"
  632. "maximum number of batched tokens per "
  633. "iteration",
  634. )
  635. parser.add_argument(
  636. "--max-num-seqs",
  637. type=int,
  638. default=EngineArgs.max_num_seqs,
  639. help="Category: API Options\n"
  640. "maximum number of sequences per iteration",
  641. )
  642. parser.add_argument('--single-user-mode',
  643. action='store_true',
  644. help='Category: API Options\n'
  645. 'If True, we only allocate blocks for one sequence '
  646. 'and use the maximum sequence length as the number '
  647. 'of tokens.')
  648. parser.add_argument('--num-scheduler-steps',
  649. type=int,
  650. default=1,
  651. help=('Maximum number of forward steps per '
  652. 'scheduler call.'))
  653. # Speculative Decoding Options
  654. parser.add_argument("--num-lookahead-slots",
  655. type=int,
  656. default=EngineArgs.num_lookahead_slots,
  657. help="Category: Speculative Decoding Options\n"
  658. "Experimental scheduling config necessary for "
  659. "speculative decoding. This will be replaced by "
  660. "speculative decoding config in the future; it is "
  661. "present for testing purposes until then.")
  662. parser.add_argument(
  663. "--speculative-model",
  664. type=str,
  665. default=EngineArgs.speculative_model,
  666. help="Category: Speculative Decoding Options\n"
  667. "The name of the draft model to be used in speculative decoding.")
  668. # Quantization settings for speculative model.
  669. parser.add_argument(
  670. '--speculative-model-quantization',
  671. type=str,
  672. choices=[*QUANTIZATION_METHODS, None],
  673. default=EngineArgs.speculative_model_quantization,
674. help='Method used to quantize the weights of the speculative model. '
  675. 'If None, we first check the `quantization_config` '
  676. 'attribute in the model config file. If that is '
  677. 'None, we assume the model weights are not '
  678. 'quantized and use `dtype` to determine the data '
  679. 'type of the weights.')
  680. parser.add_argument("--num-speculative-tokens",
  681. type=int,
  682. default=EngineArgs.num_speculative_tokens,
  683. help="Category: Speculative Decoding Options\n"
  684. "The number of speculative tokens to sample from "
  685. "the draft model in speculative decoding")
  686. parser.add_argument(
  687. "--speculative-max-model-len",
688. type=int,
  689. default=EngineArgs.speculative_max_model_len,
  690. help="Category: Speculative Decoding Options\n"
  691. "The maximum sequence length supported by the "
  692. "draft model. Sequences over this length will skip "
  693. "speculation.")
  694. parser.add_argument(
  695. "--ngram-prompt-lookup-max",
  696. type=int,
  697. default=EngineArgs.ngram_prompt_lookup_max,
  698. help="Category: Speculative Decoding Options\n"
  699. "Max size of window for ngram prompt lookup in speculative "
  700. "decoding.")
  701. parser.add_argument(
  702. "--ngram-prompt-lookup-min",
  703. type=int,
  704. default=EngineArgs.ngram_prompt_lookup_min,
  705. help="Category: Speculative Decoding Options\n"
  706. "Min size of window for ngram prompt lookup in speculative "
  707. "decoding.")
  708. parser.add_argument(
  709. "--speculative-draft-tensor-parallel-size",
  710. "-spec-draft-tp",
  711. type=int,
  712. default=EngineArgs.speculative_draft_tensor_parallel_size,
  713. help="Category: Speculative Decoding Options\n"
  714. "Number of tensor parallel replicas for "
  715. "the draft model in speculative decoding.")
  716. parser.add_argument(
  717. "--speculative-disable-by-batch-size",
  718. type=int,
  719. default=EngineArgs.speculative_disable_by_batch_size,
  720. help="Category: Speculative Decoding Options\n"
  721. "Disable speculative decoding for new incoming requests "
  722. "if the number of enqueue requests is larger than this value.")
  723. parser.add_argument(
  724. '--spec-decoding-acceptance-method',
  725. type=str,
  726. default=EngineArgs.spec_decoding_acceptance_method,
  727. choices=['rejection_sampler', 'typical_acceptance_sampler'],
  728. help='Category: Speculative Decoding Options\n'
  729. 'Specify the acceptance method to use during draft token '
  730. 'verification in speculative decoding. Two types of acceptance '
  731. 'routines are supported: '
  732. '1) RejectionSampler which does not allow changing the '
  733. 'acceptance rate of draft tokens, '
  734. '2) TypicalAcceptanceSampler which is configurable, allowing for '
  735. 'a higher acceptance rate at the cost of lower quality, '
  736. 'and vice versa.')
  737. parser.add_argument(
  738. '--typical-acceptance-sampler-posterior-threshold',
  739. type=float,
  740. default=EngineArgs.typical_acceptance_sampler_posterior_threshold,
  741. help='Category: Speculative Decoding Options\n'
  742. 'Set the lower bound threshold for the posterior '
  743. 'probability of a token to be accepted. This threshold is '
  744. 'used by the TypicalAcceptanceSampler to make sampling decisions '
  745. 'during speculative decoding. Defaults to 0.09')
  746. parser.add_argument(
  747. '--typical-acceptance-sampler-posterior-alpha',
  748. type=float,
  749. default=EngineArgs.typical_acceptance_sampler_posterior_alpha,
  750. help='Category: Speculative Decoding Options\n'
  751. 'A scaling factor for the entropy-based threshold for token '
  752. 'acceptance in the TypicalAcceptanceSampler. Typically defaults '
  753. 'to sqrt of --typical-acceptance-sampler-posterior-threshold '
  754. 'i.e. 0.3')
  755. parser.add_argument(
  756. '--disable-logprobs-during-spec-decoding',
757. action=StoreBoolean, nargs="?", const="True",
  758. default=EngineArgs.disable_logprobs_during_spec_decoding,
  759. help='Category: Speculative Decoding Options\n'
  760. 'If set to True, token log probabilities are not returned '
  761. 'during speculative decoding. If set to False, log probabilities '
  762. 'are returned according to the settings in SamplingParams. If '
  763. 'not specified, it defaults to True. Disabling log probabilities '
  764. 'during speculative decoding reduces latency by skipping logprob '
  765. 'calculation in proposal sampling, target sampling, and after '
  766. 'accepted tokens are determined.')
  767. # Adapter Options
  768. parser.add_argument(
  769. "--enable-lora",
  770. action="store_true",
  771. help="Category: Adapter Options\n"
  772. "If True, enable handling of LoRA adapters.",
  773. )
  774. parser.add_argument(
  775. "--max-loras",
  776. type=int,
  777. default=EngineArgs.max_loras,
  778. help="Category: Adapter Options\n"
  779. "Max number of LoRAs in a single batch.",
  780. )
  781. parser.add_argument(
  782. "--max-lora-rank",
  783. type=int,
  784. default=EngineArgs.max_lora_rank,
  785. help="Category: Adapter Options\n"
  786. "Max LoRA rank.",
  787. )
  788. parser.add_argument(
  789. "--lora-extra-vocab-size",
  790. type=int,
  791. default=EngineArgs.lora_extra_vocab_size,
  792. help=("Category: Adapter Options\n"
  793. "Maximum size of extra vocabulary that can be "
  794. "present in a LoRA adapter (added to the base "
  795. "model vocabulary)."),
  796. )
  797. parser.add_argument(
  798. "--lora-dtype",
  799. type=str,
  800. default=EngineArgs.lora_dtype,
  801. choices=["auto", "float16", "bfloat16", "float32"],
  802. help=("Category: Adapter Options\n"
  803. "Data type for LoRA. If auto, will default to "
  804. "base model dtype."),
  805. )
  806. parser.add_argument(
  807. "--max-cpu-loras",
  808. type=int,
  809. default=EngineArgs.max_cpu_loras,
  810. help=("Category: Adapter Options\n"
  811. "Maximum number of LoRAs to store in CPU memory. "
  812. "Must be >= than max_num_seqs. "
  813. "Defaults to max_num_seqs."),
  814. )
  815. parser.add_argument(
  816. "--long-lora-scaling-factors",
  817. type=str,
  818. default=EngineArgs.long_lora_scaling_factors,
  819. help=("Category: Adapter Options\n"
  820. "Specify multiple scaling factors (which can "
  821. "be different from base model scaling factor "
  822. "- see eg. Long LoRA) to allow for multiple "
  823. "LoRA adapters trained with those scaling "
  824. "factors to be used at the same time. If not "
  825. "specified, only adapters trained with the "
  826. "base model scaling factor are allowed."))
  827. parser.add_argument(
  828. "--fully-sharded-loras",
  829. action='store_true',
  830. help=("Category: Adapter Options\n"
  831. "By default, only half of the LoRA computation is sharded "
  832. "with tensor parallelism. Enabling this will use the fully "
  833. "sharded layers. At high sequence length, max rank or "
  834. "tensor parallel size, this is likely faster."))
  835. parser.add_argument("--qlora-adapter-name-or-path",
  836. type=str,
  837. default=None,
  838. help="Category: Adapter Options\n"
  839. "Name or path of the LoRA adapter to use.")
  840. parser.add_argument('--enable-prompt-adapter',
  841. action='store_true',
  842. help='Category: Adapter Options\n'
  843. 'If True, enable handling of PromptAdapters.')
  844. parser.add_argument('--max-prompt-adapters',
  845. type=int,
  846. default=EngineArgs.max_prompt_adapters,
  847. help='Category: Adapter Options\n'
  848. 'Max number of PromptAdapters in a batch.')
  849. parser.add_argument('--max-prompt-adapter-token',
  850. type=int,
  851. default=EngineArgs.max_prompt_adapter_token,
  852. help='Category: Adapter Options\n'
853. 'Max number of PromptAdapter tokens.')
  854. # Log Options
  855. parser.add_argument(
  856. "--disable-log-stats",
  857. action="store_true",
  858. help="Category: Log Options\n"
  859. "disable logging statistics",
  860. )
  861. parser.add_argument(
  862. "--disable-async-output-proc",
  863. action="store_true",
  864. default=EngineArgs.disable_async_output_proc,
  865. help="Disable async output processing. THis may result in "
  866. "lower performance.")
  867. parser.add_argument(
  868. '--override-neuron-config',
  869. type=lambda configs: {
  870. str(key): value
  871. for key, value in
  872. (config.split(':') for config in configs.split(','))
  873. },
  874. default=None,
  875. help="override or set neuron device configuration.")
  876. return parser
  877. @classmethod
  878. def from_cli_args(cls, args: argparse.Namespace) -> "EngineArgs":
  879. # Get the list of attributes of this dataclass.
  880. attrs = [attr.name for attr in dataclasses.fields(cls)]
  881. # Set the attributes from the parsed arguments.
  882. engine_args = cls(**{attr: getattr(args, attr) for attr in attrs})
  883. return engine_args
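# Typical wiring, as an illustrative sketch only (the parser and argument
# names are the ones defined in add_cli_args above):
#   parser = FlexibleArgumentParser()
#   parser = EngineArgs.add_cli_args(parser)
#   args = parser.parse_args(["--model", "EleutherAI/pythia-70m-deduped"])
#   engine_args = EngineArgs.from_cli_args(args)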
884. def create_engine_config(self) -> EngineConfig:
  885. # gguf file needs a specific model loader and doesn't use hf_repo
  886. if check_gguf_file(self.model):
  887. self.quantization = self.load_format = "gguf"
  888. # bitsandbytes quantization needs a specific model loader
  889. # so we make sure the quant method and the load format are consistent
  890. if (self.quantization == "bitsandbytes" or
  891. self.qlora_adapter_name_or_path is not None) and \
  892. self.load_format != "bitsandbytes":
  893. raise ValueError(
  894. "BitsAndBytes quantization and QLoRA adapter only support "
  895. f"'bitsandbytes' load format, but got {self.load_format}")
  896. if (self.load_format == "bitsandbytes" or
  897. self.qlora_adapter_name_or_path is not None) and \
  898. self.quantization != "bitsandbytes":
  899. raise ValueError(
  900. "BitsAndBytes load format and QLoRA adapter only support "
  901. f"'bitsandbytes' quantization, but got {self.quantization}")
  902. assert self.cpu_offload_gb >= 0, (
  903. "CPU offload space must be non-negative"
  904. f", but got {self.cpu_offload_gb}")
  905. device_config = DeviceConfig(device=self.device)
  906. model_config = ModelConfig(
  907. model=self.model,
  908. tokenizer=self.tokenizer,
  909. tokenizer_mode=self.tokenizer_mode,
  910. trust_remote_code=self.trust_remote_code,
  911. dtype=self.dtype,
  912. seed=self.seed,
  913. revision=self.revision,
  914. code_revision=self.code_revision,
  915. rope_scaling=self.rope_scaling,
  916. rope_theta=self.rope_theta,
  917. tokenizer_revision=self.tokenizer_revision,
  918. max_model_len=self.max_model_len,
  919. quantization=self.quantization,
  920. deepspeed_fp_bits=self.deepspeed_fp_bits,
  921. quant_llm_fp_bits=self.quant_llm_fp_bits,
  922. quant_llm_exp_bits=self.quant_llm_exp_bits,
  923. quantization_param_path=self.quantization_param_path,
  924. enforce_eager=self.enforce_eager,
  925. max_context_len_to_capture=self.max_context_len_to_capture,
  926. max_seq_len_to_capture=self.max_seq_len_to_capture,
  927. max_logprobs=self.max_logprobs,
  928. disable_sliding_window=self.disable_sliding_window,
  929. skip_tokenizer_init=self.skip_tokenizer_init,
  930. served_model_name=self.served_model_name,
  931. limit_mm_per_prompt=self.limit_mm_per_prompt,
  932. use_async_output_proc=not self.disable_async_output_proc,
  933. config_format=self.config_format,
  934. override_neuron_config=self.override_neuron_config
  935. )
  936. if model_config.is_multimodal_model:
  937. if self.enable_prefix_caching:
  938. logger.warning(
  939. "--enable-prefix-caching is currently not "
  940. "supported for multimodal models and has been disabled.")
  941. self.enable_prefix_caching = False
  942. cache_config = CacheConfig(
  943. block_size=self.block_size if self.device != "neuron" else
  944. self.max_model_len,
  945. gpu_memory_utilization=self.gpu_memory_utilization,
  946. swap_space=self.swap_space,
  947. cache_dtype=self.kv_cache_dtype,
  948. is_attention_free=model_config.is_attention_free(),
  949. num_gpu_blocks_override=self.num_gpu_blocks_override,
  950. sliding_window=model_config.get_sliding_window(),
  951. enable_prefix_caching=self.enable_prefix_caching,
  952. cpu_offload_gb=self.cpu_offload_gb,
  953. )
  954. parallel_config = ParallelConfig(
  955. pipeline_parallel_size=self.pipeline_parallel_size,
  956. tensor_parallel_size=self.tensor_parallel_size,
  957. worker_use_ray=self.worker_use_ray,
  958. max_parallel_loading_workers=self.max_parallel_loading_workers,
  959. disable_custom_all_reduce=self.disable_custom_all_reduce,
  960. tokenizer_pool_config=TokenizerPoolConfig.create_config(
  961. tokenizer_pool_size=self.tokenizer_pool_size,
  962. tokenizer_pool_type=self.tokenizer_pool_type,
  963. tokenizer_pool_extra_config=self.tokenizer_pool_extra_config,
  964. ),
  965. ray_workers_use_nsight=self.ray_workers_use_nsight,
  966. distributed_executor_backend=self.distributed_executor_backend)
  967. max_model_len = model_config.max_model_len
  968. use_long_context = max_model_len > 32768
  969. if self.enable_chunked_prefill is None:
  970. # If not explicitly set, enable chunked prefill by default for
  971. # long context (> 32K) models. This is to avoid OOM errors in the
  972. # initial memory profiling phase.
  973. # Chunked prefill is currently disabled for multimodal models by
  974. # default.
  975. if use_long_context and not model_config.is_multimodal_model:
  976. is_gpu = device_config.device_type == "cuda"
  977. use_sliding_window = (model_config.get_sliding_window()
  978. is not None)
  979. use_spec_decode = self.speculative_model is not None
  980. has_seqlen_agnostic_layers = (
  981. model_config.contains_seqlen_agnostic_layers(
  982. parallel_config))
  983. if (is_gpu and not use_sliding_window and not use_spec_decode
  984. and not self.enable_lora
  985. and not self.enable_prompt_adapter
  986. and not has_seqlen_agnostic_layers):
  987. self.enable_chunked_prefill = True
  988. logger.warning(
  989. "Chunked prefill is enabled by default for models with "
  990. "max_model_len > 32K. Currently, chunked prefill might "
  991. "not work with some features or models. If you "
  992. "encounter any issues, please disable chunked prefill "
  993. "by setting --enable-chunked-prefill=False.")
  994. if self.enable_chunked_prefill is None:
  995. self.enable_chunked_prefill = False
  996. if not self.enable_chunked_prefill and use_long_context:
  997. logger.warning(
  998. f"The model has a long context length ({max_model_len}). "
  999. "This may cause OOM errors during the initial memory "
  1000. "profiling phase, or result in low performance due to small "
  1001. "KV cache space. Consider setting --max-model-len to a "
  1002. "smaller value.")
  1003. if self.num_scheduler_steps > 1 and not self.use_v2_block_manager:
  1004. self.use_v2_block_manager = True
  1005. logger.warning(
  1006. "Enabled BlockSpaceManagerV2 because it is "
  1007. "required for multi-step scheduling.")
  1008. speculative_config = SpeculativeConfig.maybe_create_spec_config(
  1009. target_model_config=model_config,
  1010. target_parallel_config=parallel_config,
  1011. target_dtype=self.dtype,
  1012. speculative_model=self.speculative_model,
  1013. speculative_model_quantization = \
  1014. self.speculative_model_quantization,
  1015. speculative_draft_tensor_parallel_size=self.
  1016. speculative_draft_tensor_parallel_size,
  1017. num_speculative_tokens=self.num_speculative_tokens,
  1018. speculative_disable_by_batch_size=self.
  1019. speculative_disable_by_batch_size,
  1020. speculative_max_model_len=self.speculative_max_model_len,
  1021. enable_chunked_prefill=self.enable_chunked_prefill,
  1022. use_v2_block_manager=self.use_v2_block_manager,
  1023. disable_log_stats=self.disable_log_stats,
  1024. ngram_prompt_lookup_max=self.ngram_prompt_lookup_max,
  1025. ngram_prompt_lookup_min=self.ngram_prompt_lookup_min,
  1026. draft_token_acceptance_method=\
  1027. self.spec_decoding_acceptance_method,
  1028. typical_acceptance_sampler_posterior_threshold=self.
  1029. typical_acceptance_sampler_posterior_threshold,
  1030. typical_acceptance_sampler_posterior_alpha=self.
  1031. typical_acceptance_sampler_posterior_alpha,
  1032. disable_logprobs=self.disable_logprobs_during_spec_decoding,
  1033. )
  1034. if self.num_scheduler_steps > 1:
  1035. if speculative_config is not None:
  1036. raise ValueError("Speculative decoding is not supported with "
  1037. "multi-step (--num-scheduler-steps > 1)")
  1038. if self.enable_chunked_prefill:
  1039. raise ValueError("Chunked prefill is not supported with "
  1040. "multi-step (--num-scheduler-steps > 1)")
  1041. # make sure num_lookahead_slots is set the higher value depending on
  1042. # if we are using speculative decoding or multi-step
  1043. num_lookahead_slots = max(self.num_lookahead_slots,
  1044. self.num_scheduler_steps - 1)
  1045. num_lookahead_slots = num_lookahead_slots \
  1046. if speculative_config is None \
  1047. else speculative_config.num_lookahead_slots
  1048. scheduler_config = SchedulerConfig(
  1049. max_num_batched_tokens=self.max_num_batched_tokens,
  1050. max_num_seqs=self.max_num_seqs,
  1051. max_model_len=model_config.max_model_len,
  1052. cache_config=cache_config,
  1053. is_attention_free=model_config.is_attention_free(),
  1054. use_v2_block_manager=self.use_v2_block_manager,
  1055. num_lookahead_slots=num_lookahead_slots,
  1056. delay_factor=self.scheduler_delay_factor,
  1057. enable_chunked_prefill=self.enable_chunked_prefill,
  1058. embedding_mode=model_config.embedding_mode,
  1059. is_multimodal_model=model_config.is_multimodal_model,
  1060. preemption_mode=self.preemption_mode,
  1061. num_scheduler_steps=self.num_scheduler_steps,
  1062. send_delta_data=(APHRODITE_USE_RAY_SPMD_WORKER and
  1063. parallel_config.use_ray),
  1064. single_user_mode=self.single_user_mode,
  1065. )
  1066. if not HAS_TRITON and self.enable_lora:
  1067. raise ValueError("Triton is not installed, LoRA will not work.")
  1068. lora_config = LoRAConfig(
  1069. max_lora_rank=self.max_lora_rank,
  1070. max_loras=self.max_loras,
  1071. fully_sharded_loras=self.fully_sharded_loras,
  1072. lora_extra_vocab_size=self.lora_extra_vocab_size,
  1073. long_lora_scaling_factors=self.long_lora_scaling_factors,
  1074. lora_dtype=self.lora_dtype,
  1075. max_cpu_loras=self.max_cpu_loras if self.max_cpu_loras
  1076. and self.max_cpu_loras > 0 else None) if self.enable_lora else None
  1077. if self.qlora_adapter_name_or_path is not None and \
  1078. self.qlora_adapter_name_or_path != "":
  1079. if self.model_loader_extra_config is None:
  1080. self.model_loader_extra_config = {}
  1081. self.model_loader_extra_config[
  1082. "qlora_adapter_name_or_path"] = self.qlora_adapter_name_or_path
  1083. load_config = LoadConfig(
  1084. load_format=self.load_format,
  1085. download_dir=self.download_dir,
  1086. model_loader_extra_config=self.model_loader_extra_config,
  1087. ignore_patterns=self.ignore_patterns)
  1088. prompt_adapter_config = PromptAdapterConfig(
  1089. max_prompt_adapters=self.max_prompt_adapters,
  1090. max_prompt_adapter_token=self.max_prompt_adapter_token) \
  1091. if self.enable_prompt_adapter else None
  1092. decoding_config = DecodingConfig(
  1093. guided_decoding_backend=self.guided_decoding_backend)
  1094. if (model_config.get_sliding_window() is not None
  1095. and scheduler_config.chunked_prefill_enabled
  1096. and not scheduler_config.use_v2_block_manager):
  1097. raise ValueError(
  1098. "Chunked prefill is not supported with sliding window. "
  1099. "Set --disable-sliding-window to disable sliding window.")
  1100. return EngineConfig(model_config=model_config,
  1101. cache_config=cache_config,
  1102. parallel_config=parallel_config,
  1103. scheduler_config=scheduler_config,
  1104. device_config=device_config,
  1105. lora_config=lora_config,
  1106. speculative_config=speculative_config,
  1107. load_config=load_config,
  1108. decoding_config=decoding_config,
  1109. prompt_adapter_config=prompt_adapter_config)
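# Illustrative note: an EngineConfig can also be built without the CLI by
# instantiating the dataclass directly, e.g.
#   EngineArgs(model="EleutherAI/pythia-70m-deduped").create_engine_config()
# where all other fields fall back to the defaults declared above.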
  1110. @dataclass
  1111. class AsyncEngineArgs(EngineArgs):
  1112. """Arguments for asynchronous Aphrodite engine."""
  1113. disable_log_requests: bool = False
  1114. uvloop: bool = False
  1115. @staticmethod
  1116. def add_cli_args(parser: FlexibleArgumentParser,
  1117. async_args_only: bool = False) -> FlexibleArgumentParser:
  1118. if not async_args_only:
  1119. parser = EngineArgs.add_cli_args(parser)
  1120. parser.add_argument('--disable-log-requests',
  1121. action='store_true',
  1122. help='Disable logging requests.')
  1123. parser.add_argument(
  1124. "--uvloop",
  1125. action="store_true",
  1126. help="Use the Uvloop asyncio event loop to possibly increase "
  1127. "performance")
  1128. return parser
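# AsyncEngineArgs inherits from_cli_args() and create_engine_config() from
# EngineArgs, so the same parsing flow also yields the async-only flags
# (--disable-log-requests, --uvloop) on the resulting AsyncEngineArgs.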
  1129. class StoreBoolean(argparse.Action):
  1130. def __call__(self, parser, namespace, values, option_string=None):
  1131. if values.lower() == "true":
  1132. setattr(namespace, self.dest, True)
  1133. elif values.lower() == "false":
  1134. setattr(namespace, self.dest, False)
  1135. else:
  1136. raise ValueError(f"Invalid boolean value: {values}. "
  1137. "Expected 'true' or 'false'.")