args_tools.py

  1. import argparse
  2. import dataclasses
  3. import json
  4. import os
  5. from dataclasses import dataclass
  6. from typing import (TYPE_CHECKING, Dict, List, Mapping, Optional, Tuple, Type,
  7. Union)
  8. from loguru import logger
  9. from aphrodite.common.config import (CacheConfig, ConfigFormat, DecodingConfig,
  10. DeviceConfig, EngineConfig, LoadConfig,
  11. LoadFormat, LoRAConfig, ModelConfig,
  12. ParallelConfig, PromptAdapterConfig,
  13. SchedulerConfig, SpeculativeConfig,
  14. TokenizerPoolConfig)
  15. from aphrodite.common.utils import FlexibleArgumentParser, is_cpu
  16. from aphrodite.executor.executor_base import ExecutorBase
  17. from aphrodite.quantization import QUANTIZATION_METHODS
  18. from aphrodite.transformers_utils.utils import check_gguf_file
  19. from aphrodite.triton_utils import HAS_TRITON
  20. if TYPE_CHECKING:
  21. from aphrodite.transformers_utils.tokenizer_group import BaseTokenizerGroup
22. APHRODITE_USE_RAY_SPMD_WORKER = bool(
23. int(os.getenv("APHRODITE_USE_RAY_SPMD_WORKER", "0")))
  24. def nullable_kvs(val: str) -> Optional[Mapping[str, int]]:
  25. if len(val) == 0:
  26. return None
  27. out_dict: Dict[str, int] = {}
  28. for item in val.split(","):
  29. try:
  30. key, value = item.split("=")
31. except ValueError as exc:
  32. msg = "Each item should be in the form KEY=VALUE"
  33. raise ValueError(msg) from exc
  34. try:
  35. out_dict[key] = int(value)
  36. except ValueError as exc:
  37. msg = f"Failed to parse value of item {key}={value}"
  38. raise ValueError(msg) from exc
  39. return out_dict
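# Example (informal sketch) of the parsing behavior above, e.g. as used by
# --limit-mm-per-prompt:
#   nullable_kvs("image=16,video=2") -> {"image": 16, "video": 2}
#   nullable_kvs("")                 -> None
#   nullable_kvs("image")            -> ValueError (item is not KEY=VALUE)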
  40. @dataclass
  41. class EngineArgs:
  42. """Arguments for Aphrodite engine."""
  43. # Model Options
  44. model: str
  45. seed: int = 0
  46. served_model_name: Optional[Union[str, List[str]]] = None
  47. tokenizer: Optional[str] = None
  48. revision: Optional[str] = None
  49. code_revision: Optional[str] = None
  50. tokenizer_revision: Optional[str] = None
  51. tokenizer_mode: str = "auto"
  52. trust_remote_code: bool = False
  53. download_dir: Optional[str] = None
  54. max_model_len: Optional[int] = None
  55. max_context_len_to_capture: Optional[int] = None
  56. max_seq_len_to_capture: Optional[int] = None
  57. rope_scaling: Optional[dict] = None
  58. rope_theta: Optional[float] = None
  59. model_loader_extra_config: Optional[dict] = None
  60. enforce_eager: Optional[bool] = None
  61. skip_tokenizer_init: bool = False
  62. tokenizer_pool_size: int = 0
  63. # Note: Specifying a tokenizer pool by passing a class
  64. # is intended for expert use only. The API may change without
  65. # notice.
  66. tokenizer_pool_type: Union[str, Type["BaseTokenizerGroup"]] = "ray"
  67. tokenizer_pool_extra_config: Optional[dict] = None
  68. limit_mm_per_prompt: Optional[Mapping[str, int]] = None
  69. max_logprobs: int = 10 # OpenAI default is 5, setting to 10 because ST
  70. # Device Options
  71. device: str = "auto"
  72. # Load Options
  73. load_format: str = "auto"
  74. config_format: str = "auto"
  75. dtype: str = "auto"
  76. ignore_patterns: Optional[Union[str, List[str]]] = None
  77. # Parallel Options
  78. worker_use_ray: Optional[bool] = False
  79. tensor_parallel_size: int = 1
  80. pipeline_parallel_size: int = 1
  81. ray_workers_use_nsight: bool = False
  82. disable_custom_all_reduce: bool = False
  83. # Note: Specifying a custom executor backend by passing a class
  84. # is intended for expert use only. The API may change without
  85. # notice.
  86. distributed_executor_backend: Optional[Union[str,
  87. Type[ExecutorBase]]] = None
  88. max_parallel_loading_workers: Optional[int] = None
  89. # Quantization Options
  90. quantization: Optional[str] = None
  91. quantization_param_path: Optional[str] = None
  92. preemption_mode: Optional[str] = None
  93. deepspeed_fp_bits: Optional[int] = None
  94. quant_llm_fp_bits: Optional[int] = None
  95. quant_llm_exp_bits: Optional[int] = None
  96. # Cache Options
  97. kv_cache_dtype: str = "auto"
  98. block_size: int = 16
  99. enable_prefix_caching: Optional[bool] = False
  100. num_gpu_blocks_override: Optional[int] = None
  101. disable_sliding_window: bool = False
  102. gpu_memory_utilization: float = 0.90
  103. swap_space: float = 4 # GiB
  104. cpu_offload_gb: float = 0 # GiB
  105. # Scheduler Options
  106. use_v2_block_manager: bool = False
  107. scheduler_delay_factor: float = 0.0
  108. enable_chunked_prefill: bool = False
  109. guided_decoding_backend: str = 'lm-format-enforcer'
  110. max_num_batched_tokens: Optional[int] = None
  111. max_num_seqs: int = 256
  112. num_scheduler_steps: int = 1
  113. # Speculative Decoding Options
  114. num_lookahead_slots: int = 0
  115. speculative_model: Optional[str] = None
  116. speculative_model_quantization: Optional[str] = None
  117. num_speculative_tokens: Optional[int] = None
  118. speculative_max_model_len: Optional[int] = None
  119. ngram_prompt_lookup_max: Optional[int] = None
  120. ngram_prompt_lookup_min: Optional[int] = None
  121. speculative_draft_tensor_parallel_size: Optional[int] = None
  122. speculative_disable_by_batch_size: Optional[int] = None
  123. spec_decoding_acceptance_method: str = 'rejection_sampler'
  124. typical_acceptance_sampler_posterior_threshold: Optional[float] = None
  125. typical_acceptance_sampler_posterior_alpha: Optional[float] = None
  126. disable_logprobs_during_spec_decoding: Optional[bool] = None
  127. # Adapter Options
  128. enable_lora: bool = False
  129. max_loras: int = 1
  130. max_lora_rank: int = 16
  131. lora_extra_vocab_size: int = 256
  132. lora_dtype: str = "auto"
  133. max_cpu_loras: Optional[int] = None
  134. long_lora_scaling_factors: Optional[Tuple[float]] = None
  135. fully_sharded_loras: bool = False
  136. qlora_adapter_name_or_path: Optional[str] = None
  137. enable_prompt_adapter: bool = False
  138. max_prompt_adapters: int = 1
  139. max_prompt_adapter_token: int = 0
  140. # Log Options
  141. disable_log_stats: bool = False
  142. def __post_init__(self):
  143. if self.tokenizer is None:
  144. self.tokenizer = self.model
  145. if is_cpu():
  146. self.distributed_executor_backend = None
  147. @staticmethod
  148. def add_cli_args(parser: FlexibleArgumentParser) -> FlexibleArgumentParser:
  149. """Shared CLI arguments for the Aphrodite engine."""
  150. # Model Options
  151. parser.add_argument(
  152. "--model",
  153. type=str,
  154. default="EleutherAI/pythia-70m-deduped",
  155. help="Category: Model Options\n"
  156. "name or path of the huggingface model to use",
  157. )
  158. parser.add_argument("--seed",
  159. type=int,
  160. default=EngineArgs.seed,
  161. help="Category: Model Options\n"
  162. "random seed")
  163. parser.add_argument(
  164. "--served-model-name",
  165. nargs="+",
  166. type=str,
  167. default=None,
  168. help="Category: API Options\n"
  169. "The model name(s) used in the API. If multiple "
  170. "names are provided, the server will respond to any "
  171. "of the provided names. The model name in the model "
  172. "field of a response will be the first name in this "
  173. "list. If not specified, the model name will be the "
  174. "same as the `--model` argument. Noted that this name(s)"
  175. "will also be used in `model_name` tag content of "
  176. "prometheus metrics, if multiple names provided, metrics"
  177. "tag will take the first one.")
  178. parser.add_argument(
  179. "--tokenizer",
  180. type=str,
  181. default=EngineArgs.tokenizer,
  182. help="Category: Model Options\n"
  183. "name or path of the huggingface tokenizer to use",
  184. )
  185. parser.add_argument(
  186. "--revision",
  187. type=str,
  188. default=None,
  189. help="Category: Model Options\n"
  190. "the specific model version to use. It can be a branch "
  191. "name, a tag name, or a commit id. If unspecified, will use "
  192. "the default version.",
  193. )
  194. parser.add_argument(
  195. "--code-revision",
  196. type=str,
  197. default=None,
  198. help="Category: Model Options\n"
  199. "the specific revision to use for the model code on "
  200. "Hugging Face Hub. It can be a branch name, a tag name, or a "
  201. "commit id. If unspecified, will use the default version.",
  202. )
  203. parser.add_argument(
  204. "--tokenizer-revision",
  205. type=str,
  206. default=None,
  207. help="Category: Model Options\n"
  208. "the specific tokenizer version to use. It can be a branch "
  209. "name, a tag name, or a commit id. If unspecified, will use "
  210. "the default version.",
  211. )
  212. parser.add_argument(
  213. "--tokenizer-mode",
  214. type=str,
  215. default=EngineArgs.tokenizer_mode,
  216. choices=['auto', 'slow', 'mistral'],
  217. help='The tokenizer mode.\n\n* "auto" will use the '
  218. 'fast tokenizer if available.\n* "slow" will '
  219. 'always use the slow tokenizer. \n* '
  220. '"mistral" will always use the `mistral_common` tokenizer.')
  221. parser.add_argument(
  222. "--trust-remote-code",
  223. action="store_true",
  224. help="Category: Model Options\n"
  225. "trust remote code from huggingface",
  226. )
  227. parser.add_argument(
  228. "--download-dir",
  229. type=str,
  230. default=EngineArgs.download_dir,
  231. help="Category: Model Options\n"
  232. "directory to download and load the weights, "
  233. "default to the default cache dir of "
  234. "huggingface",
  235. )
  236. parser.add_argument(
  237. "--max-model-len",
  238. type=int,
  239. default=EngineArgs.max_model_len,
  240. help="Category: Model Options\n"
  241. "model context length. If unspecified, "
  242. "will be automatically derived from the model.",
  243. )
  244. parser.add_argument("--max-context-len-to-capture",
  245. type=int,
  246. default=EngineArgs.max_context_len_to_capture,
  247. help="Category: Model Options\n"
  248. "Maximum context length covered by CUDA "
  249. "graphs. When a sequence has context length "
  250. "larger than this, we fall back to eager mode. "
  251. "(DEPRECATED. Use --max-seq_len-to-capture instead"
  252. ")")
  253. parser.add_argument("--max-seq-len-to-capture",
  254. type=int,
  255. default=EngineArgs.max_seq_len_to_capture,
  256. help="Category: Model Options\n"
  257. "Maximum sequence length covered by CUDA "
  258. "graphs. When a sequence has context length "
  259. "larger than this, we fall back to eager mode.")
  260. parser.add_argument('--rope-scaling',
  261. default=None,
  262. type=json.loads,
  263. help='Category: Model Options\n'
  264. 'RoPE scaling configuration in JSON format. '
  265. 'For example, {"type":"dynamic","factor":2.0}')
  266. parser.add_argument('--rope-theta',
  267. default=None,
  268. type=float,
  269. help='Category: Model Options\n'
  270. 'RoPE theta. Use with `rope_scaling`. In '
  271. 'some cases, changing the RoPE theta improves the '
  272. 'performance of the scaled model.')
  273. parser.add_argument("--model-loader-extra-config",
  274. type=str,
  275. default=EngineArgs.model_loader_extra_config,
  276. help="Category: Model Options\n"
  277. "Extra config for model loader. "
  278. "This will be passed to the model loader "
  279. "corresponding to the chosen load_format. "
  280. "This should be a JSON string that will be "
  281. "parsed into a dictionary.")
  282. parser.add_argument(
  283. "--enforce-eager",
  284. action=StoreBoolean,
  285. default=EngineArgs.enforce_eager,
  286. nargs="?",
  287. const="True",
  288. help="Category: Model Options\n"
  289. "Always use eager-mode PyTorch. If False, "
  290. "will use eager mode and CUDA graph in hybrid "
  291. "for maximal performance and flexibility.",
  292. )
  293. parser.add_argument("--skip-tokenizer-init",
  294. action="store_true",
  295. help="Category: Model Options\n"
  296. "Skip initialization of tokenizer and detokenizer")
  297. parser.add_argument("--tokenizer-pool-size",
  298. type=int,
  299. default=EngineArgs.tokenizer_pool_size,
  300. help="Category: Model Options\n"
  301. "Size of tokenizer pool to use for "
  302. "asynchronous tokenization. If 0, will "
  303. "use synchronous tokenization.")
  304. parser.add_argument("--tokenizer-pool-type",
  305. type=str,
  306. default=EngineArgs.tokenizer_pool_type,
  307. help="Category: Model Options\n"
  308. "The type of tokenizer pool to use for "
  309. "asynchronous tokenization. Ignored if "
  310. "tokenizer_pool_size is 0.")
  311. parser.add_argument("--tokenizer-pool-extra-config",
  312. type=str,
  313. default=EngineArgs.tokenizer_pool_extra_config,
  314. help="Category: Model Options\n"
  315. "Extra config for tokenizer pool. "
  316. "This should be a JSON string that will be "
  317. "parsed into a dictionary. Ignored if "
  318. "tokenizer_pool_size is 0.")
  319. # Multimodal related configs
  320. parser.add_argument(
  321. '--limit-mm-per-prompt',
  322. type=nullable_kvs,
  323. default=EngineArgs.limit_mm_per_prompt,
  324. # The default value is given in
  325. # MultiModalRegistry.init_mm_limits_per_prompt
  326. help=('For each multimodal plugin, limit how many '
  327. 'input instances to allow for each prompt. '
  328. 'Expects a comma-separated list of items, '
  329. 'e.g.: `image=16,video=2` allows a maximum of 16 '
  330. 'images and 2 videos per prompt. Defaults to 1 for '
  331. 'each modality.'))
  332. parser.add_argument(
  333. "--max-logprobs",
  334. type=int,
  335. default=EngineArgs.max_logprobs,
  336. help="Category: Model Options\n"
  337. "maximum number of log probabilities to "
  338. "return.",
  339. )
  340. # Device Options
  341. parser.add_argument(
  342. "--device",
  343. type=str,
  344. default=EngineArgs.device,
  345. choices=[
  346. "auto", "cuda", "neuron", "cpu", "openvino", "tpu", "xpu"
  347. ],
  348. help=("Category: Model Options\n"
  349. "Device to use for model execution."),
  350. )
  351. # Load Options
  352. parser.add_argument(
  353. '--load-format',
  354. type=str,
  355. default=EngineArgs.load_format,
  356. choices=[f.value for f in LoadFormat],
357. help='Category: Load Options\n'
  358. 'The format of the model weights to load.\n\n'
  359. '* "auto" will try to load the weights in the safetensors format '
  360. 'and fall back to the pytorch bin format if safetensors format '
  361. 'is not available.\n'
  362. '* "pt" will load the weights in the pytorch bin format.\n'
  363. '* "safetensors" will load the weights in the safetensors format.\n'
  364. '* "npcache" will load the weights in pytorch format and store '
  365. 'a numpy cache to speed up the loading.\n'
  366. '* "dummy" will initialize the weights with random values, '
  367. 'which is mainly for profiling.\n'
  368. '* "tensorizer" will load the weights using tensorizer from '
  369. 'CoreWeave. See the Tensorize Aphrodite Model script in the '
  370. 'Examples section for more information.\n'
  371. '* "bitsandbytes" will load the weights using bitsandbytes '
  372. 'quantization.\n')
  373. parser.add_argument(
  374. '--config-format',
  375. default=EngineArgs.config_format,
  376. choices=[f.value for f in ConfigFormat],
  377. help='The format of the model config to load.\n\n'
  378. '* "auto" will try to load the config in hf format '
  379. 'if available else it will try to load in mistral format. '
  380. 'Mistral format is specific to mistral models and is not '
  381. 'compatible with other models.')
  382. parser.add_argument(
  383. '--dtype',
  384. type=str,
  385. default=EngineArgs.dtype,
  386. choices=[
  387. 'auto', 'half', 'float16', 'bfloat16', 'float', 'float32'
  388. ],
  389. help='Category: Model Options\n'
  390. 'Data type for model weights and activations.\n\n'
  391. '* "auto" will use FP16 precision for FP32 and FP16 models, and '
  392. 'BF16 precision for BF16 models.\n'
  393. '* "half" for FP16. Recommended for AWQ quantization.\n'
  394. '* "float16" is the same as "half".\n'
  395. '* "bfloat16" for a balance between precision and range.\n'
  396. '* "float" is shorthand for FP32 precision.\n'
  397. '* "float32" for FP32 precision.')
  398. parser.add_argument(
  399. '--ignore-patterns',
  400. action="append",
  401. type=str,
  402. default=[],
  403. help="Category: Model Options\n"
  404. "The pattern(s) to ignore when loading the model."
  405. "Defaults to 'original/**/*' to avoid repeated loading of llama's "
  406. "checkpoints.")
  407. # Parallel Options
  408. parser.add_argument(
  409. '--worker-use-ray',
  410. action='store_true',
  411. help='Category: Parallel Options\n'
  412. 'Deprecated, use --distributed-executor-backend=ray.')
  413. parser.add_argument(
  414. "--tensor-parallel-size",
  415. "-tp",
  416. type=int,
  417. default=EngineArgs.tensor_parallel_size,
  418. help="Category: Parallel Options\n"
  419. "number of tensor parallel replicas, i.e. the number of GPUs "
  420. "to use.")
  421. parser.add_argument(
  422. "--pipeline-parallel-size",
  423. "-pp",
  424. type=int,
  425. default=EngineArgs.pipeline_parallel_size,
  426. help="Category: Parallel Options\n"
  427. "number of pipeline stages. Currently not supported.")
  428. parser.add_argument(
  429. "--ray-workers-use-nsight",
  430. action="store_true",
  431. help="Category: Parallel Options\n"
  432. "If specified, use nsight to profile ray workers",
  433. )
  434. parser.add_argument(
  435. "--disable-custom-all-reduce",
  436. action="store_true",
  437. default=EngineArgs.disable_custom_all_reduce,
  438. help="Category: Model Options\n"
  439. "See ParallelConfig",
  440. )
  441. parser.add_argument(
  442. '--distributed-executor-backend',
  443. choices=['ray', 'mp'],
  444. default=EngineArgs.distributed_executor_backend,
  445. help='Category: Parallel Options\n'
  446. 'Backend to use for distributed serving. When more than 1 GPU '
  447. 'is used, will be automatically set to "ray" if installed '
  448. 'or "mp" (multiprocessing) otherwise.')
  449. parser.add_argument(
  450. "--max-parallel-loading-workers",
  451. type=int,
  452. default=EngineArgs.max_parallel_loading_workers,
  453. help="Category: Parallel Options\n"
  454. "load model sequentially in multiple batches, "
  455. "to avoid RAM OOM when using tensor "
  456. "parallel and large models",
  457. )
  458. # Quantization Options
  459. parser.add_argument(
  460. "--quantization",
  461. "-q",
  462. type=str,
  463. choices=[*QUANTIZATION_METHODS, None],
  464. default=EngineArgs.quantization,
  465. help="Category: Quantization Options\n"
  466. "Method used to quantize the weights. If "
  467. "None, we first check the `quantization_config` "
  468. "attribute in the model config file. If that is "
  469. "None, we assume the model weights are not "
  470. "quantized and use `dtype` to determine the data "
  471. "type of the weights.",
  472. )
  473. parser.add_argument(
  474. '--quantization-param-path',
  475. type=str,
  476. default=None,
  477. help='Category: Quantization Options\n'
  478. 'Path to the JSON file containing the KV cache '
479. 'scaling factors. This should generally be supplied when '
480. 'KV cache dtype is FP8. Otherwise, KV cache scaling factors '
481. 'default to 1.0, which may cause accuracy issues. '
482. 'FP8_E5M2 (without scaling) is only supported on CUDA versions '
  483. 'greater than 11.8. On ROCm (AMD GPU), FP8_E4M3 is instead '
  484. 'supported for common inference criteria. ')
  485. parser.add_argument(
  486. '--preemption-mode',
  487. type=str,
  488. default=None,
  489. help='Category: Scheduler Options\n'
490. 'If \'recompute\', the engine performs preemption by '
491. 'recomputation; if \'swap\', the engine performs preemption by block '
492. 'swapping.')
  493. parser.add_argument("--deepspeed-fp-bits",
  494. type=int,
  495. default=None,
  496. help="Category: Quantization Options\n"
  497. "Number of floating bits to use for the deepspeed "
  498. "quantization. Supported bits are: 4, 6, 8, 12.")
  499. parser.add_argument("--quant-llm-fp-bits",
  500. type=int,
  501. default=None,
  502. help="Category: Quantization Options\n"
  503. "Number of floating bits to use for the quant_llm "
  504. "quantization. Supported bits are: 4 to 15.")
  505. parser.add_argument("--quant-llm-exp-bits",
  506. type=int,
  507. default=None,
  508. help="Category: Quantization Options\n"
  509. "Number of exponent bits to use for the quant_llm "
  510. "quantization. Supported bits are: 1 to 5.")
  511. # Cache Options
  512. parser.add_argument(
  513. '--kv-cache-dtype',
  514. type=str,
  515. choices=['auto', 'fp8', 'fp8_e5m2', 'fp8_e4m3'],
  516. default=EngineArgs.kv_cache_dtype,
  517. help='Category: Cache Options\n'
  518. 'Data type for kv cache storage. If "auto", will use model '
  519. 'data type. CUDA 11.8+ supports fp8 (=fp8_e4m3) and fp8_e5m2. '
  520. 'ROCm (AMD GPU) supports fp8 (=fp8_e4m3)')
  521. parser.add_argument(
  522. "--block-size",
  523. type=int,
  524. default=EngineArgs.block_size,
  525. choices=[8, 16, 32, 128, 256, 512, 1024, 2048],
  526. help="Category: Cache Options\n"
  527. "token block size",
  528. )
  529. parser.add_argument(
  530. "--enable-prefix-caching",
  531. "--context-shift",
  532. action="store_true",
  533. help="Category: Cache Options\n"
  534. "Enable automatic prefix caching.",
  535. )
  536. parser.add_argument(
  537. "--num-gpu-blocks-override",
  538. type=int,
  539. default=None,
  540. help="Category: Cache Options Options\n"
  541. "If specified, ignore GPU profiling result and use this "
  542. "number of GPU blocks. Used for testing preemption.")
  543. parser.add_argument('--disable-sliding-window',
  544. action='store_true',
545. help='Category: Cache Options\n'
546. 'Disables sliding window, capping the max model length '
547. 'to the sliding window size.')
  548. parser.add_argument(
  549. "--gpu-memory-utilization",
  550. "-gmu",
  551. type=float,
  552. default=EngineArgs.gpu_memory_utilization,
  553. help="Category: Cache Options\n"
  554. "The fraction of GPU memory to be used for "
  555. "the model executor, which can range from 0 to 1."
  556. "If unspecified, will use the default value of 0.9.",
  557. )
  558. parser.add_argument(
  559. "--swap-space",
  560. type=float,
  561. default=EngineArgs.swap_space,
  562. help="Category: Cache Options\n"
  563. "CPU swap space size (GiB) per GPU",
  564. )
  565. parser.add_argument(
  566. '--cpu-offload-gb',
  567. type=float,
  568. default=0,
  569. help='Category: Cache Options\n'
  570. 'The space in GiB to offload to CPU, per GPU. '
  571. 'Default is 0, which means no offloading. Intuitively, '
  572. 'this argument can be seen as a virtual way to increase '
  573. 'the GPU memory size. For example, if you have one 24 GB '
  574. 'GPU and set this to 10, virtually you can think of it as '
575. 'a 34 GB GPU. Then you can load a 13B model with BF16 weights, '
576. 'which requires at least 26 GB of GPU memory. Note that this '
577. 'requires fast CPU-GPU interconnect, as part of the model is '
  578. 'loaded from CPU memory to GPU memory on the fly in each '
  579. 'model forward pass.')
  580. # Scheduler Options
  581. parser.add_argument("--use-v2-block-manager",
  582. action="store_true",
  583. help="Category: Scheduler Options\n"
  584. "Use the v2 block manager.")
  585. parser.add_argument(
  586. "--scheduler-delay-factor",
  587. "-sdf",
  588. type=float,
  589. default=EngineArgs.scheduler_delay_factor,
  590. help="Category: Scheduler Options\n"
  591. "Apply a delay (of delay factor multiplied by previous "
  592. "prompt latency) before scheduling next prompt.")
  593. parser.add_argument(
  594. "--enable-chunked-prefill",
  595. action=StoreBoolean,
  596. default=EngineArgs.enable_chunked_prefill,
  597. nargs="?",
  598. const="True",
  599. help="Category: Scheduler Options\n"
  600. "If True, the prefill requests can be chunked based on the "
  601. "max_num_batched_tokens.")
  602. parser.add_argument(
  603. '--guided-decoding-backend',
  604. type=str,
  605. default='lm-format-enforcer',
  606. choices=['outlines', 'lm-format-enforcer'],
  607. help='Category: Scheduler Options\n'
  608. 'Which engine will be used for guided decoding'
609. ' (JSON schema / regex etc) by default. Currently supports '
  610. 'https://github.com/outlines-dev/outlines and '
  611. 'https://github.com/noamgat/lm-format-enforcer.'
  612. ' Can be overridden per request via guided_decoding_backend'
  613. ' parameter.')
  614. parser.add_argument(
  615. "--max-num-batched-tokens",
  616. type=int,
  617. default=EngineArgs.max_num_batched_tokens,
  618. help="Category: KV Cache Options\n"
  619. "maximum number of batched tokens per "
  620. "iteration",
  621. )
  622. parser.add_argument(
  623. "--max-num-seqs",
  624. type=int,
  625. default=EngineArgs.max_num_seqs,
  626. help="Category: API Options\n"
  627. "maximum number of sequences per iteration",
  628. )
  629. parser.add_argument('--num-scheduler-steps',
  630. type=int,
  631. default=1,
  632. help=('Maximum number of forward steps per '
  633. 'scheduler call.'))
  634. # Speculative Decoding Options
  635. parser.add_argument("--num-lookahead-slots",
  636. type=int,
  637. default=EngineArgs.num_lookahead_slots,
  638. help="Category: Speculative Decoding Options\n"
  639. "Experimental scheduling config necessary for "
  640. "speculative decoding. This will be replaced by "
  641. "speculative decoding config in the future; it is "
  642. "present for testing purposes until then.")
  643. parser.add_argument(
  644. "--speculative-model",
  645. type=str,
  646. default=EngineArgs.speculative_model,
  647. help="Category: Speculative Decoding Options\n"
  648. "The name of the draft model to be used in speculative decoding.")
  649. # Quantization settings for speculative model.
  650. parser.add_argument(
  651. '--speculative-model-quantization',
  652. type=str,
  653. choices=[*QUANTIZATION_METHODS, None],
  654. default=EngineArgs.speculative_model_quantization,
655. help='Method used to quantize the weights of the speculative model. '
  656. 'If None, we first check the `quantization_config` '
  657. 'attribute in the model config file. If that is '
  658. 'None, we assume the model weights are not '
  659. 'quantized and use `dtype` to determine the data '
  660. 'type of the weights.')
  661. parser.add_argument("--num-speculative-tokens",
  662. type=int,
  663. default=EngineArgs.num_speculative_tokens,
  664. help="Category: Speculative Decoding Options\n"
  665. "The number of speculative tokens to sample from "
  666. "the draft model in speculative decoding")
  667. parser.add_argument(
  668. "--speculative-max-model-len",
669. type=int,
  670. default=EngineArgs.speculative_max_model_len,
  671. help="Category: Speculative Decoding Options\n"
  672. "The maximum sequence length supported by the "
  673. "draft model. Sequences over this length will skip "
  674. "speculation.")
  675. parser.add_argument(
  676. "--ngram-prompt-lookup-max",
  677. type=int,
  678. default=EngineArgs.ngram_prompt_lookup_max,
  679. help="Category: Speculative Decoding Options\n"
  680. "Max size of window for ngram prompt lookup in speculative "
  681. "decoding.")
  682. parser.add_argument(
  683. "--ngram-prompt-lookup-min",
  684. type=int,
  685. default=EngineArgs.ngram_prompt_lookup_min,
  686. help="Category: Speculative Decoding Options\n"
  687. "Min size of window for ngram prompt lookup in speculative "
  688. "decoding.")
  689. parser.add_argument(
  690. "--speculative-draft-tensor-parallel-size",
  691. "-spec-draft-tp",
  692. type=int,
  693. default=EngineArgs.speculative_draft_tensor_parallel_size,
  694. help="Category: Speculative Decoding Options\n"
  695. "Number of tensor parallel replicas for "
  696. "the draft model in speculative decoding.")
  697. parser.add_argument(
  698. "--speculative-disable-by-batch-size",
  699. type=int,
  700. default=EngineArgs.speculative_disable_by_batch_size,
  701. help="Category: Speculative Decoding Options\n"
  702. "Disable speculative decoding for new incoming requests "
  703. "if the number of enqueue requests is larger than this value.")
  704. parser.add_argument(
  705. '--spec-decoding-acceptance-method',
  706. type=str,
  707. default=EngineArgs.spec_decoding_acceptance_method,
  708. choices=['rejection_sampler', 'typical_acceptance_sampler'],
  709. help='Category: Speculative Decoding Options\n'
  710. 'Specify the acceptance method to use during draft token '
  711. 'verification in speculative decoding. Two types of acceptance '
  712. 'routines are supported: '
  713. '1) RejectionSampler which does not allow changing the '
  714. 'acceptance rate of draft tokens, '
  715. '2) TypicalAcceptanceSampler which is configurable, allowing for '
  716. 'a higher acceptance rate at the cost of lower quality, '
  717. 'and vice versa.')
  718. parser.add_argument(
  719. '--typical-acceptance-sampler-posterior-threshold',
  720. type=float,
  721. default=EngineArgs.typical_acceptance_sampler_posterior_threshold,
  722. help='Category: Speculative Decoding Options\n'
  723. 'Set the lower bound threshold for the posterior '
  724. 'probability of a token to be accepted. This threshold is '
  725. 'used by the TypicalAcceptanceSampler to make sampling decisions '
  726. 'during speculative decoding. Defaults to 0.09')
  727. parser.add_argument(
  728. '--typical-acceptance-sampler-posterior-alpha',
  729. type=float,
  730. default=EngineArgs.typical_acceptance_sampler_posterior_alpha,
  731. help='Category: Speculative Decoding Options\n'
  732. 'A scaling factor for the entropy-based threshold for token '
  733. 'acceptance in the TypicalAcceptanceSampler. Typically defaults '
  734. 'to sqrt of --typical-acceptance-sampler-posterior-threshold '
  735. 'i.e. 0.3')
  736. parser.add_argument(
  737. '--disable-logprobs-during-spec-decoding',
  738. type=bool,
  739. default=EngineArgs.disable_logprobs_during_spec_decoding,
  740. help='Category: Speculative Decoding Options\n'
  741. 'If set to True, token log probabilities are not returned '
  742. 'during speculative decoding. If set to False, log probabilities '
  743. 'are returned according to the settings in SamplingParams. If '
  744. 'not specified, it defaults to True. Disabling log probabilities '
  745. 'during speculative decoding reduces latency by skipping logprob '
  746. 'calculation in proposal sampling, target sampling, and after '
  747. 'accepted tokens are determined.')
  748. # Adapter Options
  749. parser.add_argument(
  750. "--enable-lora",
  751. action="store_true",
  752. help="Category: Adapter Options\n"
  753. "If True, enable handling of LoRA adapters.",
  754. )
  755. parser.add_argument(
  756. "--max-loras",
  757. type=int,
  758. default=EngineArgs.max_loras,
  759. help="Category: Adapter Options\n"
  760. "Max number of LoRAs in a single batch.",
  761. )
  762. parser.add_argument(
  763. "--max-lora-rank",
  764. type=int,
  765. default=EngineArgs.max_lora_rank,
  766. help="Category: Adapter Options\n"
  767. "Max LoRA rank.",
  768. )
  769. parser.add_argument(
  770. "--lora-extra-vocab-size",
  771. type=int,
  772. default=EngineArgs.lora_extra_vocab_size,
  773. help=("Category: Adapter Options\n"
  774. "Maximum size of extra vocabulary that can be "
  775. "present in a LoRA adapter (added to the base "
  776. "model vocabulary)."),
  777. )
  778. parser.add_argument(
  779. "--lora-dtype",
  780. type=str,
  781. default=EngineArgs.lora_dtype,
  782. choices=["auto", "float16", "bfloat16", "float32"],
  783. help=("Category: Adapter Options\n"
  784. "Data type for LoRA. If auto, will default to "
  785. "base model dtype."),
  786. )
  787. parser.add_argument(
  788. "--max-cpu-loras",
  789. type=int,
  790. default=EngineArgs.max_cpu_loras,
  791. help=("Category: Adapter Options\n"
  792. "Maximum number of LoRAs to store in CPU memory. "
  793. "Must be >= than max_num_seqs. "
  794. "Defaults to max_num_seqs."),
  795. )
  796. parser.add_argument(
  797. "--long-lora-scaling-factors",
  798. type=str,
  799. default=EngineArgs.long_lora_scaling_factors,
  800. help=("Category: Adapter Options\n"
  801. "Specify multiple scaling factors (which can "
  802. "be different from base model scaling factor "
  803. "- see eg. Long LoRA) to allow for multiple "
  804. "LoRA adapters trained with those scaling "
  805. "factors to be used at the same time. If not "
  806. "specified, only adapters trained with the "
  807. "base model scaling factor are allowed."))
  808. parser.add_argument(
  809. "--fully-sharded-loras",
  810. action='store_true',
  811. help=("Category: Adapter Options\n"
  812. "By default, only half of the LoRA computation is sharded "
  813. "with tensor parallelism. Enabling this will use the fully "
  814. "sharded layers. At high sequence length, max rank or "
  815. "tensor parallel size, this is likely faster."))
  816. parser.add_argument("--qlora-adapter-name-or-path",
  817. type=str,
  818. default=None,
  819. help="Category: Adapter Options\n"
  820. "Name or path of the LoRA adapter to use.")
  821. parser.add_argument('--enable-prompt-adapter',
  822. action='store_true',
  823. help='Category: Adapter Options\n'
  824. 'If True, enable handling of PromptAdapters.')
  825. parser.add_argument('--max-prompt-adapters',
  826. type=int,
  827. default=EngineArgs.max_prompt_adapters,
  828. help='Category: Adapter Options\n'
  829. 'Max number of PromptAdapters in a batch.')
  830. parser.add_argument('--max-prompt-adapter-token',
  831. type=int,
  832. default=EngineArgs.max_prompt_adapter_token,
  833. help='Category: Adapter Options\n'
834. 'Max number of PromptAdapter tokens.')
  835. # Log Options
  836. parser.add_argument(
  837. "--disable-log-stats",
  838. action="store_true",
  839. help="Category: Log Options\n"
  840. "disable logging statistics",
  841. )
  842. return parser
  843. @classmethod
  844. def from_cli_args(cls, args: argparse.Namespace) -> "EngineArgs":
  845. # Get the list of attributes of this dataclass.
  846. attrs = [attr.name for attr in dataclasses.fields(cls)]
  847. # Set the attributes from the parsed arguments.
  848. engine_args = cls(**{attr: getattr(args, attr) for attr in attrs})
  849. return engine_args
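# Example (sketch) of the intended round trip: build a parser with
# add_cli_args, parse argv, then mirror the namespace back into EngineArgs.
#   parser = FlexibleArgumentParser()
#   parser = EngineArgs.add_cli_args(parser)
#   args = parser.parse_args(["--model", "EleutherAI/pythia-70m-deduped"])
#   engine_args = EngineArgs.from_cli_args(args)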
850. def create_engine_config(self) -> EngineConfig:
  851. # gguf file needs a specific model loader and doesn't use hf_repo
  852. if check_gguf_file(self.model):
  853. self.quantization = self.load_format = "gguf"
  854. # bitsandbytes quantization needs a specific model loader
  855. # so we make sure the quant method and the load format are consistent
  856. if (self.quantization == "bitsandbytes" or
  857. self.qlora_adapter_name_or_path is not None) and \
  858. self.load_format != "bitsandbytes":
  859. raise ValueError(
  860. "BitsAndBytes quantization and QLoRA adapter only support "
  861. f"'bitsandbytes' load format, but got {self.load_format}")
  862. if (self.load_format == "bitsandbytes" or
  863. self.qlora_adapter_name_or_path is not None) and \
  864. self.quantization != "bitsandbytes":
  865. raise ValueError(
  866. "BitsAndBytes load format and QLoRA adapter only support "
  867. f"'bitsandbytes' quantization, but got {self.quantization}")
  868. assert self.cpu_offload_gb >= 0, (
  869. "CPU offload space must be non-negative"
  870. f", but got {self.cpu_offload_gb}")
  871. device_config = DeviceConfig(device=self.device)
  872. model_config = ModelConfig(
  873. model=self.model,
  874. tokenizer=self.tokenizer,
  875. tokenizer_mode=self.tokenizer_mode,
  876. trust_remote_code=self.trust_remote_code,
  877. dtype=self.dtype,
  878. seed=self.seed,
  879. revision=self.revision,
  880. code_revision=self.code_revision,
  881. rope_scaling=self.rope_scaling,
  882. rope_theta=self.rope_theta,
  883. tokenizer_revision=self.tokenizer_revision,
  884. max_model_len=self.max_model_len,
  885. quantization=self.quantization,
  886. deepspeed_fp_bits=self.deepspeed_fp_bits,
  887. quant_llm_fp_bits=self.quant_llm_fp_bits,
  888. quant_llm_exp_bits=self.quant_llm_exp_bits,
  889. quantization_param_path=self.quantization_param_path,
  890. enforce_eager=self.enforce_eager,
  891. max_context_len_to_capture=self.max_context_len_to_capture,
  892. max_seq_len_to_capture=self.max_seq_len_to_capture,
  893. max_logprobs=self.max_logprobs,
  894. disable_sliding_window=self.disable_sliding_window,
  895. skip_tokenizer_init=self.skip_tokenizer_init,
  896. served_model_name=self.served_model_name,
  897. limit_mm_per_prompt=self.limit_mm_per_prompt,
  898. config_format=self.config_format,
  899. )
  900. cache_config = CacheConfig(
  901. block_size=self.block_size,
  902. gpu_memory_utilization=self.gpu_memory_utilization,
  903. swap_space=self.swap_space,
  904. cache_dtype=self.kv_cache_dtype,
  905. is_attention_free=model_config.is_attention_free(),
  906. num_gpu_blocks_override=self.num_gpu_blocks_override,
  907. sliding_window=model_config.get_sliding_window(),
  908. enable_prefix_caching=self.enable_prefix_caching,
  909. cpu_offload_gb=self.cpu_offload_gb,
  910. )
  911. parallel_config = ParallelConfig(
  912. pipeline_parallel_size=self.pipeline_parallel_size,
  913. tensor_parallel_size=self.tensor_parallel_size,
  914. worker_use_ray=self.worker_use_ray,
  915. max_parallel_loading_workers=self.max_parallel_loading_workers,
  916. disable_custom_all_reduce=self.disable_custom_all_reduce,
  917. tokenizer_pool_config=TokenizerPoolConfig.create_config(
  918. tokenizer_pool_size=self.tokenizer_pool_size,
  919. tokenizer_pool_type=self.tokenizer_pool_type,
  920. tokenizer_pool_extra_config=self.tokenizer_pool_extra_config,
  921. ),
  922. ray_workers_use_nsight=self.ray_workers_use_nsight,
  923. distributed_executor_backend=self.distributed_executor_backend)
  924. max_model_len = model_config.max_model_len
  925. use_long_context = max_model_len > 32768
  926. if self.enable_chunked_prefill is None:
  927. # If not explicitly set, enable chunked prefill by default for
  928. # long context (> 32K) models. This is to avoid OOM errors in the
  929. # initial memory profiling phase.
  930. if use_long_context:
  931. is_gpu = device_config.device_type == "cuda"
  932. use_sliding_window = (model_config.get_sliding_window()
  933. is not None)
  934. use_spec_decode = self.speculative_model is not None
  935. has_seqlen_agnostic_layers = (
  936. model_config.contains_seqlen_agnostic_layers(
  937. parallel_config))
  938. if (is_gpu and not use_sliding_window and not use_spec_decode
  939. and not self.enable_lora
  940. and not self.enable_prompt_adapter
  941. and not self.enable_prefix_caching
  942. and not has_seqlen_agnostic_layers):
  943. self.enable_chunked_prefill = True
  944. logger.warning(
  945. "Chunked prefill is enabled by default for models with "
  946. "max_model_len > 32K. Currently, chunked prefill might "
  947. "not work with some features or models. If you "
  948. "encounter any issues, please disable chunked prefill "
  949. "by setting --enable-chunked-prefill=False.")
  950. if self.enable_chunked_prefill is None:
  951. self.enable_chunked_prefill = False
  952. if not self.enable_chunked_prefill and use_long_context:
  953. logger.warning(
  954. f"The model has a long context length ({max_model_len}). "
  955. "This may cause OOM errors during the initial memory "
  956. "profiling phase, or result in low performance due to small "
  957. "KV cache space. Consider setting --max-model-len to a "
  958. "smaller value.")
  959. if self.num_scheduler_steps > 1 and not self.use_v2_block_manager:
  960. self.use_v2_block_manager = True
  961. logger.warning(
  962. "Enabled BlockSpaceManagerV2 because it is "
  963. "required for multi-step scheduling.")
  964. speculative_config = SpeculativeConfig.maybe_create_spec_config(
  965. target_model_config=model_config,
  966. target_parallel_config=parallel_config,
  967. target_dtype=self.dtype,
  968. speculative_model=self.speculative_model,
  969. speculative_model_quantization = \
  970. self.speculative_model_quantization,
  971. speculative_draft_tensor_parallel_size=self.
  972. speculative_draft_tensor_parallel_size,
  973. num_speculative_tokens=self.num_speculative_tokens,
  974. speculative_disable_by_batch_size=self.
  975. speculative_disable_by_batch_size,
  976. speculative_max_model_len=self.speculative_max_model_len,
  977. enable_chunked_prefill=self.enable_chunked_prefill,
  978. use_v2_block_manager=self.use_v2_block_manager,
  979. disable_log_stats=self.disable_log_stats,
  980. ngram_prompt_lookup_max=self.ngram_prompt_lookup_max,
  981. ngram_prompt_lookup_min=self.ngram_prompt_lookup_min,
  982. draft_token_acceptance_method=\
  983. self.spec_decoding_acceptance_method,
  984. typical_acceptance_sampler_posterior_threshold=self.
  985. typical_acceptance_sampler_posterior_threshold,
  986. typical_acceptance_sampler_posterior_alpha=self.
  987. typical_acceptance_sampler_posterior_alpha,
  988. disable_logprobs=self.disable_logprobs_during_spec_decoding,
  989. )
  990. if self.num_scheduler_steps > 1:
  991. if speculative_config is not None:
  992. raise ValueError("Speculative decoding is not supported with "
  993. "multi-step (--num-scheduler-steps > 1)")
  994. if self.enable_chunked_prefill:
  995. raise ValueError("Chunked prefill is not supported with "
  996. "multi-step (--num-scheduler-steps > 1)")
997. # make sure num_lookahead_slots is set to the higher value depending on
998. # whether we are using speculative decoding or multi-step scheduling
  999. num_lookahead_slots = max(self.num_lookahead_slots,
  1000. self.num_scheduler_steps - 1)
  1001. num_lookahead_slots = num_lookahead_slots \
  1002. if speculative_config is None \
  1003. else speculative_config.num_lookahead_slots
  1004. scheduler_config = SchedulerConfig(
  1005. max_num_batched_tokens=self.max_num_batched_tokens,
  1006. max_num_seqs=self.max_num_seqs,
  1007. max_model_len=model_config.max_model_len,
  1008. is_attention_free=model_config.is_attention_free(),
  1009. use_v2_block_manager=self.use_v2_block_manager,
  1010. num_lookahead_slots=num_lookahead_slots,
  1011. delay_factor=self.scheduler_delay_factor,
  1012. enable_chunked_prefill=self.enable_chunked_prefill,
  1013. embedding_mode=model_config.embedding_mode,
  1014. preemption_mode=self.preemption_mode,
  1015. num_scheduler_steps=self.num_scheduler_steps,
  1016. send_delta_data=(APHRODITE_USE_RAY_SPMD_WORKER and
  1017. parallel_config.use_ray),
  1018. )
  1019. if not HAS_TRITON and self.enable_lora:
  1020. raise ValueError("Triton is not installed, LoRA will not work.")
  1021. lora_config = LoRAConfig(
  1022. max_lora_rank=self.max_lora_rank,
  1023. max_loras=self.max_loras,
  1024. fully_sharded_loras=self.fully_sharded_loras,
  1025. lora_extra_vocab_size=self.lora_extra_vocab_size,
  1026. long_lora_scaling_factors=self.long_lora_scaling_factors,
  1027. lora_dtype=self.lora_dtype,
  1028. max_cpu_loras=self.max_cpu_loras if self.max_cpu_loras
  1029. and self.max_cpu_loras > 0 else None) if self.enable_lora else None
  1030. if self.qlora_adapter_name_or_path is not None and \
  1031. self.qlora_adapter_name_or_path != "":
  1032. if self.model_loader_extra_config is None:
  1033. self.model_loader_extra_config = {}
  1034. self.model_loader_extra_config[
  1035. "qlora_adapter_name_or_path"] = self.qlora_adapter_name_or_path
  1036. load_config = LoadConfig(
  1037. load_format=self.load_format,
  1038. download_dir=self.download_dir,
  1039. model_loader_extra_config=self.model_loader_extra_config,
  1040. ignore_patterns=self.ignore_patterns)
  1041. prompt_adapter_config = PromptAdapterConfig(
  1042. max_prompt_adapters=self.max_prompt_adapters,
  1043. max_prompt_adapter_token=self.max_prompt_adapter_token) \
  1044. if self.enable_prompt_adapter else None
  1045. decoding_config = DecodingConfig(
  1046. guided_decoding_backend=self.guided_decoding_backend)
  1047. if (model_config.get_sliding_window() is not None
  1048. and scheduler_config.chunked_prefill_enabled
  1049. and not scheduler_config.use_v2_block_manager):
  1050. raise ValueError(
  1051. "Chunked prefill is not supported with sliding window. "
  1052. "Set --disable-sliding-window to disable sliding window.")
  1053. return EngineConfig(model_config=model_config,
  1054. cache_config=cache_config,
  1055. parallel_config=parallel_config,
  1056. scheduler_config=scheduler_config,
  1057. device_config=device_config,
  1058. lora_config=lora_config,
  1059. speculative_config=speculative_config,
  1060. load_config=load_config,
  1061. decoding_config=decoding_config,
  1062. prompt_adapter_config=prompt_adapter_config)
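# Example (sketch): EngineArgs can also be constructed programmatically and
# expanded into the per-component configs in one call. Note that
# create_engine_config may load the model/tokenizer config from disk or the
# Hugging Face Hub, so this is illustrative rather than a unit-testable call:
#   engine_args = EngineArgs(model="EleutherAI/pythia-70m-deduped",
#                            gpu_memory_utilization=0.8)
#   engine_config = engine_args.create_engine_config()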
  1063. @dataclass
  1064. class AsyncEngineArgs(EngineArgs):
  1065. """Arguments for asynchronous Aphrodite engine."""
  1066. engine_use_ray: bool = False
  1067. disable_log_requests: bool = False
  1068. uvloop: bool = False
  1069. @staticmethod
  1070. def add_cli_args(parser: FlexibleArgumentParser,
  1071. async_args_only: bool = False) -> FlexibleArgumentParser:
  1072. if not async_args_only:
  1073. parser = EngineArgs.add_cli_args(parser)
  1074. parser.add_argument('--engine-use-ray',
  1075. action='store_true',
  1076. help='Use Ray to start the LLM engine in a '
1077. 'separate process from the server process.')
  1078. parser.add_argument('--disable-log-requests',
  1079. action='store_true',
  1080. help='Disable logging requests.')
  1081. parser.add_argument(
  1082. "--uvloop",
  1083. action="store_true",
  1084. help="Use the Uvloop asyncio event loop to possibly increase "
  1085. "performance")
  1086. return parser
  1087. class StoreBoolean(argparse.Action):
  1088. def __call__(self, parser, namespace, values, option_string=None):
  1089. if values.lower() == "true":
  1090. setattr(namespace, self.dest, True)
  1091. elif values.lower() == "false":
  1092. setattr(namespace, self.dest, False)
  1093. else:
  1094. raise ValueError(f"Invalid boolean value: {values}. "
  1095. "Expected 'true' or 'false'.")