args_tools.py

import argparse
import dataclasses
import json
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

from aphrodite.common.config import (CacheConfig, DecodingConfig, DeviceConfig,
                                     EngineConfig, LoadConfig, LoRAConfig,
                                     ModelConfig, ParallelConfig,
                                     SchedulerConfig, SpeculativeConfig,
                                     TokenizerPoolConfig, VisionLanguageConfig)
from aphrodite.common.utils import is_cpu, str_to_int_tuple
from aphrodite.quantization import QUANTIZATION_METHODS


@dataclass
class EngineArgs:
    """Arguments for Aphrodite engine."""
    model: str
    served_model_name: Optional[Union[str, List[str]]] = None
    tokenizer: Optional[str] = None
    skip_tokenizer_init: bool = False
    tokenizer_mode: str = "auto"
    trust_remote_code: bool = False
    download_dir: Optional[str] = None
    load_format: str = "auto"
    dtype: str = "auto"
    kv_cache_dtype: str = "auto"
    quantization_param_path: Optional[str] = None
    seed: int = 0
    max_model_len: Optional[int] = None
    worker_use_ray: Optional[bool] = False
    distributed_executor_backend: Optional[str] = None
    pipeline_parallel_size: int = 1
    tensor_parallel_size: int = 1
    max_parallel_loading_workers: Optional[int] = None
    block_size: int = 16
    enable_prefix_caching: bool = False
    disable_sliding_window: bool = False
    use_v2_block_manager: bool = False
    swap_space: int = 4  # GiB
    gpu_memory_utilization: float = 0.90
    max_num_batched_tokens: Optional[int] = None
    max_num_seqs: int = 256
    max_logprobs: int = 10  # OpenAI default is 5, setting to 10 because ST
    disable_log_stats: bool = False
    revision: Optional[str] = None
    code_revision: Optional[str] = None
    rope_scaling: Optional[dict] = None
    rope_theta: Optional[float] = None
    tokenizer_revision: Optional[str] = None
    quantization: Optional[str] = None
    load_in_4bit: bool = False
    load_in_8bit: bool = False
    load_in_smooth: bool = False
    deepspeed_fp_bits: Optional[int] = None
    enforce_eager: bool = True
    max_context_len_to_capture: Optional[int] = None
    max_seq_len_to_capture: int = 8192
    disable_custom_all_reduce: bool = False
    tokenizer_pool_size: int = 0
    tokenizer_pool_type: str = "ray"
    tokenizer_pool_extra_config: Optional[dict] = None
    enable_lora: bool = False
    max_loras: int = 1
    max_lora_rank: int = 16
    fully_sharded_loras: bool = False
    lora_extra_vocab_size: int = 256
    long_lora_scaling_factors: Optional[Tuple[float, ...]] = None
    lora_dtype: str = "auto"
    max_cpu_loras: Optional[int] = None
    device: str = "auto"
    ray_workers_use_nsight: bool = False
    num_gpu_blocks_override: Optional[int] = None
    num_lookahead_slots: int = 0
    model_loader_extra_config: Optional[dict] = None
    preemption_mode: Optional[str] = None
    # Related to Vision-language models such as llava
    image_token_id: Optional[int] = None
    image_input_shape: Optional[str] = None
    image_feature_size: Optional[int] = None
    # Scheduler config
    scheduler_delay_factor: float = 0.0
    enable_chunked_prefill: bool = False
    guided_decoding_backend: str = 'outlines'
    # Speculative decoding config
    speculative_model: Optional[str] = None
    speculative_draft_tensor_parallel_size: Optional[int] = None
    num_speculative_tokens: Optional[int] = None
    speculative_max_model_len: Optional[int] = None
    speculative_disable_by_batch_size: Optional[int] = None
    ngram_prompt_lookup_max: Optional[int] = None
    ngram_prompt_lookup_min: Optional[int] = None
    spec_decoding_acceptance_method: str = 'rejection_sampler'
    typical_acceptance_sampler_posterior_threshold: Optional[float] = None
    typical_acceptance_sampler_posterior_alpha: Optional[float] = None
    qlora_adapter_name_or_path: Optional[str] = None

    def __post_init__(self):
        if self.tokenizer is None:
            self.tokenizer = self.model
        if is_cpu():
            self.distributed_executor_backend = None
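
    # Example (illustrative only): constructing EngineArgs directly in
    # Python. When no tokenizer is given, __post_init__ falls back to the
    # model path, so both fields point at the same Hugging Face repo:
    #
    #     args = EngineArgs(model="EleutherAI/pythia-70m-deduped")
    #     assert args.tokenizer == "EleutherAI/pythia-70m-deduped"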

    @staticmethod
    def add_cli_args_for_vlm(
            parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        parser.add_argument('--image-token-id',
                            type=int,
                            default=None,
                            help=('Input id for image token.'))
        parser.add_argument(
            '--image-input-shape',
            type=str,
            default=None,
            help=('The biggest image input shape (worst for memory footprint) '
                  'given an input type. Only used for the profile_run.'))
        parser.add_argument(
            '--image-feature-size',
            type=int,
            default=None,
            help=('The image feature size along the context dimension.'))
        return parser
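
    # Example (hypothetical values in the style of a LLaVA-1.5 setup; not
    # taken from this file): the three flags above are intended to be passed
    # together, e.g.
    #
    #     --image-token-id 32000 \
    #     --image-input-shape 1,3,336,336 \
    #     --image-feature-size 576
    #
    # create_engine_config() raises if --image-token-id is set without the
    # other two.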

    @staticmethod
    def add_cli_args(
            parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Shared CLI arguments for the Aphrodite engine."""
        # NOTE: If you update any of the arguments below, please also
        # make sure to update docs/source/models/engine_args.rst
        # Model arguments
        parser.add_argument(
            "--model",
            type=str,
            default="EleutherAI/pythia-70m-deduped",
            help="name or path of the huggingface model to use",
        )
        parser.add_argument(
            "--tokenizer",
            type=str,
            default=EngineArgs.tokenizer,
            help="name or path of the huggingface tokenizer to use",
        )
        parser.add_argument(
            "--skip-tokenizer-init",
            action="store_true",
            help="Skip initialization of tokenizer and detokenizer")
        parser.add_argument(
            "--revision",
            type=str,
            default=None,
            help="the specific model version to use. It can be a branch "
            "name, a tag name, or a commit id. If unspecified, will use "
            "the default version.",
        )
        parser.add_argument(
            "--code-revision",
            type=str,
            default=None,
            help="the specific revision to use for the model code on "
            "Hugging Face Hub. It can be a branch name, a tag name, or a "
            "commit id. If unspecified, will use the default version.",
        )
        parser.add_argument(
            "--tokenizer-revision",
            type=str,
            default=None,
            help="the specific tokenizer version to use. It can be a branch "
            "name, a tag name, or a commit id. If unspecified, will use "
            "the default version.",
        )
        parser.add_argument(
            "--tokenizer-mode",
            type=str,
            default=EngineArgs.tokenizer_mode,
            choices=["auto", "slow"],
            help='tokenizer mode. "auto" will use the fast '
            'tokenizer if available, and "slow" will '
            "always use the slow tokenizer.",
        )
        parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="trust remote code from huggingface",
        )
        parser.add_argument(
            "--download-dir",
            type=str,
            default=EngineArgs.download_dir,
            help="directory to download and load the weights, "
            "default to the default cache dir of "
            "huggingface",
        )
        parser.add_argument(
            '--load-format',
            type=str,
            default=EngineArgs.load_format,
            choices=[
                'auto',
                'pt',
                'safetensors',
                'npcache',
                'dummy',
                'tensorizer',
                'sharded_state',
                'bitsandbytes',
            ],
            help='The format of the model weights to load.\n\n'
            '* "auto" will try to load the weights in the safetensors format '
            'and fall back to the pytorch bin format if safetensors format '
            'is not available.\n'
            '* "pt" will load the weights in the pytorch bin format.\n'
            '* "safetensors" will load the weights in the safetensors '
            'format.\n'
            '* "npcache" will load the weights in pytorch format and store '
            'a numpy cache to speed up the loading.\n'
            '* "dummy" will initialize the weights with random values, '
            'which is mainly for profiling.\n'
            '* "tensorizer" will load the weights using tensorizer from '
            'CoreWeave. See the Tensorize Aphrodite Model script in the '
            'Examples section for more information.\n'
            '* "bitsandbytes" will load the weights using bitsandbytes '
            'quantization.\n')
        parser.add_argument(
            '--dtype',
            type=str,
            default=EngineArgs.dtype,
            choices=[
                'auto', 'half', 'float16', 'bfloat16', 'float', 'float32'
            ],
            help='Data type for model weights and activations.\n\n'
            '* "auto" will use FP16 precision for FP32 and FP16 models, and '
            'BF16 precision for BF16 models.\n'
            '* "half" for FP16. Recommended for AWQ quantization.\n'
            '* "float16" is the same as "half".\n'
            '* "bfloat16" for a balance between precision and range.\n'
            '* "float" is shorthand for FP32 precision.\n'
            '* "float32" for FP32 precision.')
        parser.add_argument(
            '--kv-cache-dtype',
            type=str,
            choices=['auto', 'fp8', 'fp8_e5m2', 'fp8_e4m3'],
            default=EngineArgs.kv_cache_dtype,
            help='Data type for kv cache storage. If "auto", will use model '
            'data type. CUDA 11.8+ supports fp8 (=fp8_e4m3) and fp8_e5m2. '
            'ROCm (AMD GPU) supports fp8 (=fp8_e4m3)')
        parser.add_argument(
            '--quantization-param-path',
            type=str,
            default=None,
            help='Path to the JSON file containing the KV cache '
            'scaling factors. This should generally be supplied when '
            'KV cache dtype is FP8. Otherwise, KV cache scaling factors '
            'default to 1.0, which may cause accuracy issues. '
            'FP8_E5M2 (without scaling) is only supported on CUDA versions '
            'greater than 11.8. On ROCm (AMD GPU), FP8_E4M3 is instead '
            'supported for common inference criteria.')
        parser.add_argument(
            "--max-model-len",
            type=int,
            default=EngineArgs.max_model_len,
            help="model context length. If unspecified, "
            "will be automatically derived from the model.",
        )
        parser.add_argument(
            '--guided-decoding-backend',
            type=str,
            default='outlines',
            choices=['outlines', 'lm-format-enforcer'],
            help='Which engine will be used for guided decoding'
            ' (JSON schema / regex etc) by default. Currently support '
            'https://github.com/outlines-dev/outlines and '
            'https://github.com/noamgat/lm-format-enforcer.'
            ' Can be overridden per request via guided_decoding_backend'
            ' parameter.')
        # Parallel arguments
        parser.add_argument(
            '--distributed-executor-backend',
            choices=['ray', 'mp'],
            default=EngineArgs.distributed_executor_backend,
            help='Backend to use for distributed serving. When more than 1 '
            'GPU is used, will be automatically set to "ray" if installed '
            'or "mp" (multiprocessing) otherwise.')
        parser.add_argument(
            '--worker-use-ray',
            action='store_true',
            help='Deprecated, use --distributed-executor-backend=ray.')
        parser.add_argument(
            "--pipeline-parallel-size",
            "-pp",
            type=int,
            default=EngineArgs.pipeline_parallel_size,
            help="number of pipeline stages. Currently not supported.")
        parser.add_argument(
            "--tensor-parallel-size",
            "-tp",
            type=int,
            default=EngineArgs.tensor_parallel_size,
            help="number of tensor parallel replicas, i.e. the number of "
            "GPUs to use.")
        parser.add_argument(
            "--max-parallel-loading-workers",
            type=int,
            default=EngineArgs.max_parallel_loading_workers,
            help="load model sequentially in multiple batches, "
            "to avoid RAM OOM when using tensor "
            "parallel and large models",
        )
        parser.add_argument(
            "--ray-workers-use-nsight",
            action="store_true",
            help="If specified, use nsight to profile ray workers",
        )
        # KV cache arguments
        parser.add_argument(
            "--block-size",
            type=int,
            default=EngineArgs.block_size,
            choices=[8, 16, 32],
            help="token block size",
        )
        parser.add_argument(
            "--enable-prefix-caching",
            "--context-shift",
            action="store_true",
            help="Enable automatic prefix caching.",
        )
        parser.add_argument(
            '--disable-sliding-window',
            action='store_true',
            help='Disables sliding window, capping the max model length '
            'to the sliding window size.')
        parser.add_argument(
            "--use-v2-block-manager",
            action="store_true",
            help="Use the v2 block manager.")
        parser.add_argument(
            "--num-lookahead-slots",
            type=int,
            default=EngineArgs.num_lookahead_slots,
            help="Experimental scheduling config necessary for "
            "speculative decoding. This will be replaced by "
            "speculative decoding config in the future; it is "
            "present for testing purposes until then.")
        parser.add_argument(
            "--seed",
            type=int,
            default=EngineArgs.seed,
            help="random seed")
        parser.add_argument(
            "--swap-space",
            type=int,
            default=EngineArgs.swap_space,
            help="CPU swap space size (GiB) per GPU",
        )
        parser.add_argument(
            "--gpu-memory-utilization",
            "-gmu",
            type=float,
            default=EngineArgs.gpu_memory_utilization,
            help="the fraction of GPU memory to be used for "
            "the model executor, which can range from 0 to 1. "
            "If unspecified, will use the default value of 0.9.",
        )
        parser.add_argument(
            "--num-gpu-blocks-override",
            type=int,
            default=None,
            help="If specified, ignore GPU profiling result and use this "
            "number of GPU blocks. Used for testing preemption.")
        parser.add_argument(
            "--max-num-batched-tokens",
            type=int,
            default=EngineArgs.max_num_batched_tokens,
            help="maximum number of batched tokens per "
            "iteration",
        )
        parser.add_argument(
            "--max-num-seqs",
            type=int,
            default=EngineArgs.max_num_seqs,
            help="maximum number of sequences per iteration",
        )
        parser.add_argument(
            "--max-logprobs",
            type=int,
            default=EngineArgs.max_logprobs,
            help="maximum number of log probabilities to "
            "return.",
        )
        parser.add_argument(
            "--disable-log-stats",
            action="store_true",
            help="disable logging statistics",
        )
        # Quantization settings.
        parser.add_argument(
            "--quantization",
            "-q",
            type=str,
            choices=[*QUANTIZATION_METHODS, None],
            default=EngineArgs.quantization,
            help="Method used to quantize the weights. If "
            "None, we first check the `quantization_config` "
            "attribute in the model config file. If that is "
            "None, we assume the model weights are not "
            "quantized and use `dtype` to determine the data "
            "type of the weights.",
        )
        parser.add_argument(
            "--load-in-4bit",
            action="store_true",
            help="Load the FP16 model in 4-bit format. Also "
            "works with AWQ models. Throughput at 2.5x of "
            "FP16.",
        )
        parser.add_argument(
            "--load-in-8bit",
            action="store_true",
            help="Load the FP16 model in 8-bit format. "
            "Throughput at 0.3x of FP16.",
        )
        parser.add_argument(
            "--load-in-smooth",
            action="store_true",
            help="Load the FP16 model in smoothquant "
            "8bit format. Throughput at 0.7x of FP16.",
        )
        parser.add_argument(
            "--deepspeed-fp-bits",
            type=int,
            default=None,
            help="Number of floating-point bits to use for DeepSpeed FP "
            "quantization. Supported bits are: 4, 6, 8, 12.")
        parser.add_argument(
            '--rope-scaling',
            default=None,
            type=json.loads,
            help='RoPE scaling configuration in JSON format. '
            'For example, {"type":"dynamic","factor":2.0}')
        parser.add_argument(
            '--rope-theta',
            default=None,
            type=float,
            help='RoPE theta. Use with `rope_scaling`. In '
            'some cases, changing the RoPE theta improves the '
            'performance of the scaled model.')
        parser.add_argument(
            "--enforce-eager",
            type=lambda x: (str(x).lower() == 'true'),
            default=EngineArgs.enforce_eager,
            help="Always use eager-mode PyTorch. If False, "
            "will use eager mode and CUDA graph in hybrid "
            "for maximal performance and flexibility.",
        )
  444. parser.add_argument("--max-context-len-to-capture",
  445. type=int,
  446. default=EngineArgs.max_context_len_to_capture,
  447. help="Maximum context length covered by CUDA "
  448. "graphs. When a sequence has context length "
  449. "larger than this, we fall back to eager mode. "
  450. "(DEPRECATED. Use --max-seq_len-to-capture instead"
  451. ")")
  452. parser.add_argument("--max-seq_len-to-capture",
  453. type=int,
  454. default=EngineArgs.max_seq_len_to_capture,
  455. help="Maximum sequence length covered by CUDA "
  456. "graphs. When a sequence has context length "
  457. "larger than this, we fall back to eager mode.")
        parser.add_argument(
            "--disable-custom-all-reduce",
            action="store_true",
            default=EngineArgs.disable_custom_all_reduce,
            help="See ParallelConfig",
        )
        parser.add_argument(
            "--tokenizer-pool-size",
            type=int,
            default=EngineArgs.tokenizer_pool_size,
            help="Size of tokenizer pool to use for "
            "asynchronous tokenization. If 0, will "
            "use synchronous tokenization.")
        parser.add_argument(
            "--tokenizer-pool-type",
            type=str,
            default=EngineArgs.tokenizer_pool_type,
            help="The type of tokenizer pool to use for "
            "asynchronous tokenization. Ignored if "
            "tokenizer_pool_size is 0.")
        parser.add_argument(
            "--tokenizer-pool-extra-config",
            type=str,
            default=EngineArgs.tokenizer_pool_extra_config,
            help="Extra config for tokenizer pool. "
            "This should be a JSON string that will be "
            "parsed into a dictionary. Ignored if "
            "tokenizer_pool_size is 0.")
        parser.add_argument(
            '--preemption-mode',
            type=str,
            default=None,
            help='If \'recompute\', the engine performs preemption by '
            'recomputing; if \'swap\', the engine performs preemption by '
            'block swapping.')
        # LoRA related configs
        parser.add_argument(
            "--enable-lora",
            action="store_true",
            help="If True, enable handling of LoRA adapters.",
        )
        parser.add_argument(
            "--max-loras",
            type=int,
            default=EngineArgs.max_loras,
            help="Max number of LoRAs in a single batch.",
        )
        parser.add_argument(
            "--max-lora-rank",
            type=int,
            default=EngineArgs.max_lora_rank,
            help="Max LoRA rank.",
        )
        parser.add_argument(
            "--lora-extra-vocab-size",
            type=int,
            default=EngineArgs.lora_extra_vocab_size,
            help=("Maximum size of extra vocabulary that can be "
                  "present in a LoRA adapter (added to the base "
                  "model vocabulary)."),
        )
        parser.add_argument(
            "--lora-dtype",
            type=str,
            default=EngineArgs.lora_dtype,
            choices=["auto", "float16", "bfloat16", "float32"],
            help=("Data type for LoRA. If auto, will default to "
                  "base model dtype."),
        )
        parser.add_argument(
            "--long-lora-scaling-factors",
            type=str,
            default=EngineArgs.long_lora_scaling_factors,
            help=("Specify multiple scaling factors (which can "
                  "be different from base model scaling factor "
                  "- see eg. Long LoRA) to allow for multiple "
                  "LoRA adapters trained with those scaling "
                  "factors to be used at the same time. If not "
                  "specified, only adapters trained with the "
                  "base model scaling factor are allowed."))
        parser.add_argument(
            "--max-cpu-loras",
            type=int,
            default=EngineArgs.max_cpu_loras,
            help=("Maximum number of LoRAs to store in CPU memory. "
                  "Must be >= max_num_seqs. "
                  "Defaults to max_num_seqs."),
        )
        parser.add_argument(
            "--fully-sharded-loras",
            action='store_true',
            help=("By default, only half of the LoRA computation is sharded "
                  "with tensor parallelism. Enabling this will use the fully "
                  "sharded layers. At high sequence length, max rank or "
                  "tensor parallel size, this is likely faster."))
        parser.add_argument(
            "--device",
            type=str,
            default=EngineArgs.device,
            choices=[
                "auto", "cuda", "neuron", "cpu", "openvino", "tpu", "xpu"
            ],
            help=("Device to use for model execution."),
        )
        # Related to Vision-language models such as llava
        parser = EngineArgs.add_cli_args_for_vlm(parser)
        parser.add_argument(
            "--scheduler-delay-factor",
            "-sdf",
            type=float,
            default=EngineArgs.scheduler_delay_factor,
            help="Apply a delay (of delay factor multiplied by previous "
            "prompt latency) before scheduling next prompt.")
        parser.add_argument(
            "--enable-chunked-prefill",
            action="store_true",
            help="If True, the prefill requests can be chunked based on the "
            "max_num_batched_tokens.")
        parser.add_argument(
            "--speculative-model",
            type=str,
            default=EngineArgs.speculative_model,
            help="The name of the draft model to be used in speculative "
            "decoding.")
        parser.add_argument(
            "--speculative-draft-tensor-parallel-size",
            "-spec-draft-tp",
            type=int,
            default=EngineArgs.speculative_draft_tensor_parallel_size,
            help="Number of tensor parallel replicas for "
            "the draft model in speculative decoding.")
        parser.add_argument(
            "--num-speculative-tokens",
            type=int,
            default=EngineArgs.num_speculative_tokens,
            help="The number of speculative tokens to sample from "
            "the draft model in speculative decoding.")
        parser.add_argument(
            "--speculative-max-model-len",
            type=int,
            default=EngineArgs.speculative_max_model_len,
            help="The maximum sequence length supported by the "
            "draft model. Sequences over this length will skip "
            "speculation.")
        parser.add_argument(
            "--speculative-disable-by-batch-size",
            type=int,
            default=EngineArgs.speculative_disable_by_batch_size,
            help="Disable speculative decoding for new incoming requests "
            "if the number of enqueued requests is larger than this value.")
        parser.add_argument(
            "--ngram-prompt-lookup-max",
            type=int,
            default=EngineArgs.ngram_prompt_lookup_max,
            help="Max size of window for ngram prompt lookup in speculative "
            "decoding.")
        parser.add_argument(
            "--ngram-prompt-lookup-min",
            type=int,
            default=EngineArgs.ngram_prompt_lookup_min,
            help="Min size of window for ngram prompt lookup in speculative "
            "decoding.")
        parser.add_argument(
            '--spec-decoding-acceptance-method',
            type=str,
            default=EngineArgs.spec_decoding_acceptance_method,
            choices=['rejection_sampler', 'typical_acceptance_sampler'],
            help='Specify the acceptance method to use during draft token '
            'verification in speculative decoding. Two types of acceptance '
            'routines are supported: '
            '1) RejectionSampler which does not allow changing the '
            'acceptance rate of draft tokens, '
            '2) TypicalAcceptanceSampler which is configurable, allowing for '
            'a higher acceptance rate at the cost of lower quality, '
            'and vice versa.')
        parser.add_argument(
            '--typical-acceptance-sampler-posterior-threshold',
            type=float,
            default=EngineArgs.typical_acceptance_sampler_posterior_threshold,
            help='Set the lower bound threshold for the posterior '
            'probability of a token to be accepted. This threshold is '
            'used by the TypicalAcceptanceSampler to make sampling decisions '
            'during speculative decoding. Defaults to 0.09.')
        parser.add_argument(
            '--typical-acceptance-sampler-posterior-alpha',
            type=float,
            default=EngineArgs.typical_acceptance_sampler_posterior_alpha,
            help='A scaling factor for the entropy-based threshold for token '
            'acceptance in the TypicalAcceptanceSampler. Typically defaults '
            'to sqrt of --typical-acceptance-sampler-posterior-threshold, '
            'i.e. 0.3.')
        parser.add_argument(
            "--model-loader-extra-config",
            type=str,
            default=EngineArgs.model_loader_extra_config,
            help="Extra config for model loader. "
            "This will be passed to the model loader "
            "corresponding to the chosen load_format. "
            "This should be a JSON string that will be "
            "parsed into a dictionary.")
        parser.add_argument(
            "--served-model-name",
            nargs="+",
            type=str,
            default=None,
            help="The model name(s) used in the API. If multiple "
            "names are provided, the server will respond to any "
            "of the provided names. The model name in the model "
            "field of a response will be the first name in this "
            "list. If not specified, the model name will be the "
            "same as the `--model` argument. Note that this name(s) "
            "will also be used in the `model_name` tag content of "
            "Prometheus metrics; if multiple names are provided, the "
            "metrics tag will take the first one.")
  668. parser.add_argument("--qlora-adapter-name-or-path",
  669. type=str,
  670. default=None,
  671. help="Name or path of the LoRA adapter to use.")
  672. return parser

    @classmethod
    def from_cli_args(cls, args: argparse.Namespace) -> "EngineArgs":
        # Get the list of attributes of this dataclass.
        attrs = [attr.name for attr in dataclasses.fields(cls)]
        # Set the attributes from the parsed arguments.
        engine_args = cls(**{attr: getattr(args, attr) for attr in attrs})
        return engine_args
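
    # Example (illustrative only): the usual round trip from CLI flags to an
    # EngineArgs instance. Each dataclass field has a matching parser dest,
    # which is what makes the getattr() loop above safe:
    #
    #     parser = EngineArgs.add_cli_args(argparse.ArgumentParser())
    #     args = parser.parse_args(["--model", "EleutherAI/pythia-70m-deduped",
    #                               "--dtype", "float16"])
    #     engine_args = EngineArgs.from_cli_args(args)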

    def create_engine_config(self) -> EngineConfig:
        # bitsandbytes quantization needs a specific model loader
        # so we make sure the quant method and the load format are consistent
        if (self.quantization == "bitsandbytes" or
                self.qlora_adapter_name_or_path is not None) and \
                self.load_format != "bitsandbytes":
            raise ValueError(
                "BitsAndBytes quantization and QLoRA adapter only support "
                f"'bitsandbytes' load format, but got {self.load_format}")
        if (self.load_format == "bitsandbytes" or
                self.qlora_adapter_name_or_path is not None) and \
                self.quantization != "bitsandbytes":
            raise ValueError(
                "BitsAndBytes load format and QLoRA adapter only support "
                f"'bitsandbytes' quantization, but got {self.quantization}")
        if self.image_token_id is not None:
            if (not self.image_input_shape or not self.image_feature_size):
                raise ValueError(
                    'Specify `image_input_shape` and '
                    '`image_feature_size` together with `image_token_id`.')
            vision_language_config = VisionLanguageConfig(
                image_token_id=self.image_token_id,
                image_input_shape=str_to_int_tuple(self.image_input_shape),
                image_feature_size=self.image_feature_size,
            )
        else:
            vision_language_config = None

        device_config = DeviceConfig(device=self.device)
        model_config = ModelConfig(
            model=self.model,
            tokenizer=self.tokenizer,
            tokenizer_mode=self.tokenizer_mode,
            trust_remote_code=self.trust_remote_code,
            dtype=self.dtype,
            seed=self.seed,
            revision=self.revision,
            code_revision=self.code_revision,
            rope_scaling=self.rope_scaling,
            rope_theta=self.rope_theta,
            tokenizer_revision=self.tokenizer_revision,
            max_model_len=self.max_model_len,
            quantization=self.quantization,
            load_in_4bit=self.load_in_4bit,
            load_in_8bit=self.load_in_8bit,
            load_in_smooth=self.load_in_smooth,
            deepspeed_fp_bits=self.deepspeed_fp_bits,
            quantization_param_path=self.quantization_param_path,
            enforce_eager=self.enforce_eager,
            max_context_len_to_capture=self.max_context_len_to_capture,
            max_seq_len_to_capture=self.max_seq_len_to_capture,
            max_logprobs=self.max_logprobs,
            disable_sliding_window=self.disable_sliding_window,
            skip_tokenizer_init=self.skip_tokenizer_init,
            served_model_name=self.served_model_name,
            multimodal_config=vision_language_config,
        )
        cache_config = CacheConfig(
            block_size=self.block_size,
            gpu_memory_utilization=self.gpu_memory_utilization,
            swap_space=self.swap_space,
            cache_dtype=self.kv_cache_dtype,
            num_gpu_blocks_override=self.num_gpu_blocks_override,
            sliding_window=model_config.get_sliding_window(),
            enable_prefix_caching=self.enable_prefix_caching,
        )
        parallel_config = ParallelConfig(
            pipeline_parallel_size=self.pipeline_parallel_size,
            tensor_parallel_size=self.tensor_parallel_size,
            worker_use_ray=self.worker_use_ray,
            max_parallel_loading_workers=self.max_parallel_loading_workers,
            disable_custom_all_reduce=self.disable_custom_all_reduce,
            tokenizer_pool_config=TokenizerPoolConfig.create_config(
                tokenizer_pool_size=self.tokenizer_pool_size,
                tokenizer_pool_type=self.tokenizer_pool_type,
                tokenizer_pool_extra_config=self.tokenizer_pool_extra_config,
            ),
            ray_workers_use_nsight=self.ray_workers_use_nsight,
            distributed_executor_backend=self.distributed_executor_backend)
        speculative_config = SpeculativeConfig.maybe_create_spec_config(
            target_model_config=model_config,
            target_parallel_config=parallel_config,
            target_dtype=self.dtype,
            speculative_model=self.speculative_model,
            speculative_draft_tensor_parallel_size=(
                self.speculative_draft_tensor_parallel_size),
            num_speculative_tokens=self.num_speculative_tokens,
            speculative_disable_by_batch_size=(
                self.speculative_disable_by_batch_size),
            speculative_max_model_len=self.speculative_max_model_len,
            enable_chunked_prefill=self.enable_chunked_prefill,
            use_v2_block_manager=self.use_v2_block_manager,
            ngram_prompt_lookup_max=self.ngram_prompt_lookup_max,
            ngram_prompt_lookup_min=self.ngram_prompt_lookup_min,
            draft_token_acceptance_method=self.spec_decoding_acceptance_method,
            typical_acceptance_sampler_posterior_threshold=(
                self.typical_acceptance_sampler_posterior_threshold),
            typical_acceptance_sampler_posterior_alpha=(
                self.typical_acceptance_sampler_posterior_alpha),
        )
        scheduler_config = SchedulerConfig(
            max_num_batched_tokens=self.max_num_batched_tokens,
            max_num_seqs=self.max_num_seqs,
            max_model_len=model_config.max_model_len,
            use_v2_block_manager=self.use_v2_block_manager,
            num_lookahead_slots=(self.num_lookahead_slots
                                 if speculative_config is None else
                                 speculative_config.num_lookahead_slots),
            delay_factor=self.scheduler_delay_factor,
            enable_chunked_prefill=self.enable_chunked_prefill,
            embedding_mode=model_config.embedding_mode,
            preemption_mode=self.preemption_mode,
        )
        lora_config = LoRAConfig(
            max_lora_rank=self.max_lora_rank,
            max_loras=self.max_loras,
            fully_sharded_loras=self.fully_sharded_loras,
            lora_extra_vocab_size=self.lora_extra_vocab_size,
            long_lora_scaling_factors=self.long_lora_scaling_factors,
            lora_dtype=self.lora_dtype,
            max_cpu_loras=self.max_cpu_loras if self.max_cpu_loras
            and self.max_cpu_loras > 0 else None) if self.enable_lora else None

        if self.qlora_adapter_name_or_path is not None and \
                self.qlora_adapter_name_or_path != "":
            if self.model_loader_extra_config is None:
                self.model_loader_extra_config = {}
            self.model_loader_extra_config[
                "qlora_adapter_name_or_path"] = self.qlora_adapter_name_or_path

        load_config = LoadConfig(
            load_format=self.load_format,
            download_dir=self.download_dir,
            model_loader_extra_config=self.model_loader_extra_config,
        )
        decoding_config = DecodingConfig(
            guided_decoding_backend=self.guided_decoding_backend)

        if (model_config.get_sliding_window() is not None
                and scheduler_config.chunked_prefill_enabled
                and not scheduler_config.use_v2_block_manager):
            raise ValueError(
                "Chunked prefill is not supported with sliding window. "
                "Set --disable-sliding-window to disable sliding window.")

        return EngineConfig(model_config=model_config,
                            cache_config=cache_config,
                            parallel_config=parallel_config,
                            scheduler_config=scheduler_config,
                            device_config=device_config,
                            lora_config=lora_config,
                            vision_language_config=vision_language_config,
                            speculative_config=speculative_config,
                            load_config=load_config,
                            decoding_config=decoding_config)
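
    # Example (illustrative only): building the full engine configuration in
    # one step. create_engine_config() validates cross-cutting constraints
    # (bitsandbytes/QLoRA load format, VLM flags, sliding window vs. chunked
    # prefill) before returning an EngineConfig:
    #
    #     engine_config = EngineArgs(
    #         model="EleutherAI/pythia-70m-deduped").create_engine_config()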


@dataclass
class AsyncEngineArgs(EngineArgs):
    """Arguments for asynchronous Aphrodite engine."""
    engine_use_ray: bool = False
    disable_log_requests: bool = False
    max_log_len: int = 0
    uvloop: bool = False

    @staticmethod
    def add_cli_args(
            parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        parser = EngineArgs.add_cli_args(parser)
        parser.add_argument(
            "--engine-use-ray",
            action="store_true",
            help="use Ray to start the LLM engine in a "
            "separate process as the server process.",
        )
        parser.add_argument(
            "--disable-log-requests",
            action="store_true",
            help="disable logging requests",
        )
        parser.add_argument(
            "--max-log-len",
            type=int,
            default=0,
            help="max number of prompt characters or prompt "
            "ID numbers being printed in log. "
            "Default: unlimited.",
        )
        parser.add_argument(
            "--uvloop",
            action="store_true",
            help="Use the Uvloop asyncio event loop to possibly increase "
            "performance")
        return parser
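

# Minimal smoke test (illustrative only; assumes the aphrodite package and
# its config classes are importable). Builds the async CLI, parses a
# hypothetical command line, and reconstructs AsyncEngineArgs via the
# inherited from_cli_args():
if __name__ == "__main__":
    demo_parser = AsyncEngineArgs.add_cli_args(argparse.ArgumentParser())
    demo_args = demo_parser.parse_args([
        "--model", "EleutherAI/pythia-70m-deduped",
        "--max-num-seqs", "128",
        "--disable-log-requests",
    ])
    async_engine_args = AsyncEngineArgs.from_cli_args(demo_args)
    print(async_engine_args)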