# args_tools.py
import argparse
import dataclasses
import json
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

from aphrodite.common.config import (CacheConfig, DecodingConfig, DeviceConfig,
                                     EngineConfig, LoadConfig, LoRAConfig,
                                     ModelConfig, MultiModalConfig,
                                     ParallelConfig, PromptAdapterConfig,
                                     SchedulerConfig, SpeculativeConfig,
                                     TokenizerPoolConfig)
from aphrodite.common.utils import is_cpu
from aphrodite.quantization import QUANTIZATION_METHODS


@dataclass
class EngineArgs:
    """Arguments for Aphrodite engine."""
    model: str
    served_model_name: Optional[Union[str, List[str]]] = None
    tokenizer: Optional[str] = None
    skip_tokenizer_init: bool = False
    tokenizer_mode: str = "auto"
    trust_remote_code: bool = False
    download_dir: Optional[str] = None
    load_format: str = "auto"
    dtype: str = "auto"
    kv_cache_dtype: str = "auto"
    quantization_param_path: Optional[str] = None
    seed: int = 0
    max_model_len: Optional[int] = None
    worker_use_ray: Optional[bool] = False
    distributed_executor_backend: Optional[str] = None
    pipeline_parallel_size: int = 1
    tensor_parallel_size: int = 1
    max_parallel_loading_workers: Optional[int] = None
    block_size: int = 16
    enable_prefix_caching: bool = False
    disable_sliding_window: bool = False
    use_v2_block_manager: bool = False
    swap_space: int = 4  # GiB
    gpu_memory_utilization: float = 0.90
    max_num_batched_tokens: Optional[int] = None
    max_num_seqs: int = 256
    max_logprobs: int = 10  # OpenAI default is 5, setting to 10 because ST
    disable_log_stats: bool = False
    revision: Optional[str] = None
    code_revision: Optional[str] = None
    rope_scaling: Optional[dict] = None
    rope_theta: Optional[float] = None
    tokenizer_revision: Optional[str] = None
    quantization: Optional[str] = None
    load_in_4bit: bool = False
    load_in_8bit: bool = False
    load_in_smooth: bool = False
    deepspeed_fp_bits: Optional[int] = None
    enforce_eager: bool = True
    max_context_len_to_capture: Optional[int] = None
    max_seq_len_to_capture: int = 8192
    disable_custom_all_reduce: bool = False
    tokenizer_pool_size: int = 0
    tokenizer_pool_type: str = "ray"
    tokenizer_pool_extra_config: Optional[dict] = None
    enable_lora: bool = False
    max_loras: int = 1
    max_lora_rank: int = 16
    enable_prompt_adapter: bool = False
    max_prompt_adapters: int = 1
    max_prompt_adapter_token: int = 0
    fully_sharded_loras: bool = False
    lora_extra_vocab_size: int = 256
    long_lora_scaling_factors: Optional[Tuple[float]] = None
    lora_dtype: str = "auto"
    max_cpu_loras: Optional[int] = None
    device: str = "auto"
    ray_workers_use_nsight: bool = False
    num_gpu_blocks_override: Optional[int] = None
    num_lookahead_slots: int = 0
    model_loader_extra_config: Optional[dict] = None
    preemption_mode: Optional[str] = None
    # Scheduler config
    scheduler_delay_factor: float = 0.0
    enable_chunked_prefill: bool = False
    guided_decoding_backend: str = 'outlines'
    # Speculative decoding config
    speculative_model: Optional[str] = None
    speculative_draft_tensor_parallel_size: Optional[int] = None
    num_speculative_tokens: Optional[int] = None
    speculative_max_model_len: Optional[int] = None
    speculative_disable_by_batch_size: Optional[int] = None
    ngram_prompt_lookup_max: Optional[int] = None
    ngram_prompt_lookup_min: Optional[int] = None
    spec_decoding_acceptance_method: str = 'rejection_sampler'
    typical_acceptance_sampler_posterior_threshold: Optional[float] = None
    typical_acceptance_sampler_posterior_alpha: Optional[float] = None
    qlora_adapter_name_or_path: Optional[str] = None

    def __post_init__(self):
        if self.tokenizer is None:
            self.tokenizer = self.model
        if is_cpu():
            self.distributed_executor_backend = None

    @staticmethod
    def add_cli_args(
            parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Shared CLI arguments for the Aphrodite engine."""
        # NOTE: If you update any of the arguments below, please also
        # make sure to update docs/source/models/engine_args.rst
        # Model arguments
        parser.add_argument(
            "--model",
            type=str,
            default="EleutherAI/pythia-70m-deduped",
            help="name or path of the huggingface model to use",
        )
        parser.add_argument(
            "--tokenizer",
            type=str,
            default=EngineArgs.tokenizer,
            help="name or path of the huggingface tokenizer to use",
        )
        parser.add_argument(
            "--skip-tokenizer-init",
            action="store_true",
            help="Skip initialization of tokenizer and detokenizer")
        parser.add_argument(
            "--revision",
            type=str,
            default=None,
            help="the specific model version to use. It can be a branch "
            "name, a tag name, or a commit id. If unspecified, will use "
            "the default version.",
        )
        parser.add_argument(
            "--code-revision",
            type=str,
            default=None,
            help="the specific revision to use for the model code on "
            "Hugging Face Hub. It can be a branch name, a tag name, or a "
            "commit id. If unspecified, will use the default version.",
        )
        parser.add_argument(
            "--tokenizer-revision",
            type=str,
            default=None,
            help="the specific tokenizer version to use. It can be a branch "
            "name, a tag name, or a commit id. If unspecified, will use "
            "the default version.",
        )
        parser.add_argument(
            "--tokenizer-mode",
            type=str,
            default=EngineArgs.tokenizer_mode,
            choices=["auto", "slow"],
            help='tokenizer mode. "auto" will use the fast '
            'tokenizer if available, and "slow" will '
            "always use the slow tokenizer.",
        )
        parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="trust remote code from huggingface",
        )
        parser.add_argument(
            "--download-dir",
            type=str,
            default=EngineArgs.download_dir,
            help="directory to download and load the weights, "
            "default to the default cache dir of "
            "huggingface",
        )
        parser.add_argument(
            '--load-format',
            type=str,
            default=EngineArgs.load_format,
            choices=[
                'auto',
                'pt',
                'safetensors',
                'npcache',
                'dummy',
                'tensorizer',
                'sharded_state',
                'bitsandbytes',
            ],
            help='The format of the model weights to load.\n\n'
            '* "auto" will try to load the weights in the safetensors format '
            'and fall back to the pytorch bin format if safetensors format '
            'is not available.\n'
            '* "pt" will load the weights in the pytorch bin format.\n'
            '* "safetensors" will load the weights in the safetensors format.\n'
            '* "npcache" will load the weights in pytorch format and store '
            'a numpy cache to speed up the loading.\n'
            '* "dummy" will initialize the weights with random values, '
            'which is mainly for profiling.\n'
            '* "tensorizer" will load the weights using tensorizer from '
            'CoreWeave. See the Tensorize Aphrodite Model script in the '
            'Examples section for more information.\n'
            '* "bitsandbytes" will load the weights using bitsandbytes '
            'quantization.\n')
        parser.add_argument(
            '--dtype',
            type=str,
            default=EngineArgs.dtype,
            choices=[
                'auto', 'half', 'float16', 'bfloat16', 'float', 'float32'
            ],
            help='Data type for model weights and activations.\n\n'
            '* "auto" will use FP16 precision for FP32 and FP16 models, and '
            'BF16 precision for BF16 models.\n'
            '* "half" for FP16. Recommended for AWQ quantization.\n'
            '* "float16" is the same as "half".\n'
            '* "bfloat16" for a balance between precision and range.\n'
            '* "float" is shorthand for FP32 precision.\n'
            '* "float32" for FP32 precision.')
        parser.add_argument(
            '--kv-cache-dtype',
            type=str,
            choices=['auto', 'fp8', 'fp8_e5m2', 'fp8_e4m3'],
            default=EngineArgs.kv_cache_dtype,
            help='Data type for kv cache storage. If "auto", will use model '
            'data type. CUDA 11.8+ supports fp8 (=fp8_e4m3) and fp8_e5m2. '
            'ROCm (AMD GPU) supports fp8 (=fp8_e4m3)')
        parser.add_argument(
            '--quantization-param-path',
            type=str,
            default=None,
            help='Path to the JSON file containing the KV cache '
            'scaling factors. This should generally be supplied when '
            'KV cache dtype is FP8. Otherwise, KV cache scaling factors '
            'default to 1.0, which may cause accuracy issues. '
            'FP8_E5M2 (without scaling) is only supported on CUDA versions '
            'greater than 11.8. On ROCm (AMD GPU), FP8_E4M3 is instead '
            'supported for common inference criteria.')
        parser.add_argument(
            "--max-model-len",
            type=int,
            default=EngineArgs.max_model_len,
            help="model context length. If unspecified, "
            "will be automatically derived from the model.",
        )
        parser.add_argument(
            '--guided-decoding-backend',
            type=str,
            default='outlines',
            choices=['outlines', 'lm-format-enforcer'],
            help='Which engine will be used for guided decoding'
            ' (JSON schema / regex etc) by default. Currently support '
            'https://github.com/outlines-dev/outlines and '
            'https://github.com/noamgat/lm-format-enforcer.'
            ' Can be overridden per request via guided_decoding_backend'
            ' parameter.')
        # Parallel arguments
        parser.add_argument(
            '--distributed-executor-backend',
            choices=['ray', 'mp'],
            default=EngineArgs.distributed_executor_backend,
            help='Backend to use for distributed serving. When more than 1 GPU '
            'is used, will be automatically set to "ray" if installed '
            'or "mp" (multiprocessing) otherwise.')
        parser.add_argument(
            '--worker-use-ray',
            action='store_true',
            help='Deprecated, use --distributed-executor-backend=ray.')
        parser.add_argument(
            "--pipeline-parallel-size",
            "-pp",
            type=int,
            default=EngineArgs.pipeline_parallel_size,
            help="number of pipeline stages. Currently not supported.")
        parser.add_argument(
            "--tensor-parallel-size",
            "-tp",
            type=int,
            default=EngineArgs.tensor_parallel_size,
            help="number of tensor parallel replicas, i.e. the number of GPUs "
            "to use.")
        parser.add_argument(
            "--max-parallel-loading-workers",
            type=int,
            default=EngineArgs.max_parallel_loading_workers,
            help="load model sequentially in multiple batches, "
            "to avoid RAM OOM when using tensor "
            "parallel and large models",
        )
        parser.add_argument(
            "--ray-workers-use-nsight",
            action="store_true",
            help="If specified, use nsight to profile ray workers",
        )
        # KV cache arguments
        parser.add_argument(
            "--block-size",
            type=int,
            default=EngineArgs.block_size,
            choices=[8, 16, 32],
            help="token block size",
        )
        parser.add_argument(
            "--enable-prefix-caching",
            "--context-shift",
            action="store_true",
            help="Enable automatic prefix caching.",
        )
        parser.add_argument('--disable-sliding-window',
                            action='store_true',
                            help='Disables sliding window, '
                            'capping to sliding window size')
        parser.add_argument("--use-v2-block-manager",
                            action="store_true",
                            help="Use the v2 block manager.")
        parser.add_argument(
            "--num-lookahead-slots",
            type=int,
            default=EngineArgs.num_lookahead_slots,
            help="Experimental scheduling config necessary for "
            "speculative decoding. This will be replaced by "
            "speculative decoding config in the future; it is "
            "present for testing purposes until then.")
        parser.add_argument("--seed",
                            type=int,
                            default=EngineArgs.seed,
                            help="random seed")
        parser.add_argument(
            "--swap-space",
            type=int,
            default=EngineArgs.swap_space,
            help="CPU swap space size (GiB) per GPU",
        )
        parser.add_argument(
            "--gpu-memory-utilization",
            "-gmu",
            type=float,
            default=EngineArgs.gpu_memory_utilization,
            help="the fraction of GPU memory to be used for "
            "the model executor, which can range from 0 to 1. "
            "If unspecified, will use the default value of 0.9.",
        )
        parser.add_argument(
            "--num-gpu-blocks-override",
            type=int,
            default=None,
            help="If specified, ignore GPU profiling result and use this "
            "number of GPU blocks. Used for testing preemption.")
        parser.add_argument(
            "--max-num-batched-tokens",
            type=int,
            default=EngineArgs.max_num_batched_tokens,
            help="maximum number of batched tokens per "
            "iteration",
        )
        parser.add_argument(
            "--max-num-seqs",
            type=int,
            default=EngineArgs.max_num_seqs,
            help="maximum number of sequences per iteration",
        )
        parser.add_argument(
            "--max-logprobs",
            type=int,
            default=EngineArgs.max_logprobs,
            help="maximum number of log probabilities to "
            "return.",
        )
        parser.add_argument(
            "--disable-log-stats",
            action="store_true",
            help="disable logging statistics",
        )
        # Quantization settings.
        parser.add_argument(
            "--quantization",
            "-q",
            type=str,
            choices=[*QUANTIZATION_METHODS, None],
            default=EngineArgs.quantization,
            help="Method used to quantize the weights. If "
            "None, we first check the `quantization_config` "
            "attribute in the model config file. If that is "
            "None, we assume the model weights are not "
            "quantized and use `dtype` to determine the data "
            "type of the weights.",
        )
        parser.add_argument(
            "--load-in-4bit",
            action="store_true",
            help="Load the FP16 model in 4-bit format. Also "
            "works with AWQ models. Throughput at 2.5x of "
            "FP16.",
        )
        parser.add_argument(
            "--load-in-8bit",
            action="store_true",
            help="Load the FP16 model in 8-bit format. "
            "Throughput at 0.3x of FP16.",
        )
        parser.add_argument(
            "--load-in-smooth",
            action="store_true",
            help="Load the FP16 model in smoothquant "
            "8bit format. Throughput at 0.7x of FP16. ",
        )
        parser.add_argument(
            "--deepspeed-fp-bits",
            type=int,
            default=None,
            help="Number of floating bits to use for the deepspeed "
            "quantization. Supported bits are: 4, 6, 8, 12.")
        parser.add_argument('--rope-scaling',
                            default=None,
                            type=json.loads,
                            help='RoPE scaling configuration in JSON format. '
                            'For example, {"type":"dynamic","factor":2.0}')
        parser.add_argument('--rope-theta',
                            default=None,
                            type=float,
                            help='RoPE theta. Use with `rope_scaling`. In '
                            'some cases, changing the RoPE theta improves the '
                            'performance of the scaled model.')
        parser.add_argument(
            "--enforce-eager",
            type=lambda x: (str(x).lower() == 'true'),
            default=EngineArgs.enforce_eager,
            help="Always use eager-mode PyTorch. If False, "
            "will use eager mode and CUDA graph in hybrid "
            "for maximal performance and flexibility.",
        )
        parser.add_argument("--max-context-len-to-capture",
                            type=int,
                            default=EngineArgs.max_context_len_to_capture,
                            help="Maximum context length covered by CUDA "
                            "graphs. When a sequence has context length "
                            "larger than this, we fall back to eager mode. "
                            "(DEPRECATED. Use --max-seq_len-to-capture instead"
                            ")")
        parser.add_argument("--max-seq_len-to-capture",
                            type=int,
                            default=EngineArgs.max_seq_len_to_capture,
                            help="Maximum sequence length covered by CUDA "
                            "graphs. When a sequence has context length "
                            "larger than this, we fall back to eager mode.")
        parser.add_argument(
            "--disable-custom-all-reduce",
            action="store_true",
            default=EngineArgs.disable_custom_all_reduce,
            help="See ParallelConfig",
        )
        parser.add_argument("--tokenizer-pool-size",
                            type=int,
                            default=EngineArgs.tokenizer_pool_size,
                            help="Size of tokenizer pool to use for "
                            "asynchronous tokenization. If 0, will "
                            "use synchronous tokenization.")
        parser.add_argument("--tokenizer-pool-type",
                            type=str,
                            default=EngineArgs.tokenizer_pool_type,
                            help="The type of tokenizer pool to use for "
                            "asynchronous tokenization. Ignored if "
                            "tokenizer_pool_size is 0.")
        parser.add_argument("--tokenizer-pool-extra-config",
                            type=str,
                            default=EngineArgs.tokenizer_pool_extra_config,
                            help="Extra config for tokenizer pool. "
                            "This should be a JSON string that will be "
                            "parsed into a dictionary. Ignored if "
                            "tokenizer_pool_size is 0.")
        parser.add_argument(
            '--preemption-mode',
            type=str,
            default=None,
            help='If \'recompute\', the engine performs preemption by '
            'recomputing; if \'swap\', the engine performs preemption by '
            'block swapping.')
        # LoRA related configs
        parser.add_argument(
            "--enable-lora",
            action="store_true",
            help="If True, enable handling of LoRA adapters.",
        )
        parser.add_argument(
            "--max-loras",
            type=int,
            default=EngineArgs.max_loras,
            help="Max number of LoRAs in a single batch.",
        )
        parser.add_argument(
            "--max-lora-rank",
            type=int,
            default=EngineArgs.max_lora_rank,
            help="Max LoRA rank.",
        )
        parser.add_argument(
            "--lora-extra-vocab-size",
            type=int,
            default=EngineArgs.lora_extra_vocab_size,
            help=("Maximum size of extra vocabulary that can be "
                  "present in a LoRA adapter (added to the base "
                  "model vocabulary)."),
        )
        parser.add_argument(
            "--lora-dtype",
            type=str,
            default=EngineArgs.lora_dtype,
            choices=["auto", "float16", "bfloat16", "float32"],
            help=("Data type for LoRA. If auto, will default to "
                  "base model dtype."),
        )
        parser.add_argument(
            "--long-lora-scaling-factors",
            type=str,
            default=EngineArgs.long_lora_scaling_factors,
            help=("Specify multiple scaling factors (which can "
                  "be different from base model scaling factor "
                  "- see eg. Long LoRA) to allow for multiple "
                  "LoRA adapters trained with those scaling "
                  "factors to be used at the same time. If not "
                  "specified, only adapters trained with the "
                  "base model scaling factor are allowed."))
        parser.add_argument(
            "--max-cpu-loras",
            type=int,
            default=EngineArgs.max_cpu_loras,
            help=("Maximum number of LoRAs to store in CPU memory. "
                  "Must be >= max_num_seqs. "
                  "Defaults to max_num_seqs."),
        )
        parser.add_argument(
            "--fully-sharded-loras",
            action='store_true',
            help=("By default, only half of the LoRA computation is sharded "
                  "with tensor parallelism. Enabling this will use the fully "
                  "sharded layers. At high sequence length, max rank or "
                  "tensor parallel size, this is likely faster."))
        parser.add_argument('--enable-prompt-adapter',
                            action='store_true',
                            help='If True, enable handling of PromptAdapters.')
        parser.add_argument('--max-prompt-adapters',
                            type=int,
                            default=EngineArgs.max_prompt_adapters,
                            help='Max number of PromptAdapters in a batch.')
        parser.add_argument('--max-prompt-adapter-token',
                            type=int,
                            default=EngineArgs.max_prompt_adapter_token,
                            help='Max number of PromptAdapters tokens')
        parser.add_argument(
            "--device",
            type=str,
            default=EngineArgs.device,
            choices=[
                "auto", "cuda", "neuron", "cpu", "openvino", "tpu", "xpu"
            ],
            help=("Device to use for model execution."),
        )
        parser.add_argument(
            "--scheduler-delay-factor",
            "-sdf",
            type=float,
            default=EngineArgs.scheduler_delay_factor,
            help="Apply a delay (of delay factor multiplied by previous "
            "prompt latency) before scheduling next prompt.")
        parser.add_argument(
            "--enable-chunked-prefill",
            action="store_true",
            help="If True, the prefill requests can be chunked based on the "
            "max_num_batched_tokens.")
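        # Speculative decoding arguments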
        parser.add_argument(
            "--speculative-model",
            type=str,
            default=EngineArgs.speculative_model,
            help=
            "The name of the draft model to be used in speculative decoding.")
        parser.add_argument(
            "--speculative-draft-tensor-parallel-size",
            "-spec-draft-tp",
            type=int,
            default=EngineArgs.speculative_draft_tensor_parallel_size,
            help="Number of tensor parallel replicas for "
            "the draft model in speculative decoding.")
        parser.add_argument(
            "--num-speculative-tokens",
            type=int,
            default=EngineArgs.num_speculative_tokens,
            help="The number of speculative tokens to sample from "
            "the draft model in speculative decoding")
        parser.add_argument(
            "--speculative-max-model-len",
            type=int,
            default=EngineArgs.speculative_max_model_len,
            help="The maximum sequence length supported by the "
            "draft model. Sequences over this length will skip "
            "speculation.")
        parser.add_argument(
            "--speculative-disable-by-batch-size",
            type=int,
            default=EngineArgs.speculative_disable_by_batch_size,
            help="Disable speculative decoding for new incoming requests "
            "if the number of enqueued requests is larger than this value.")
        parser.add_argument(
            "--ngram-prompt-lookup-max",
            type=int,
            default=EngineArgs.ngram_prompt_lookup_max,
            help="Max size of window for ngram prompt lookup in speculative "
            "decoding.")
        parser.add_argument(
            "--ngram-prompt-lookup-min",
            type=int,
            default=EngineArgs.ngram_prompt_lookup_min,
            help="Min size of window for ngram prompt lookup in speculative "
            "decoding.")
        parser.add_argument(
            '--spec-decoding-acceptance-method',
            type=str,
            default=EngineArgs.spec_decoding_acceptance_method,
            choices=['rejection_sampler', 'typical_acceptance_sampler'],
            help='Specify the acceptance method to use during draft token '
            'verification in speculative decoding. Two types of acceptance '
            'routines are supported: '
            '1) RejectionSampler which does not allow changing the '
            'acceptance rate of draft tokens, '
            '2) TypicalAcceptanceSampler which is configurable, allowing for '
            'a higher acceptance rate at the cost of lower quality, '
            'and vice versa.')
        parser.add_argument(
            '--typical-acceptance-sampler-posterior-threshold',
            type=float,
            default=EngineArgs.typical_acceptance_sampler_posterior_threshold,
            help='Set the lower bound threshold for the posterior '
            'probability of a token to be accepted. This threshold is '
            'used by the TypicalAcceptanceSampler to make sampling decisions '
            'during speculative decoding. Defaults to 0.09')
        parser.add_argument(
            '--typical-acceptance-sampler-posterior-alpha',
            type=float,
            default=EngineArgs.typical_acceptance_sampler_posterior_alpha,
            help='A scaling factor for the entropy-based threshold for token '
            'acceptance in the TypicalAcceptanceSampler. Typically defaults '
            'to sqrt of --typical-acceptance-sampler-posterior-threshold '
            'i.e. 0.3')
        parser.add_argument("--model-loader-extra-config",
                            type=str,
                            default=EngineArgs.model_loader_extra_config,
                            help="Extra config for model loader. "
                            "This will be passed to the model loader "
                            "corresponding to the chosen load_format. "
                            "This should be a JSON string that will be "
                            "parsed into a dictionary.")
        parser.add_argument(
            "--served-model-name",
            nargs="+",
            type=str,
            default=None,
            help="The model name(s) used in the API. If multiple "
            "names are provided, the server will respond to any "
            "of the provided names. The model name in the model "
            "field of a response will be the first name in this "
            "list. If not specified, the model name will be the "
            "same as the `--model` argument. Note that this name(s) "
            "will also be used in the `model_name` tag content of "
            "prometheus metrics; if multiple names are provided, the "
            "metrics tag will take the first one.")
        parser.add_argument("--qlora-adapter-name-or-path",
                            type=str,
                            default=None,
                            help="Name or path of the LoRA adapter to use.")
        return parser

    @classmethod
    def from_cli_args(cls, args: argparse.Namespace) -> "EngineArgs":
        # Get the list of attributes of this dataclass.
        attrs = [attr.name for attr in dataclasses.fields(cls)]
        # Set the attributes from the parsed arguments.
        engine_args = cls(**{attr: getattr(args, attr) for attr in attrs})
        return engine_args

    def create_engine_config(self) -> EngineConfig:
        # bitsandbytes quantization needs a specific model loader
        # so we make sure the quant method and the load format are consistent
        if (self.quantization == "bitsandbytes" or
                self.qlora_adapter_name_or_path is not None) and \
                self.load_format != "bitsandbytes":
            raise ValueError(
                "BitsAndBytes quantization and QLoRA adapter only support "
                f"'bitsandbytes' load format, but got {self.load_format}")
        if (self.load_format == "bitsandbytes" or
                self.qlora_adapter_name_or_path is not None) and \
                self.quantization != "bitsandbytes":
            raise ValueError(
                "BitsAndBytes load format and QLoRA adapter only support "
                f"'bitsandbytes' quantization, but got {self.quantization}")
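
        # Cross-argument checks passed; build each per-subsystem config
        # object below and compose them into a single EngineConfig.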
        multimodal_config = MultiModalConfig()
        device_config = DeviceConfig(device=self.device)
        model_config = ModelConfig(
            model=self.model,
            tokenizer=self.tokenizer,
            tokenizer_mode=self.tokenizer_mode,
            trust_remote_code=self.trust_remote_code,
            dtype=self.dtype,
            seed=self.seed,
            revision=self.revision,
            code_revision=self.code_revision,
            rope_scaling=self.rope_scaling,
            rope_theta=self.rope_theta,
            tokenizer_revision=self.tokenizer_revision,
            max_model_len=self.max_model_len,
            quantization=self.quantization,
            load_in_4bit=self.load_in_4bit,
            load_in_8bit=self.load_in_8bit,
            load_in_smooth=self.load_in_smooth,
            deepspeed_fp_bits=self.deepspeed_fp_bits,
            quantization_param_path=self.quantization_param_path,
            enforce_eager=self.enforce_eager,
            max_context_len_to_capture=self.max_context_len_to_capture,
            max_seq_len_to_capture=self.max_seq_len_to_capture,
            max_logprobs=self.max_logprobs,
            disable_sliding_window=self.disable_sliding_window,
            skip_tokenizer_init=self.skip_tokenizer_init,
            served_model_name=self.served_model_name,
            multimodal_config=multimodal_config,
        )
        cache_config = CacheConfig(
            block_size=self.block_size,
            gpu_memory_utilization=self.gpu_memory_utilization,
            swap_space=self.swap_space,
            cache_dtype=self.kv_cache_dtype,
            num_gpu_blocks_override=self.num_gpu_blocks_override,
            sliding_window=model_config.get_sliding_window(),
            enable_prefix_caching=self.enable_prefix_caching,
        )
        parallel_config = ParallelConfig(
            pipeline_parallel_size=self.pipeline_parallel_size,
            tensor_parallel_size=self.tensor_parallel_size,
            worker_use_ray=self.worker_use_ray,
            max_parallel_loading_workers=self.max_parallel_loading_workers,
            disable_custom_all_reduce=self.disable_custom_all_reduce,
            tokenizer_pool_config=TokenizerPoolConfig.create_config(
                tokenizer_pool_size=self.tokenizer_pool_size,
                tokenizer_pool_type=self.tokenizer_pool_type,
                tokenizer_pool_extra_config=self.tokenizer_pool_extra_config,
            ),
            ray_workers_use_nsight=self.ray_workers_use_nsight,
            distributed_executor_backend=self.distributed_executor_backend)
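
        # maybe_create_spec_config returns None unless speculative decoding
        # is actually configured; when present, its lookahead-slot requirement
        # replaces --num-lookahead-slots in the scheduler config below.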
        speculative_config = SpeculativeConfig.maybe_create_spec_config(
            target_model_config=model_config,
            target_parallel_config=parallel_config,
            target_dtype=self.dtype,
            speculative_model=self.speculative_model,
            speculative_draft_tensor_parallel_size=self.
            speculative_draft_tensor_parallel_size,
            num_speculative_tokens=self.num_speculative_tokens,
            speculative_disable_by_batch_size=self.
            speculative_disable_by_batch_size,
            speculative_max_model_len=self.speculative_max_model_len,
            enable_chunked_prefill=self.enable_chunked_prefill,
            use_v2_block_manager=self.use_v2_block_manager,
            ngram_prompt_lookup_max=self.ngram_prompt_lookup_max,
            ngram_prompt_lookup_min=self.ngram_prompt_lookup_min,
            draft_token_acceptance_method=\
                self.spec_decoding_acceptance_method,
            typical_acceptance_sampler_posterior_threshold=self.
            typical_acceptance_sampler_posterior_threshold,
            typical_acceptance_sampler_posterior_alpha=self.
            typical_acceptance_sampler_posterior_alpha,
        )
        scheduler_config = SchedulerConfig(
            max_num_batched_tokens=self.max_num_batched_tokens,
            max_num_seqs=self.max_num_seqs,
            max_model_len=model_config.max_model_len,
            use_v2_block_manager=self.use_v2_block_manager,
            num_lookahead_slots=(self.num_lookahead_slots
                                 if speculative_config is None else
                                 speculative_config.num_lookahead_slots),
            delay_factor=self.scheduler_delay_factor,
            enable_chunked_prefill=self.enable_chunked_prefill,
            embedding_mode=model_config.embedding_mode,
            preemption_mode=self.preemption_mode,
        )
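
        # LoRA and prompt-adapter support are optional; their config objects
        # stay None unless the corresponding --enable-* flag was given.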
        lora_config = LoRAConfig(
            max_lora_rank=self.max_lora_rank,
            max_loras=self.max_loras,
            fully_sharded_loras=self.fully_sharded_loras,
            lora_extra_vocab_size=self.lora_extra_vocab_size,
            long_lora_scaling_factors=self.long_lora_scaling_factors,
            lora_dtype=self.lora_dtype,
            max_cpu_loras=self.max_cpu_loras if self.max_cpu_loras
            and self.max_cpu_loras > 0 else None) if self.enable_lora else None
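
        # Forward the QLoRA adapter path to the model loader through the
        # loader's extra config.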
        if self.qlora_adapter_name_or_path is not None and \
                self.qlora_adapter_name_or_path != "":
            if self.model_loader_extra_config is None:
                self.model_loader_extra_config = {}
            self.model_loader_extra_config[
                "qlora_adapter_name_or_path"] = self.qlora_adapter_name_or_path

        load_config = LoadConfig(
            load_format=self.load_format,
            download_dir=self.download_dir,
            model_loader_extra_config=self.model_loader_extra_config,
        )
        prompt_adapter_config = PromptAdapterConfig(
            max_prompt_adapters=self.max_prompt_adapters,
            max_prompt_adapter_token=self.max_prompt_adapter_token) \
            if self.enable_prompt_adapter else None
        decoding_config = DecodingConfig(
            guided_decoding_backend=self.guided_decoding_backend)

        if (model_config.get_sliding_window() is not None
                and scheduler_config.chunked_prefill_enabled
                and not scheduler_config.use_v2_block_manager):
            raise ValueError(
                "Chunked prefill is not supported with sliding window. "
                "Set --disable-sliding-window to disable sliding window.")

        return EngineConfig(model_config=model_config,
                            cache_config=cache_config,
                            parallel_config=parallel_config,
                            scheduler_config=scheduler_config,
                            device_config=device_config,
                            lora_config=lora_config,
                            multimodal_config=multimodal_config,
                            speculative_config=speculative_config,
                            load_config=load_config,
                            decoding_config=decoding_config,
                            prompt_adapter_config=prompt_adapter_config)


@dataclass
class AsyncEngineArgs(EngineArgs):
    """Arguments for asynchronous Aphrodite engine."""
    engine_use_ray: bool = False
    disable_log_requests: bool = False
    max_log_len: int = 0
    uvloop: bool = False

    @staticmethod
    def add_cli_args(
            parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        parser = EngineArgs.add_cli_args(parser)
        parser.add_argument(
            "--engine-use-ray",
            action="store_true",
            help="use Ray to start the LLM engine in a "
            "separate process as the server process.",
        )
        parser.add_argument(
            "--disable-log-requests",
            action="store_true",
            help="disable logging requests",
        )
        parser.add_argument(
            "--max-log-len",
            type=int,
            default=0,
            help="max number of prompt characters or prompt "
            "ID numbers being printed in log. "
            "Default: unlimited.",
        )
        parser.add_argument(
            "--uvloop",
            action="store_true",
            help="Use the Uvloop asyncio event loop to possibly increase "
            "performance")
        return parser
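

# The block below is an illustrative usage sketch, not part of the original
# module: it shows the intended wiring of add_cli_args -> from_cli_args ->
# create_engine_config using only names defined above. Running it requires a
# working aphrodite installation, and building the config will touch the model
# files (network access or a locally cached model).
if __name__ == "__main__":
    example_parser = argparse.ArgumentParser(
        description="Aphrodite engine arguments (illustrative sketch)")
    example_parser = EngineArgs.add_cli_args(example_parser)
    parsed = example_parser.parse_args()
    engine_args = EngineArgs.from_cli_args(parsed)
    engine_config = engine_args.create_engine_config()
    print(f"Parsed arguments for model {engine_args.model!r}; "
          f"built {type(engine_config).__name__}")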