args_tools.py

import argparse
import dataclasses
import json
from dataclasses import dataclass
from typing import Optional, Tuple

from aphrodite.common.config import (CacheConfig, DecodingConfig, DeviceConfig,
                                     EngineConfig, LoadConfig, LoRAConfig,
                                     ModelConfig, ParallelConfig,
                                     SchedulerConfig, SpeculativeConfig,
                                     TokenizerPoolConfig, VisionLanguageConfig)
from aphrodite.common.utils import is_cpu, str_to_int_tuple
from aphrodite.quantization import QUANTIZATION_METHODS


@dataclass
class EngineArgs:
    """Arguments for Aphrodite engine."""
    model: str
    tokenizer: Optional[str] = None
    skip_tokenizer_init: bool = False
    tokenizer_mode: str = "auto"
    trust_remote_code: bool = False
    download_dir: Optional[str] = None
    load_format: str = "auto"
    dtype: str = "auto"
    kv_cache_dtype: str = "auto"
    quantization_param_path: Optional[str] = None
    seed: int = 0
    max_model_len: Optional[int] = None
    worker_use_ray: Optional[bool] = False
    distributed_executor_backend: Optional[str] = "mp"
    pipeline_parallel_size: int = 1
    tensor_parallel_size: int = 1
    max_parallel_loading_workers: Optional[int] = None
    block_size: int = 16
    enable_prefix_caching: bool = False
    disable_sliding_window: bool = False
    use_v2_block_manager: bool = False
    swap_space: int = 4  # GiB
    gpu_memory_utilization: float = 0.90
    max_num_batched_tokens: Optional[int] = None
    max_num_seqs: int = 256
    max_logprobs: int = 10  # OpenAI default is 5, setting to 10 because ST
    disable_log_stats: bool = False
    revision: Optional[str] = None
    code_revision: Optional[str] = None
    rope_scaling: Optional[dict] = None
    tokenizer_revision: Optional[str] = None
    quantization: Optional[str] = None
    load_in_4bit: bool = False
    load_in_8bit: bool = False
    load_in_smooth: bool = False
    deepspeed_fp_bits: Optional[int] = None
    enforce_eager: bool = True
    max_context_len_to_capture: Optional[int] = None
    max_seq_len_to_capture: int = 8192
    disable_custom_all_reduce: bool = False
    tokenizer_pool_size: int = 0
    tokenizer_pool_type: str = "ray"
    tokenizer_pool_extra_config: Optional[dict] = None
    enable_lora: bool = False
    max_loras: int = 1
    max_lora_rank: int = 16
    fully_sharded_loras: bool = False
    lora_extra_vocab_size: int = 256
    long_lora_scaling_factors: Optional[Tuple[float]] = None
    lora_dtype: str = "auto"
    max_cpu_loras: Optional[int] = None
    device: str = "auto"
    ray_workers_use_nsight: bool = False
    num_gpu_blocks_override: Optional[int] = None
    num_lookahead_slots: int = 0
    model_loader_extra_config: Optional[dict] = None
    # Related to Vision-language models such as llava
    image_input_type: Optional[str] = None
    image_token_id: Optional[int] = None
    image_input_shape: Optional[str] = None
    image_feature_size: Optional[int] = None
    scheduler_delay_factor: float = 0.0
    enable_chunked_prefill: bool = False
    guided_decoding_backend: str = 'outlines'
    # Speculative decoding config
    speculative_model: Optional[str] = None
    num_speculative_tokens: Optional[int] = None
    speculative_max_model_len: Optional[int] = None
    speculative_disable_by_batch_size: Optional[int] = None
    ngram_prompt_lookup_max: Optional[int] = None
    ngram_prompt_lookup_min: Optional[int] = None

    def __post_init__(self):
        if self.tokenizer is None:
            self.tokenizer = self.model
        if is_cpu():
            self.distributed_executor_backend = None
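
    # Illustrative sketch (not part of this module): constructing EngineArgs
    # directly in Python and turning it into an engine config. The model name
    # below is just the CLI default, used here as a placeholder.
    #
    #     engine_args = EngineArgs(model="EleutherAI/pythia-70m-deduped",
    #                              dtype="float16",
    #                              gpu_memory_utilization=0.85)
    #     engine_config = engine_args.create_engine_config()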

    @staticmethod
    def add_cli_args(
            parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Shared CLI arguments for the Aphrodite engine."""
        # NOTE: If you update any of the arguments below, please also
        # make sure to update docs/source/models/engine_args.rst
        # Model arguments
        parser.add_argument(
            "--model",
            type=str,
            default="EleutherAI/pythia-70m-deduped",
            help="name or path of the huggingface model to use",
        )
        parser.add_argument(
            "--tokenizer",
            type=str,
            default=EngineArgs.tokenizer,
            help="name or path of the huggingface tokenizer to use",
        )
        parser.add_argument(
            "--skip-tokenizer-init",
            action="store_true",
            help="Skip initialization of tokenizer and detokenizer")
        parser.add_argument(
            "--revision",
            type=str,
            default=None,
            help="the specific model version to use. It can be a branch "
            "name, a tag name, or a commit id. If unspecified, will use "
            "the default version.",
        )
        parser.add_argument(
            "--code-revision",
            type=str,
            default=None,
            help="the specific revision to use for the model code on "
            "Hugging Face Hub. It can be a branch name, a tag name, or a "
            "commit id. If unspecified, will use the default version.",
        )
        parser.add_argument(
            "--tokenizer-revision",
            type=str,
            default=None,
            help="the specific tokenizer version to use. It can be a branch "
            "name, a tag name, or a commit id. If unspecified, will use "
            "the default version.",
        )
        parser.add_argument(
            "--tokenizer-mode",
            type=str,
            default=EngineArgs.tokenizer_mode,
            choices=["auto", "slow"],
            help='tokenizer mode. "auto" will use the fast '
            'tokenizer if available, and "slow" will '
            "always use the slow tokenizer.",
        )
        parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="trust remote code from huggingface",
        )
        parser.add_argument(
            "--download-dir",
            type=str,
            default=EngineArgs.download_dir,
            help="directory to download and load the weights; "
            "defaults to the default cache dir of "
            "huggingface",
        )
        parser.add_argument(
            '--load-format',
            type=str,
            default=EngineArgs.load_format,
            choices=[
                'auto',
                'pt',
                'safetensors',
                'npcache',
                'dummy',
                'tensorizer',
                'sharded_state',
            ],
            help='The format of the model weights to load.\n\n'
            '* "auto" will try to load the weights in the safetensors format '
            'and fall back to the pytorch bin format if safetensors format '
            'is not available.\n'
            '* "pt" will load the weights in the pytorch bin format.\n'
            '* "safetensors" will load the weights in the safetensors format.\n'
            '* "npcache" will load the weights in pytorch format and store '
            'a numpy cache to speed up the loading.\n'
            '* "dummy" will initialize the weights with random values, '
            'which is mainly for profiling.\n'
            '* "tensorizer" will load the weights using tensorizer from '
            'CoreWeave. See the Tensorize Aphrodite Model script in the '
            'Examples section for more information.\n'
            '* "sharded_state" will load weights from pre-sharded checkpoint '
            'files, supporting efficient loading of tensor-parallel models.\n')
        parser.add_argument(
            '--dtype',
            type=str,
            default=EngineArgs.dtype,
            choices=[
                'auto', 'half', 'float16', 'bfloat16', 'float', 'float32'
            ],
            help='Data type for model weights and activations.\n\n'
            '* "auto" will use FP16 precision for FP32 and FP16 models, and '
            'BF16 precision for BF16 models.\n'
            '* "half" for FP16. Recommended for AWQ quantization.\n'
            '* "float16" is the same as "half".\n'
            '* "bfloat16" for a balance between precision and range.\n'
            '* "float" is shorthand for FP32 precision.\n'
            '* "float32" for FP32 precision.')
        parser.add_argument(
            '--kv-cache-dtype',
            type=str,
            choices=['auto', 'fp8', 'fp8_e5m2', 'fp8_e4m3'],
            default=EngineArgs.kv_cache_dtype,
            help='Data type for kv cache storage. If "auto", will use model '
            'data type. CUDA 11.8+ supports fp8 (=fp8_e4m3) and fp8_e5m2. '
            'ROCm (AMD GPU) supports fp8 (=fp8_e4m3).')
        parser.add_argument(
            '--quantization-param-path',
            type=str,
            default=None,
            help='Path to the JSON file containing the KV cache '
            'scaling factors. This should generally be supplied when '
            'the KV cache dtype is FP8; otherwise, KV cache scaling factors '
            'default to 1.0, which may cause accuracy issues. '
            'FP8_E5M2 (without scaling) is only supported on CUDA versions '
            'greater than 11.8. On ROCm (AMD GPU), FP8_E4M3 is instead '
            'supported for common inference criteria.')
        parser.add_argument(
            "--max-model-len",
            type=int,
            default=EngineArgs.max_model_len,
            help="model context length. If unspecified, "
            "will be automatically derived from the model.",
        )
        parser.add_argument(
            '--guided-decoding-backend',
            type=str,
            default='outlines',
            choices=['outlines', 'lm-format-enforcer'],
            help='Which engine will be used for guided decoding'
            ' (JSON schema / regex etc.) by default. Currently supports '
            'https://github.com/outlines-dev/outlines and '
            'https://github.com/noamgat/lm-format-enforcer.'
            ' Can be overridden per request via the guided_decoding_backend'
            ' parameter.')
        # Parallel arguments
        parser.add_argument(
            '--distributed-executor-backend',
            choices=['ray', 'mp'],
            default=EngineArgs.distributed_executor_backend,
            help='Backend to use for distributed serving. When more than 1 GPU '
            'is used, will be automatically set to "ray" if installed '
            'or "mp" (multiprocessing) otherwise.')
        parser.add_argument(
            '--worker-use-ray',
            action='store_true',
            help='Deprecated, use --distributed-executor-backend=ray.')
        parser.add_argument(
            "--pipeline-parallel-size",
            "-pp",
            type=int,
            default=EngineArgs.pipeline_parallel_size,
            help="number of pipeline stages. Currently not supported.")
        parser.add_argument(
            "--tensor-parallel-size",
            "-tp",
            type=int,
            default=EngineArgs.tensor_parallel_size,
            help="number of tensor parallel replicas, i.e. the number of GPUs "
            "to use.")
        parser.add_argument(
            "--max-parallel-loading-workers",
            type=int,
            default=EngineArgs.max_parallel_loading_workers,
            help="load model sequentially in multiple batches, "
            "to avoid RAM OOM when using tensor "
            "parallel and large models",
        )
        parser.add_argument(
            "--ray-workers-use-nsight",
            action="store_true",
            help="If specified, use nsight to profile ray workers",
        )
        # KV cache arguments
        parser.add_argument(
            "--block-size",
            type=int,
            default=EngineArgs.block_size,
            choices=[8, 16, 32],
            help="token block size",
        )
        parser.add_argument(
            "--enable-prefix-caching",
            "--context-shift",
            action="store_true",
            help="Enable automatic prefix caching.",
        )
        parser.add_argument(
            '--disable-sliding-window',
            action='store_true',
            help='Disables sliding window, capping the max model length '
            'to the sliding window size.')
        parser.add_argument(
            "--use-v2-block-manager",
            action="store_true",
            help="Use the v2 block manager.")
        parser.add_argument(
            "--num-lookahead-slots",
            type=int,
            default=EngineArgs.num_lookahead_slots,
            help="Experimental scheduling config necessary for "
            "speculative decoding. This will be replaced by "
            "speculative decoding config in the future; it is "
            "present for testing purposes until then.")
        parser.add_argument(
            "--seed",
            type=int,
            default=EngineArgs.seed,
            help="random seed")
        parser.add_argument(
            "--swap-space",
            type=int,
            default=EngineArgs.swap_space,
            help="CPU swap space size (GiB) per GPU",
        )
        parser.add_argument(
            "--gpu-memory-utilization",
            "-gmu",
            type=float,
            default=EngineArgs.gpu_memory_utilization,
            help="the fraction of GPU memory to be used for "
            "the model executor, which can range from 0 to 1. "
            "If unspecified, will use the default value of 0.9.",
        )
        parser.add_argument(
            "--num-gpu-blocks-override",
            type=int,
            default=None,
            help="If specified, ignore GPU profiling result and use this "
            "number of GPU blocks. Used for testing preemption.")
        parser.add_argument(
            "--max-num-batched-tokens",
            type=int,
            default=EngineArgs.max_num_batched_tokens,
            help="maximum number of batched tokens per "
            "iteration",
        )
        parser.add_argument(
            "--max-num-seqs",
            type=int,
            default=EngineArgs.max_num_seqs,
            help="maximum number of sequences per iteration",
        )
        parser.add_argument(
            "--max-logprobs",
            type=int,
            default=EngineArgs.max_logprobs,
            help="maximum number of log probabilities to "
            "return.",
        )
        parser.add_argument(
            "--disable-log-stats",
            action="store_true",
            help="disable logging statistics",
        )
        # Quantization settings.
        parser.add_argument(
            "--quantization",
            "-q",
            type=str,
            choices=[*QUANTIZATION_METHODS, None],
            default=EngineArgs.quantization,
            help="Method used to quantize the weights. If "
            "None, we first check the `quantization_config` "
            "attribute in the model config file. If that is "
            "None, we assume the model weights are not "
            "quantized and use `dtype` to determine the data "
            "type of the weights.",
        )
        parser.add_argument(
            "--load-in-4bit",
            action="store_true",
            help="Load the FP16 model in 4-bit format. Also "
            "works with AWQ models. Throughput at 2.5x of "
            "FP16.",
        )
        parser.add_argument(
            "--load-in-8bit",
            action="store_true",
            help="Load the FP16 model in 8-bit format. "
            "Throughput at 0.3x of FP16.",
        )
        parser.add_argument(
            "--load-in-smooth",
            action="store_true",
            help="Load the FP16 model in smoothquant "
            "8bit format. Throughput at 0.7x of FP16.",
        )
        parser.add_argument(
            "--deepspeed-fp-bits",
            type=int,
            default=None,
            help="Number of floating bits to use for the deepspeed "
            "quantization. Supported bits are: 4, 6, 8, 12.")
        parser.add_argument(
            '--rope-scaling',
            default=None,
            type=json.loads,
            help='RoPE scaling configuration in JSON format. '
            'For example, {"type":"dynamic","factor":2.0}')
        parser.add_argument(
            "--enforce-eager",
            type=lambda x: (str(x).lower() == 'true'),
            default=EngineArgs.enforce_eager,
            help="Always use eager-mode PyTorch. If False, "
            "will use eager mode and CUDA graph in hybrid "
            "for maximal performance and flexibility.",
        )
        parser.add_argument(
            "--max-context-len-to-capture",
            type=int,
            default=EngineArgs.max_context_len_to_capture,
            help="Maximum context length covered by CUDA "
            "graphs. When a sequence has context length "
            "larger than this, we fall back to eager mode. "
            "(DEPRECATED. Use --max-seq-len-to-capture instead)")
        parser.add_argument(
            "--max-seq-len-to-capture",
            type=int,
            default=EngineArgs.max_seq_len_to_capture,
            help="Maximum sequence length covered by CUDA "
            "graphs. When a sequence has context length "
            "larger than this, we fall back to eager mode.")
        parser.add_argument(
            "--disable-custom-all-reduce",
            action="store_true",
            default=EngineArgs.disable_custom_all_reduce,
            help="See ParallelConfig",
        )
        parser.add_argument(
            "--tokenizer-pool-size",
            type=int,
            default=EngineArgs.tokenizer_pool_size,
            help="Size of tokenizer pool to use for "
            "asynchronous tokenization. If 0, will "
            "use synchronous tokenization.")
        parser.add_argument(
            "--tokenizer-pool-type",
            type=str,
            default=EngineArgs.tokenizer_pool_type,
            help="The type of tokenizer pool to use for "
            "asynchronous tokenization. Ignored if "
            "tokenizer_pool_size is 0.")
        parser.add_argument(
            "--tokenizer-pool-extra-config",
            type=str,
            default=EngineArgs.tokenizer_pool_extra_config,
            help="Extra config for tokenizer pool. "
            "This should be a JSON string that will be "
            "parsed into a dictionary. Ignored if "
            "tokenizer_pool_size is 0.")
        # LoRA related configs
        parser.add_argument(
            "--enable-lora",
            action="store_true",
            help="If True, enable handling of LoRA adapters.",
        )
        parser.add_argument(
            "--max-loras",
            type=int,
            default=EngineArgs.max_loras,
            help="Max number of LoRAs in a single batch.",
        )
        parser.add_argument(
            "--max-lora-rank",
            type=int,
            default=EngineArgs.max_lora_rank,
            help="Max LoRA rank.",
        )
        parser.add_argument(
            "--lora-extra-vocab-size",
            type=int,
            default=EngineArgs.lora_extra_vocab_size,
            help=("Maximum size of extra vocabulary that can be "
                  "present in a LoRA adapter (added to the base "
                  "model vocabulary)."),
        )
        parser.add_argument(
            "--lora-dtype",
            type=str,
            default=EngineArgs.lora_dtype,
            choices=["auto", "float16", "bfloat16", "float32"],
            help=("Data type for LoRA. If auto, will default to "
                  "base model dtype."),
        )
        parser.add_argument(
            "--long-lora-scaling-factors",
            type=str,
            default=EngineArgs.long_lora_scaling_factors,
            help=("Specify multiple scaling factors (which can "
                  "be different from the base model scaling factor "
                  "- see e.g. Long LoRA) to allow for multiple "
                  "LoRA adapters trained with those scaling "
                  "factors to be used at the same time. If not "
                  "specified, only adapters trained with the "
                  "base model scaling factor are allowed."))
        parser.add_argument(
            "--max-cpu-loras",
            type=int,
            default=EngineArgs.max_cpu_loras,
            help=("Maximum number of LoRAs to store in CPU memory. "
                  "Must be greater than or equal to max_num_seqs. "
                  "Defaults to max_num_seqs."),
        )
        parser.add_argument(
            "--fully-sharded-loras",
            action='store_true',
            help=("By default, only half of the LoRA computation is sharded "
                  "with tensor parallelism. Enabling this will use the fully "
                  "sharded layers. At high sequence length, max rank or "
                  "tensor parallel size, this is likely faster."))
        parser.add_argument(
            "--device",
            type=str,
            default=EngineArgs.device,
            choices=["auto", "cuda", "neuron", "cpu"],
            help="Device to use for model execution.",
        )
        # Related to Vision-language models such as llava
        parser.add_argument(
            "--image-input-type",
            type=str,
            default=None,
            choices=[
                t.name.lower() for t in VisionLanguageConfig.ImageInputType
            ],
            help=("The image input type passed into Aphrodite. "
                  "Should be one of `pixel_values` or `image_features`."))
        parser.add_argument(
            "--image-token-id",
            type=int,
            default=None,
            help="Input id for image token.")
        parser.add_argument(
            '--image-input-shape',
            type=str,
            default=None,
            help=(
                'The biggest image input shape (worst for memory footprint) '
                'given an input type. Only used for Aphrodite\'s profile_run.'
            ))
        parser.add_argument(
            '--image-feature-size',
            type=int,
            default=None,
            help='The image feature size along the context dimension.')
        parser.add_argument(
            "--scheduler-delay-factor",
            "-sdf",
            type=float,
            default=EngineArgs.scheduler_delay_factor,
            help="Apply a delay (of delay factor multiplied by previous "
            "prompt latency) before scheduling the next prompt.")
        parser.add_argument(
            "--enable-chunked-prefill",
            action="store_true",
            help="If True, the prefill requests can be chunked based on the "
            "max_num_batched_tokens.")
        parser.add_argument(
            "--speculative-model",
            type=str,
            default=EngineArgs.speculative_model,
            help="The name of the draft model to be used in speculative "
            "decoding.")
        parser.add_argument(
            "--num-speculative-tokens",
            type=int,
            default=EngineArgs.num_speculative_tokens,
            help="The number of speculative tokens to sample from "
            "the draft model in speculative decoding.")
        parser.add_argument(
            "--speculative-max-model-len",
            type=int,
            default=EngineArgs.speculative_max_model_len,
            help="The maximum sequence length supported by the "
            "draft model. Sequences over this length will skip "
            "speculation.")
        parser.add_argument(
            "--speculative-disable-by-batch-size",
            type=int,
            default=EngineArgs.speculative_disable_by_batch_size,
            help="Disable speculative decoding for new incoming requests "
            "if the number of enqueued requests is larger than this value.")
        parser.add_argument(
            "--ngram-prompt-lookup-max",
            type=int,
            default=EngineArgs.ngram_prompt_lookup_max,
            help="Max size of window for ngram prompt lookup in speculative "
            "decoding.")
        parser.add_argument(
            "--ngram-prompt-lookup-min",
            type=int,
            default=EngineArgs.ngram_prompt_lookup_min,
            help="Min size of window for ngram prompt lookup in speculative "
            "decoding.")
        parser.add_argument(
            "--model-loader-extra-config",
            type=str,
            default=EngineArgs.model_loader_extra_config,
            help="Extra config for model loader. "
            "This will be passed to the model loader "
            "corresponding to the chosen load_format. "
            "This should be a JSON string that will be "
            "parsed into a dictionary.")
        return parser

    @classmethod
    def from_cli_args(cls, args: argparse.Namespace) -> "EngineArgs":
        # Get the list of attributes of this dataclass.
        attrs = [attr.name for attr in dataclasses.fields(cls)]
        # Set the attributes from the parsed arguments.
        engine_args = cls(**{attr: getattr(args, attr) for attr in attrs})
        return engine_args
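
    # Illustrative CLI flow (sketch; the flag values are placeholders):
    #
    #     parser = argparse.ArgumentParser()
    #     parser = EngineArgs.add_cli_args(parser)
    #     args = parser.parse_args(
    #         ["--model", "EleutherAI/pythia-70m-deduped", "-tp", "2"])
    #     engine_args = EngineArgs.from_cli_args(args)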

    def create_engine_config(self) -> EngineConfig:
        device_config = DeviceConfig(self.device)
        model_config = ModelConfig(
            self.model,
            self.tokenizer,
            self.tokenizer_mode,
            self.trust_remote_code,
            self.dtype,
            self.seed,
            self.revision,
            self.code_revision,
            self.rope_scaling,
            self.tokenizer_revision,
            self.max_model_len,
            self.quantization,
            self.load_in_4bit,
            self.load_in_8bit,
            self.load_in_smooth,
            self.deepspeed_fp_bits,
            self.quantization_param_path,
            self.enforce_eager,
            self.max_context_len_to_capture,
            self.max_seq_len_to_capture,
            self.max_logprobs,
            self.disable_sliding_window,
            self.skip_tokenizer_init,
        )
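        # The cache config sizes the paged KV cache: the block size, the GPU
        # memory fraction reserved for the executor, and the CPU swap space
        # together determine how many cache blocks get allocated.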
        cache_config = CacheConfig(
            self.block_size,
            self.gpu_memory_utilization,
            self.swap_space,
            self.kv_cache_dtype,
            # self.kv_quant_params_path,
            self.num_gpu_blocks_override,
            model_config.get_sliding_window(),
            self.enable_prefix_caching,
        )
        parallel_config = ParallelConfig(
            self.pipeline_parallel_size,
            self.tensor_parallel_size,
            self.worker_use_ray,
            self.max_parallel_loading_workers,
            self.disable_custom_all_reduce,
            TokenizerPoolConfig.create_config(
                self.tokenizer_pool_size,
                self.tokenizer_pool_type,
                self.tokenizer_pool_extra_config,
            ),
            self.ray_workers_use_nsight,
            distributed_executor_backend=self.distributed_executor_backend)
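        # maybe_create_spec_config returns None when speculative decoding has
        # not been requested (no draft model or ngram lookup configured).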
        speculative_config = SpeculativeConfig.maybe_create_spec_config(
            target_model_config=model_config,
            target_parallel_config=parallel_config,
            target_dtype=self.dtype,
            speculative_model=self.speculative_model,
            num_speculative_tokens=self.num_speculative_tokens,
            speculative_disable_by_batch_size=self.
            speculative_disable_by_batch_size,
            speculative_max_model_len=self.speculative_max_model_len,
            enable_chunked_prefill=self.enable_chunked_prefill,
            use_v2_block_manager=self.use_v2_block_manager,
            ngram_prompt_lookup_max=self.ngram_prompt_lookup_max,
            ngram_prompt_lookup_min=self.ngram_prompt_lookup_min,
        )
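        # When speculative decoding is active, the scheduler's lookahead
        # slots come from the speculative config rather than the CLI flag.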
        scheduler_config = SchedulerConfig(
            self.max_num_batched_tokens,
            self.max_num_seqs,
            model_config.max_model_len,
            self.use_v2_block_manager,
            num_lookahead_slots=(self.num_lookahead_slots
                                 if speculative_config is None else
                                 speculative_config.num_lookahead_slots),
            delay_factor=self.scheduler_delay_factor,
            enable_chunked_prefill=self.enable_chunked_prefill,
            embedding_mode=model_config.embedding_mode,
        )
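        # A LoRA config is only built when --enable-lora is set; otherwise
        # None is passed through to the engine.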
        lora_config = LoRAConfig(
            max_lora_rank=self.max_lora_rank,
            max_loras=self.max_loras,
            fully_sharded_loras=self.fully_sharded_loras,
            lora_extra_vocab_size=self.lora_extra_vocab_size,
            long_lora_scaling_factors=self.long_lora_scaling_factors,
            lora_dtype=self.lora_dtype,
            max_cpu_loras=self.max_cpu_loras if self.max_cpu_loras
            and self.max_cpu_loras > 0 else None) if self.enable_lora else None
        load_config = LoadConfig(
            load_format=self.load_format,
            download_dir=self.download_dir,
            model_loader_extra_config=self.model_loader_extra_config,
        )
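        # The vision-language options travel together: once an image input
        # type is chosen, the image token id, input shape and feature size
        # are all required.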
        if self.image_input_type:
            if (not self.image_token_id or not self.image_input_shape
                    or not self.image_feature_size):
                raise ValueError(
                    "Specify `image_token_id`, `image_input_shape` and "
                    "`image_feature_size` together with `image_input_type`.")
            vision_language_config = VisionLanguageConfig(
                image_input_type=VisionLanguageConfig.
                get_image_input_enum_type(self.image_input_type),
                image_token_id=self.image_token_id,
                image_input_shape=str_to_int_tuple(self.image_input_shape),
                image_feature_size=self.image_feature_size,
            )
        else:
            vision_language_config = None

        decoding_config = DecodingConfig(
            guided_decoding_backend=self.guided_decoding_backend)

        if (model_config.get_sliding_window() is not None
                and scheduler_config.chunked_prefill_enabled):
            raise ValueError(
                "Chunked prefill is not supported with sliding window. "
                "Set --disable-sliding-window to disable sliding window.")

        return EngineConfig(model_config=model_config,
                            cache_config=cache_config,
                            parallel_config=parallel_config,
                            scheduler_config=scheduler_config,
                            device_config=device_config,
                            lora_config=lora_config,
                            vision_language_config=vision_language_config,
                            speculative_config=speculative_config,
                            load_config=load_config,
                            decoding_config=decoding_config)


@dataclass
class AsyncEngineArgs(EngineArgs):
    """Arguments for asynchronous Aphrodite engine."""
    engine_use_ray: bool = False
    disable_log_requests: bool = False
    max_log_len: int = 0
    uvloop: bool = False

    @staticmethod
    def add_cli_args(
            parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        parser = EngineArgs.add_cli_args(parser)
        parser.add_argument(
            "--engine-use-ray",
            action="store_true",
            help="use Ray to start the LLM engine in a "
            "separate process from the server process.",
        )
        parser.add_argument(
            "--disable-log-requests",
            action="store_true",
            help="disable logging requests",
        )
        parser.add_argument(
            "--max-log-len",
            type=int,
            default=0,
            help="max number of prompt characters or prompt "
            "ID numbers being printed in log. "
            "Default: unlimited.",
        )
        parser.add_argument(
            "--uvloop",
            action="store_true",
            default=AsyncEngineArgs.uvloop,
            help="Use the Uvloop asyncio event loop to possibly increase "
            "performance")
        return parser
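
# Illustrative sketch (hypothetical server entrypoint): how a server might
# consume these args.
#
#     parser = argparse.ArgumentParser(description="Aphrodite server")
#     parser = AsyncEngineArgs.add_cli_args(parser)
#     args = parser.parse_args()
#     engine_args = AsyncEngineArgs.from_cli_args(args)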