args_tools.py

import argparse
import dataclasses
from dataclasses import dataclass
from typing import Optional

from aphrodite.common.config import (CacheConfig, DecodingConfig, DeviceConfig,
                                     EngineConfig, LoadConfig, LoRAConfig,
                                     ModelConfig, ParallelConfig,
                                     SchedulerConfig, SpeculativeConfig,
                                     TokenizerPoolConfig, VisionLanguageConfig)
from aphrodite.common.utils import str_to_int_tuple
from aphrodite.quantization import QUANTIZATION_METHODS


@dataclass
class EngineArgs:
    """Arguments for Aphrodite engine."""
    model: str
    tokenizer: Optional[str] = None
    skip_tokenizer_init: bool = False
    tokenizer_mode: str = "auto"
    trust_remote_code: bool = False
    download_dir: Optional[str] = None
    load_format: str = "auto"
    dtype: str = "auto"
    kv_cache_dtype: str = "auto"
    quantization_param_path: Optional[str] = None
    seed: int = 0
    max_model_len: Optional[int] = None
    worker_use_ray: bool = False
    pipeline_parallel_size: int = 1
    tensor_parallel_size: int = 1
    max_parallel_loading_workers: Optional[int] = None
    block_size: int = 16
    enable_prefix_caching: bool = False
    use_v2_block_manager: bool = False
    swap_space: int = 4  # GiB
    gpu_memory_utilization: float = 0.90
    max_num_batched_tokens: Optional[int] = None
    max_num_seqs: int = 256
    max_logprobs: int = 10  # OpenAI default is 5, setting to 10 because ST
    disable_log_stats: bool = False
    revision: Optional[str] = None
    code_revision: Optional[str] = None
    tokenizer_revision: Optional[str] = None
    quantization: Optional[str] = None
    load_in_4bit: bool = False
    load_in_8bit: bool = False
    load_in_smooth: bool = False
    enforce_eager: bool = True
    max_context_len_to_capture: int = 8192
    disable_custom_all_reduce: bool = False
    tokenizer_pool_size: int = 0
    tokenizer_pool_type: str = "ray"
    tokenizer_pool_extra_config: Optional[dict] = None
    enable_lora: bool = False
    max_loras: int = 1
    max_lora_rank: int = 16
    lora_extra_vocab_size: int = 256
    lora_dtype: str = "auto"
    max_cpu_loras: Optional[int] = None
    device: str = "auto"
    ray_workers_use_nsight: bool = False
    num_gpu_blocks_override: Optional[int] = None
    num_lookahead_slots: int = 0
    model_loader_extra_config: Optional[dict] = None
    # Related to Vision-language models such as llava
    image_input_type: Optional[str] = None
    image_token_id: Optional[int] = None
    image_input_shape: Optional[str] = None
    image_feature_size: Optional[int] = None
    scheduler_delay_factor: float = 0.0
    enable_chunked_prefill: bool = False
    guided_decoding_backend: str = 'outlines'
    # Speculative decoding config
    speculative_model: Optional[str] = None
    num_speculative_tokens: Optional[int] = None
    speculative_max_model_len: Optional[int] = None

    def __post_init__(self):
        if self.tokenizer is None:
            self.tokenizer = self.model

    @staticmethod
    def add_cli_args(
            parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Shared CLI arguments for the Aphrodite engine."""
        # NOTE: If you update any of the arguments below, please also
        # make sure to update docs/source/models/engine_args.rst
        # Model arguments
        parser.add_argument(
            "--model",
            type=str,
            default="EleutherAI/pythia-70m-deduped",
            help="name or path of the huggingface model to use",
        )
        parser.add_argument(
            "--tokenizer",
            type=str,
            default=EngineArgs.tokenizer,
            help="name or path of the huggingface tokenizer to use",
        )
        parser.add_argument(
            "--skip-tokenizer-init",
            action="store_true",
            help="Skip initialization of tokenizer and detokenizer")
        parser.add_argument(
            "--revision",
            type=str,
            default=None,
            help="the specific model version to use. It can be a branch "
            "name, a tag name, or a commit id. If unspecified, will use "
            "the default version.",
        )
        parser.add_argument(
            "--code-revision",
            type=str,
            default=None,
            help="the specific revision to use for the model code on "
            "Hugging Face Hub. It can be a branch name, a tag name, or a "
            "commit id. If unspecified, will use the default version.",
        )
        parser.add_argument(
            "--tokenizer-revision",
            type=str,
            default=None,
            help="the specific tokenizer version to use. It can be a branch "
            "name, a tag name, or a commit id. If unspecified, will use "
            "the default version.",
        )
        parser.add_argument(
            "--tokenizer-mode",
            type=str,
            default=EngineArgs.tokenizer_mode,
            choices=["auto", "slow"],
            help='tokenizer mode. "auto" will use the fast '
            'tokenizer if available, and "slow" will '
            "always use the slow tokenizer.",
        )
        parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="trust remote code from huggingface",
        )
        parser.add_argument(
            "--download-dir",
            type=str,
            default=EngineArgs.download_dir,
            help="directory to download and load the weights, "
            "default to the default cache dir of "
            "huggingface",
        )
        parser.add_argument(
            '--load-format',
            type=str,
            default=EngineArgs.load_format,
            choices=[
                'auto', 'pt', 'safetensors', 'npcache', 'dummy', 'tensorizer'
            ],
            help='The format of the model weights to load.\n\n'
            '* "auto" will try to load the weights in the safetensors format '
            'and fall back to the pytorch bin format if safetensors format '
            'is not available.\n'
            '* "pt" will load the weights in the pytorch bin format.\n'
            '* "safetensors" will load the weights in the safetensors format.\n'
            '* "npcache" will load the weights in pytorch format and store '
            'a numpy cache to speed up the loading.\n'
            '* "dummy" will initialize the weights with random values, '
            'which is mainly for profiling.\n'
            '* "tensorizer" will load the weights using tensorizer from '
            'CoreWeave which assumes tensorizer_uri is set to the location of '
            'the serialized weights.')
        parser.add_argument(
            '--dtype',
            type=str,
            default=EngineArgs.dtype,
            choices=[
                'auto', 'half', 'float16', 'bfloat16', 'float', 'float32'
            ],
            help='Data type for model weights and activations.\n\n'
            '* "auto" will use FP16 precision for FP32 and FP16 models, and '
            'BF16 precision for BF16 models.\n'
            '* "half" for FP16. Recommended for AWQ quantization.\n'
            '* "float16" is the same as "half".\n'
            '* "bfloat16" for a balance between precision and range.\n'
            '* "float" is shorthand for FP32 precision.\n'
            '* "float32" for FP32 precision.')
        parser.add_argument(
            '--kv-cache-dtype',
            type=str,
            choices=['auto', 'fp8'],
            default=EngineArgs.kv_cache_dtype,
            help='Data type for kv cache storage. If "auto", will use model '
            'data type. FP8_E5M2 (without scaling) is only supported on '
            'CUDA versions greater than 11.8. On ROCm (AMD GPU), FP8_E4M3 '
            'is instead supported for common inference criteria.')
        parser.add_argument(
            '--quantization-param-path',
            type=str,
            default=None,
            help='Path to the JSON file containing the KV cache '
            'scaling factors. This should generally be supplied when '
            'the KV cache dtype is FP8; otherwise, KV cache scaling factors '
            'default to 1.0, which may cause accuracy issues. '
            'FP8_E5M2 (without scaling) is only supported on '
            'CUDA versions greater than 11.8. On ROCm (AMD GPU), FP8_E4M3 '
            'is instead supported for common inference criteria.')
        parser.add_argument(
            "--max-model-len",
            type=int,
            default=EngineArgs.max_model_len,
            help="model context length. If unspecified, "
            "will be automatically derived from the model.",
        )
        parser.add_argument(
            '--guided-decoding-backend',
            type=str,
            default='outlines',
            choices=['outlines', 'lm-format-enforcer'],
            help='Which engine will be used for guided decoding'
            ' (JSON schema / regex etc) by default. Currently support '
            'https://github.com/outlines-dev/outlines and '
            'https://github.com/noamgat/lm-format-enforcer.'
            ' Can be overridden per request via guided_decoding_backend'
            ' parameter.')
        # Parallel arguments
        parser.add_argument(
            "--worker-use-ray",
            action="store_true",
            help="use Ray for distributed serving, will be "
            "automatically set when using more than 1 GPU",
        )
        parser.add_argument(
            "--pipeline-parallel-size",
            "-pp",
            type=int,
            default=EngineArgs.pipeline_parallel_size,
            help="number of pipeline stages. Currently not supported.")
        parser.add_argument(
            "--tensor-parallel-size",
            "-tp",
            type=int,
            default=EngineArgs.tensor_parallel_size,
            help="number of tensor parallel replicas, i.e. the number of GPUs "
            "to use.")
        parser.add_argument(
            "--max-parallel-loading-workers",
            type=int,
            default=EngineArgs.max_parallel_loading_workers,
            help="load model sequentially in multiple batches, "
            "to avoid RAM OOM when using tensor "
            "parallel and large models",
        )
        parser.add_argument(
            "--ray-workers-use-nsight",
            action="store_true",
            help="If specified, use nsight to profile ray workers",
        )
        # KV cache arguments
        parser.add_argument(
            "--block-size",
            type=int,
            default=EngineArgs.block_size,
            choices=[8, 16, 32],
            help="token block size",
        )
        parser.add_argument(
            "--enable-prefix-caching",
            "--context-shift",
            action="store_true",
            help="Enable context shifting.",
        )
        parser.add_argument("--use-v2-block-manager",
                            action="store_true",
                            help="Use the v2 block manager.")
        parser.add_argument(
            "--num-lookahead-slots",
            type=int,
            default=EngineArgs.num_lookahead_slots,
            help="Experimental scheduling config necessary for "
            "speculative decoding. This will be replaced by "
            "speculative decoding config in the future; it is "
            "present for testing purposes until then.")
        parser.add_argument("--seed",
                            type=int,
                            default=EngineArgs.seed,
                            help="random seed")
        parser.add_argument(
            "--swap-space",
            type=int,
            default=EngineArgs.swap_space,
            help="CPU swap space size (GiB) per GPU",
        )
        parser.add_argument(
            "--gpu-memory-utilization",
            "-gmu",
            type=float,
            default=EngineArgs.gpu_memory_utilization,
            help="the fraction of GPU memory to be used for "
            "the model executor, which can range from 0 to 1. "
            "If unspecified, will use the default value of 0.9.",
        )
        parser.add_argument(
            "--num-gpu-blocks-override",
            type=int,
            default=None,
            help="If specified, ignore GPU profiling result and use this "
            "number of GPU blocks. Used for testing preemption.")
        parser.add_argument(
            "--max-num-batched-tokens",
            type=int,
            default=EngineArgs.max_num_batched_tokens,
            help="maximum number of batched tokens per "
            "iteration",
        )
        parser.add_argument(
            "--max-num-seqs",
            type=int,
            default=EngineArgs.max_num_seqs,
            help="maximum number of sequences per iteration",
        )
        parser.add_argument(
            "--max-logprobs",
            type=int,
            default=EngineArgs.max_logprobs,
            help="maximum number of log probabilities to "
            "return.",
        )
        parser.add_argument(
            "--disable-log-stats",
            action="store_true",
            help="disable logging statistics",
        )
        # Quantization settings.
        parser.add_argument(
            "--quantization",
            "-q",
            type=str,
            choices=[*QUANTIZATION_METHODS, None],
            default=EngineArgs.quantization,
            help="Method used to quantize the weights. If "
            "None, we first check the `quantization_config` "
            "attribute in the model config file. If that is "
            "None, we assume the model weights are not "
            "quantized and use `dtype` to determine the data "
            "type of the weights.",
        )
        parser.add_argument(
            "--load-in-4bit",
            action="store_true",
            help="Load the FP16 model in 4-bit format. Also "
            "works with AWQ models. Throughput at 2.5x of "
            "FP16.",
        )
        parser.add_argument(
            "--load-in-8bit",
            action="store_true",
            help="Load the FP16 model in 8-bit format. "
            "Throughput at 0.3x of FP16.",
        )
        parser.add_argument(
            "--load-in-smooth",
            action="store_true",
            help="Load the FP16 model in smoothquant "
            "8bit format. Throughput at 0.7x of FP16.",
        )
        parser.add_argument(
            "--enforce-eager",
            type=lambda x: (str(x).lower() == 'true'),
            default=EngineArgs.enforce_eager,
            help="Always use eager-mode PyTorch. If False, "
            "will use eager mode and CUDA graph in hybrid "
            "for maximal performance and flexibility.",
        )
        parser.add_argument(
            "--max-context-len-to-capture",
            type=int,
            default=EngineArgs.max_context_len_to_capture,
            help="maximum context length covered by CUDA "
            "graphs. When a sequence has context length "
            "larger than this, we fall back to eager mode.",
        )
        parser.add_argument(
            "--disable-custom-all-reduce",
            action="store_true",
            default=EngineArgs.disable_custom_all_reduce,
            help="See ParallelConfig",
        )
        parser.add_argument("--tokenizer-pool-size",
                            type=int,
                            default=EngineArgs.tokenizer_pool_size,
                            help="Size of tokenizer pool to use for "
                            "asynchronous tokenization. If 0, will "
                            "use synchronous tokenization.")
        parser.add_argument("--tokenizer-pool-type",
                            type=str,
                            default=EngineArgs.tokenizer_pool_type,
                            help="The type of tokenizer pool to use for "
                            "asynchronous tokenization. Ignored if "
                            "tokenizer_pool_size is 0.")
        parser.add_argument("--tokenizer-pool-extra-config",
                            type=str,
                            default=EngineArgs.tokenizer_pool_extra_config,
                            help="Extra config for tokenizer pool. "
                            "This should be a JSON string that will be "
                            "parsed into a dictionary. Ignored if "
                            "tokenizer_pool_size is 0.")
        # LoRA related configs
        parser.add_argument(
            "--enable-lora",
            action="store_true",
            help="If True, enable handling of LoRA adapters.",
        )
        parser.add_argument(
            "--max-loras",
            type=int,
            default=EngineArgs.max_loras,
            help="Max number of LoRAs in a single batch.",
        )
        parser.add_argument(
            "--max-lora-rank",
            type=int,
            default=EngineArgs.max_lora_rank,
            help="Max LoRA rank.",
        )
        parser.add_argument(
            "--lora-extra-vocab-size",
            type=int,
            default=EngineArgs.lora_extra_vocab_size,
            help=("Maximum size of extra vocabulary that can be "
                  "present in a LoRA adapter (added to the base "
                  "model vocabulary)."),
        )
        parser.add_argument(
            "--lora-dtype",
            type=str,
            default=EngineArgs.lora_dtype,
            choices=["auto", "float16", "bfloat16", "float32"],
            help=("Data type for LoRA. If auto, will default to "
                  "base model dtype."),
        )
        parser.add_argument(
            "--max-cpu-loras",
            type=int,
            default=EngineArgs.max_cpu_loras,
            help=("Maximum number of LoRAs to store in CPU memory. "
                  "Must be >= max_num_seqs. "
                  "Defaults to max_num_seqs."),
        )
        parser.add_argument(
            "--device",
            type=str,
            default=EngineArgs.device,
            choices=["auto", "cuda", "neuron", "cpu"],
            help=("Device to use for model execution."),
        )
        # Related to Vision-language models such as llava
        parser.add_argument(
            "--image-input-type",
            type=str,
            default=None,
            choices=[
                t.name.lower() for t in VisionLanguageConfig.ImageInputType
            ],
            help=("The image input type passed into Aphrodite. "
                  "Should be one of `pixel_values` or `image_features`"))
        parser.add_argument("--image-token-id",
                            type=int,
                            default=None,
                            help=("Input id for image token."))
        parser.add_argument(
            '--image-input-shape',
            type=str,
            default=None,
            help=('The biggest image input shape (worst for memory footprint) '
                  'given an input type. Only used for Aphrodite\'s '
                  'profile_run.'))
        parser.add_argument(
            '--image-feature-size',
            type=int,
            default=None,
            help=('The image feature size along the context dimension.'))
        parser.add_argument(
            "--scheduler-delay-factor",
            "-sdf",
            type=float,
            default=EngineArgs.scheduler_delay_factor,
            help="Apply a delay (of delay factor multiplied by previous "
            "prompt latency) before scheduling next prompt.")
        parser.add_argument(
            "--enable-chunked-prefill",
            action="store_true",
            help="If True, the prefill requests can be chunked based on the "
            "max_num_batched_tokens.")
        parser.add_argument(
            "--speculative-model",
            type=str,
            default=EngineArgs.speculative_model,
            help=
            "The name of the draft model to be used in speculative decoding.")
        parser.add_argument(
            "--num-speculative-tokens",
            type=int,
            default=EngineArgs.num_speculative_tokens,
            help="The number of speculative tokens to sample from "
            "the draft model in speculative decoding")
        parser.add_argument(
            "--speculative-max-model-len",
            type=int,
            default=EngineArgs.speculative_max_model_len,
            help="The maximum sequence length supported by the "
            "draft model. Sequences over this length will skip "
            "speculation.")
        parser.add_argument("--model-loader-extra-config",
                            type=str,
                            default=EngineArgs.model_loader_extra_config,
                            help="Extra config for model loader. "
                            "This will be passed to the model loader "
                            "corresponding to the chosen load_format. "
                            "This should be a JSON string that will be "
                            "parsed into a dictionary.")
        return parser

    @classmethod
    def from_cli_args(cls, args: argparse.Namespace) -> "EngineArgs":
        # Get the list of attributes of this dataclass.
        attrs = [attr.name for attr in dataclasses.fields(cls)]
        # Set the attributes from the parsed arguments.
        engine_args = cls(**{attr: getattr(args, attr) for attr in attrs})
        return engine_args

    def create_engine_config(self) -> EngineConfig:
        device_config = DeviceConfig(self.device)
        model_config = ModelConfig(
            self.model,
            self.tokenizer,
            self.tokenizer_mode,
            self.trust_remote_code,
            self.dtype,
            self.seed,
            self.revision,
            self.code_revision,
            self.tokenizer_revision,
            self.max_model_len,
            self.quantization,
            self.load_in_4bit,
            self.load_in_8bit,
            self.load_in_smooth,
            self.quantization_param_path,
            self.enforce_eager,
            self.max_context_len_to_capture,
            self.max_logprobs,
            self.skip_tokenizer_init,
        )
        cache_config = CacheConfig(
            self.block_size,
            self.gpu_memory_utilization,
            self.swap_space,
            self.kv_cache_dtype,
            # self.kv_quant_params_path,
            self.num_gpu_blocks_override,
            model_config.get_sliding_window(),
            self.enable_prefix_caching,
        )
        parallel_config = ParallelConfig(
            self.pipeline_parallel_size,
            self.tensor_parallel_size,
            self.worker_use_ray,
            self.max_parallel_loading_workers,
            self.disable_custom_all_reduce,
            TokenizerPoolConfig.create_config(
                self.tokenizer_pool_size,
                self.tokenizer_pool_type,
                self.tokenizer_pool_extra_config,
            ),
            self.ray_workers_use_nsight,
        )
        speculative_config = SpeculativeConfig.maybe_create_spec_config(
            target_model_config=model_config,
            target_parallel_config=parallel_config,
            target_dtype=self.dtype,
            speculative_model=self.speculative_model,
            num_speculative_tokens=self.num_speculative_tokens,
            speculative_max_model_len=self.speculative_max_model_len,
            enable_chunked_prefill=self.enable_chunked_prefill,
            use_v2_block_manager=self.use_v2_block_manager,
        )
        scheduler_config = SchedulerConfig(
            self.max_num_batched_tokens,
            self.max_num_seqs,
            model_config.max_model_len,
            self.use_v2_block_manager,
            num_lookahead_slots=(self.num_lookahead_slots
                                 if speculative_config is None else
                                 speculative_config.num_lookahead_slots),
            delay_factor=self.scheduler_delay_factor,
            enable_chunked_prefill=self.enable_chunked_prefill,
        )
        lora_config = LoRAConfig(
            max_lora_rank=self.max_lora_rank,
            max_loras=self.max_loras,
            lora_extra_vocab_size=self.lora_extra_vocab_size,
            lora_dtype=self.lora_dtype,
            max_cpu_loras=self.max_cpu_loras if self.max_cpu_loras
            and self.max_cpu_loras > 0 else None) if self.enable_lora else None
        load_config = LoadConfig(
            load_format=self.load_format,
            download_dir=self.download_dir,
            model_loader_extra_config=self.model_loader_extra_config,
        )
        if self.image_input_type:
            if (not self.image_token_id or not self.image_input_shape
                    or not self.image_feature_size):
                raise ValueError(
                    "Specify `image_token_id`, `image_input_shape` and "
                    "`image_feature_size` together with `image_input_type`.")
            vision_language_config = VisionLanguageConfig(
                image_input_type=VisionLanguageConfig.
                get_image_input_enum_type(self.image_input_type),
                image_token_id=self.image_token_id,
                image_input_shape=str_to_int_tuple(self.image_input_shape),
                image_feature_size=self.image_feature_size,
            )
        else:
            vision_language_config = None
        decoding_config = DecodingConfig(
            guided_decoding_backend=self.guided_decoding_backend)
        return EngineConfig(model_config=model_config,
                            cache_config=cache_config,
                            parallel_config=parallel_config,
                            scheduler_config=scheduler_config,
                            device_config=device_config,
                            lora_config=lora_config,
                            vision_language_config=vision_language_config,
                            speculative_config=speculative_config,
                            load_config=load_config,
                            decoding_config=decoding_config)
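
# Minimal usage sketch for the synchronous path (the argv values below are
# illustrative, not defaults): the intended flow is add_cli_args ->
# parse_args -> from_cli_args -> create_engine_config.
#
#     parser = EngineArgs.add_cli_args(argparse.ArgumentParser())
#     args = parser.parse_args(["--model", "EleutherAI/pythia-70m-deduped",
#                               "--dtype", "float16"])
#     engine_config = EngineArgs.from_cli_args(args).create_engine_config()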


@dataclass
class AsyncEngineArgs(EngineArgs):
    """Arguments for asynchronous Aphrodite engine."""
    engine_use_ray: bool = False
    disable_log_requests: bool = False
    max_log_len: int = 0

    @staticmethod
    def add_cli_args(
            parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        parser = EngineArgs.add_cli_args(parser)
        parser.add_argument(
            "--engine-use-ray",
            action="store_true",
            help="use Ray to start the LLM engine in a "
            "separate process as the server process.",
        )
        parser.add_argument(
            "--disable-log-requests",
            action="store_true",
            help="disable logging requests",
        )
        parser.add_argument(
            "--max-log-len",
            type=int,
            default=0,
            help="max number of prompt characters or prompt "
            "ID numbers being printed in log. "
            "Default: unlimited.",
        )
        return parser
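

if __name__ == "__main__":
    # Smoke-test sketch, assuming this module is run directly: build the
    # full CLI, parse argv, and echo the resulting dataclass. This mirrors
    # how an API server would typically consume AsyncEngineArgs; every
    # dataclass field has a matching CLI flag, so from_cli_args can pull
    # each attribute off the parsed namespace.
    cli_parser = argparse.ArgumentParser(
        description="Aphrodite engine arguments")
    cli_parser = AsyncEngineArgs.add_cli_args(cli_parser)
    engine_args = AsyncEngineArgs.from_cli_args(cli_parser.parse_args())
    print(engine_args)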