import argparse
import dataclasses
from dataclasses import dataclass
from typing import Optional, Tuple

from aphrodite.common.config import (CacheConfig, ModelConfig, ParallelConfig,
                                     SchedulerConfig)


@dataclass
class EngineArgs:
    """Arguments for the Aphrodite engine."""
    model: str
    tokenizer: Optional[str] = None
    tokenizer_mode: str = 'auto'
    trust_remote_code: bool = False
    download_dir: Optional[str] = None
    load_format: str = 'auto'
    dtype: str = 'auto'
    seed: int = 0
    max_model_len: Optional[int] = None
    worker_use_ray: bool = False
    pipeline_parallel_size: int = 1
    tensor_parallel_size: int = 1
    max_parallel_loading_workers: Optional[int] = None
    block_size: int = 16
    swap_space: int = 4  # GiB
    gpu_memory_utilization: float = 0.90
    max_num_batched_tokens: Optional[int] = None
    max_num_seqs: int = 256
    max_paddings: int = 256
    disable_log_stats: bool = False
    revision: Optional[str] = None
    quantization: Optional[str] = None
    enforce_eager: bool = False
    max_context_len_to_capture: int = 8192
    kv_cache_dtype: Optional[str] = None
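
    # Note: leaving `tokenizer` unset reuses the model name or path for the
    # tokenizer (see __post_init__ below), e.g.
    # EngineArgs(model='EleutherAI/pythia-70m-deduped') loads both the
    # weights and the tokenizer from that repository.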

    def __post_init__(self):
        if self.tokenizer is None:
            self.tokenizer = self.model

    @staticmethod
    def add_cli_args(
            parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        """Shared CLI arguments for the Aphrodite engine."""
        # Model arguments
        parser.add_argument(
            '--model',
            type=str,
            default='EleutherAI/pythia-70m-deduped',
            help='name or path of the huggingface model to use')
        parser.add_argument(
            '--tokenizer',
            type=str,
            default=EngineArgs.tokenizer,
            help='name or path of the huggingface tokenizer to use')
        parser.add_argument(
            '--revision',
            type=str,
            default=None,
            help='the specific model version to use. It can be a branch '
            'name, a tag name, or a commit id. If unspecified, will use '
            'the default version.')
        parser.add_argument('--tokenizer-mode',
                            type=str,
                            default=EngineArgs.tokenizer_mode,
                            choices=['auto', 'slow'],
                            help='tokenizer mode. "auto" will use the fast '
                            'tokenizer if available, and "slow" will '
                            'always use the slow tokenizer.')
        parser.add_argument('--trust-remote-code',
                            action='store_true',
                            help='trust remote code from huggingface')
        parser.add_argument('--download-dir',
                            type=str,
                            default=EngineArgs.download_dir,
                            help='directory to download and load the weights, '
                            'defaults to the default cache dir of '
                            'huggingface')
        parser.add_argument(
            '--load-format',
            type=str,
            default=EngineArgs.load_format,
            choices=['auto', 'pt', 'safetensors', 'npcache', 'dummy'],
            help='The format of the model weights to load. '
            '"auto" will try to load the weights in the safetensors format '
            'and fall back to the pytorch bin format if safetensors '
            'is not available. '
            '"pt" will load the weights in the pytorch bin format. '
            '"safetensors" will load the weights in the safetensors format. '
            '"npcache" will load the weights in pytorch format and store '
            'a numpy cache to speed up the loading. '
            '"dummy" will initialize the weights with random values, '
            'which is mainly for profiling.')
        parser.add_argument(
            '--dtype',
            type=str,
            default=EngineArgs.dtype,
            choices=[
                'auto', 'half', 'float16', 'bfloat16', 'float', 'float32'
            ],
            help='data type for model weights and activations. '
            'The "auto" option will use FP16 precision '
            'for FP32 and FP16 models, and BF16 precision '
            'for BF16 models.')
        parser.add_argument('--max-model-len',
                            type=int,
                            default=None,
                            help='model context length. If unspecified, '
                            'will be automatically derived from the model.')
        # Parallel arguments
        parser.add_argument('--worker-use-ray',
                            action='store_true',
                            help='use Ray for distributed serving, will be '
                            'automatically set when using more than 1 GPU')
        parser.add_argument('--pipeline-parallel-size',
                            '-pp',
                            type=int,
                            default=EngineArgs.pipeline_parallel_size,
                            help='number of pipeline stages')
        parser.add_argument('--tensor-parallel-size',
                            '-tp',
                            type=int,
                            default=EngineArgs.tensor_parallel_size,
                            help='number of tensor parallel replicas')
        parser.add_argument(
            '--max-parallel-loading-workers',
            '-mplw',
            type=int,
            help='load model sequentially in multiple batches, '
            'to avoid CPU OOM when using tensor parallel '
            'with large models.')
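        # Illustrative note (not in the original file): the total number of
        # GPUs used is tensor_parallel_size * pipeline_parallel_size, so e.g.
        # '-tp 2 -pp 2' shards each of the 2 pipeline stages across 2 GPUs
        # and requires 4 GPUs in total.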
        # KV cache arguments
        parser.add_argument('--block-size',
                            type=int,
                            default=EngineArgs.block_size,
                            choices=[8, 16, 32],
                            help='token block size')
        # TODO: Support fine-grained seeds (e.g., seed per request).
        parser.add_argument('--seed',
                            type=int,
                            default=EngineArgs.seed,
                            help='random seed')
        parser.add_argument('--swap-space',
                            type=int,
                            default=EngineArgs.swap_space,
                            help='CPU swap space size (GiB) per GPU')
        parser.add_argument('--gpu-memory-utilization',
                            '-gmu',
                            type=float,
                            default=EngineArgs.gpu_memory_utilization,
                            help='the fraction of GPU memory (between 0 and '
                            '1) to be used for the model executor')
        parser.add_argument('--max-num-batched-tokens',
                            '-mnbt',
                            type=int,
                            default=EngineArgs.max_num_batched_tokens,
                            help='maximum number of batched tokens per '
                            'iteration')
        parser.add_argument('--max-num-seqs',
                            type=int,
                            default=EngineArgs.max_num_seqs,
                            help='maximum number of sequences per iteration')
        parser.add_argument('--max-paddings',
                            type=int,
                            default=EngineArgs.max_paddings,
                            help='maximum number of paddings in a batch')
        parser.add_argument('--disable-log-stats',
                            action='store_true',
                            help='disable logging statistics')
        # Quantization settings.
        parser.add_argument('--quantization',
                            '-q',
                            type=str,
                            choices=['awq', 'squeezellm', 'gptq', None],
                            default=None,
                            help='Method used to quantize the weights')
        parser.add_argument('--enforce-eager',
                            action='store_true',
                            help='Always use eager-mode PyTorch. If False, '
                            'will use eager mode and CUDA graph in hybrid '
                            'for maximum performance and flexibility.')
        parser.add_argument('--max-context-len-to-capture',
                            type=int,
                            default=EngineArgs.max_context_len_to_capture,
                            help='maximum context length covered by CUDA '
                            'graphs. When a sequence has context length '
                            'larger than this, we fall back to eager mode.')
        parser.add_argument('--kv-cache-dtype',
                            type=str,
                            choices=['fp8', None],
                            default=None,
                            help='Data type for the KV cache.')
        return parser

    @classmethod
    def from_cli_args(cls, args: argparse.Namespace) -> 'EngineArgs':
        # Get the list of attributes of this dataclass.
        attrs = [attr.name for attr in dataclasses.fields(cls)]
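        # argparse turns each '--some-flag' into the dest 'some_flag', so the
        # parsed namespace attributes line up one-to-one with the dataclass
        # field names collected above.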
        # Set the attributes from the parsed arguments.
        engine_args = cls(**{attr: getattr(args, attr) for attr in attrs})
        return engine_args

    def create_engine_configs(
        self,
    ) -> Tuple[ModelConfig, CacheConfig, ParallelConfig, SchedulerConfig]:
        model_config = ModelConfig(self.model, self.tokenizer,
                                   self.tokenizer_mode, self.trust_remote_code,
                                   self.download_dir, self.load_format,
                                   self.dtype, self.seed, self.revision,
                                   self.max_model_len, self.quantization,
                                   self.enforce_eager,
                                   self.max_context_len_to_capture)
        cache_config = CacheConfig(self.block_size,
                                   self.gpu_memory_utilization,
                                   self.swap_space, self.kv_cache_dtype,
                                   model_config.get_sliding_window())
        parallel_config = ParallelConfig(self.pipeline_parallel_size,
                                         self.tensor_parallel_size,
                                         self.worker_use_ray,
                                         self.max_parallel_loading_workers)
        scheduler_config = SchedulerConfig(self.max_num_batched_tokens,
                                           self.max_num_seqs,
                                           model_config.max_model_len,
                                           self.max_paddings)
        return model_config, cache_config, parallel_config, scheduler_config
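

# Illustrative sketch (not part of the original module): EngineArgs can also
# be constructed directly in Python rather than via argparse, e.g.
#
#     engine_args = EngineArgs(model='EleutherAI/pythia-70m-deduped',
#                              dtype='float16',
#                              gpu_memory_utilization=0.85)
#     model_cfg, cache_cfg, parallel_cfg, sched_cfg = (
#         engine_args.create_engine_configs())
#
# Keep in mind that create_engine_configs() builds a ModelConfig, which
# typically resolves the HuggingFace model configuration, so the model must
# be locally cached or downloadable.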


@dataclass
class AsyncEngineArgs(EngineArgs):
    """Arguments for the asynchronous Aphrodite engine."""
    engine_use_ray: bool = False
    disable_log_requests: bool = False
    max_log_len: Optional[int] = None

    @staticmethod
    def add_cli_args(
            parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
        parser = EngineArgs.add_cli_args(parser)
        parser.add_argument('--engine-use-ray',
                            action='store_true',
                            help='use Ray to start the LLM engine in a '
                            'separate process as the server process.')
        parser.add_argument('--disable-log-requests',
                            action='store_true',
                            help='disable logging requests')
        parser.add_argument('--max-log-len',
                            type=int,
                            default=None,
                            help='max number of prompt characters or prompt '
                            'ID numbers being printed in log. '
                            'Default: unlimited.')
        return parser
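

# A minimal usage sketch, assuming the module is executed directly; the
# original file defines no entrypoint, so everything below is illustrative.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Demo: build AsyncEngineArgs from CLI flags.')
    parser = AsyncEngineArgs.add_cli_args(parser)
    # Parse an explicit argv list so the demo is reproducible; a real server
    # would call parser.parse_args() on sys.argv instead.
    args = parser.parse_args([
        '--model', 'EleutherAI/pythia-70m-deduped',
        '--dtype', 'float16',
        '--max-num-seqs', '128',
    ])
    engine_args = AsyncEngineArgs.from_cli_args(args)
    print(engine_args)
    # engine_args.create_engine_configs() would go on to build the Model,
    # Cache, Parallel, and Scheduler configs, but that step needs the
    # HuggingFace config for the chosen model, so it is skipped here.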