llm.py

from typing import List, Optional, Union

import torch
from tqdm import tqdm
from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast

from aphrodite.common.outputs import EmbeddingRequestOutput, RequestOutput
from aphrodite.common.pooling_params import PoolingParams
from aphrodite.common.sampling_params import SamplingParams
from aphrodite.common.sequence import MultiModalData
from aphrodite.common.utils import Counter
from aphrodite.engine.aphrodite_engine import AphroditeEngine
from aphrodite.engine.args_tools import EngineArgs
from aphrodite.lora.request import LoRARequest

class LLM:
    """An LLM for generating texts from given prompts and sampling parameters.

    This class includes a tokenizer, a language model (possibly distributed
    across multiple GPUs), and GPU memory space allocated for intermediate
    states (aka KV cache). Given a batch of prompts and sampling parameters,
    this class generates texts from the model, using an intelligent batching
    mechanism and efficient memory management.

    NOTE: This class is intended to be used for offline inference. For online
    serving, use the `AsyncLLMEngine` class instead.

    NOTE: For the comprehensive list of arguments, see `EngineArgs`.

    Args:
        model: The name or path of a HuggingFace Transformers model.
        tokenizer: The name or path of a HuggingFace Transformers tokenizer.
        tokenizer_mode: The tokenizer mode. "auto" will use the fast tokenizer
            if available, and "slow" will always use the slow tokenizer.
        trust_remote_code: Trust remote code (e.g., from HuggingFace) when
            downloading the model and tokenizer.
        tensor_parallel_size: The number of GPUs to use for distributed
            execution with tensor parallelism.
        dtype: The data type for the model weights and activations. Currently,
            we support `float32`, `float16`, and `bfloat16`. If `auto`, we use
            the `torch_dtype` attribute specified in the model config file.
            However, if the `torch_dtype` in the config is `float32`, we will
            use `float16` instead.
        quantization: The method used to quantize the model weights. Currently,
            we support "awq", "gptq", "quip" and "squeezellm". If None, we
            first check the `quantization_config` attribute in the model
            config file. If that is None, we assume the model weights are not
            quantized and use `dtype` to determine the data type of the
            weights.
        revision: The specific model version to use. It can be a branch name,
            a tag name, or a commit id.
        seed: The seed to initialize the random number generator for sampling.
        gpu_memory_utilization: The ratio (between 0 and 1) of GPU memory to
            reserve for the model weights, activations, and KV cache. Higher
            values will increase the KV cache size and thus improve the
            model's throughput. However, if the value is too high, it may
            cause out-of-memory (OOM) errors.
        swap_space: The size (GiB) of CPU memory per GPU to use as swap space.
            This can be used for temporarily storing the states of the
            requests when their `best_of` sampling parameters are larger than
            1. If all requests will have `best_of=1`, you can safely set this
            to 0. Otherwise, too small values may cause out-of-memory (OOM)
            errors.
        enforce_eager: Whether to enforce eager execution. If True, we will
            disable CUDA graph and always execute the model in eager mode.
            If False, we will use CUDA graph and eager execution in hybrid.
        max_context_len_to_capture: Maximum context len covered by CUDA graphs.
            When a sequence has context length larger than this, we fall back
            to eager mode (DEPRECATED. Use `max_seq_len_to_capture` instead).
        max_seq_len_to_capture: Maximum sequence len covered by CUDA graphs.
            When a sequence has context length larger than this, we fall back
            to eager mode.
        disable_custom_all_reduce: See ParallelConfig.
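
    Example:
        A minimal offline-inference sketch. The model name below is only a
        placeholder (any HuggingFace causal LM path works), and
        `SamplingParams` is the class imported at the top of this module:

            llm = LLM(model="facebook/opt-125m")
            params = SamplingParams(temperature=0.8, max_tokens=64)
            outputs = llm.generate(["The capital of France is"], params)
            for output in outputs:
                print(output.outputs[0].text)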
  65. """

    def __init__(
        self,
        model: str,
        tokenizer: Optional[str] = None,
        tokenizer_mode: str = "auto",
        trust_remote_code: bool = False,
        tensor_parallel_size: int = 1,
        dtype: str = "auto",
        quantization: Optional[str] = None,
        revision: Optional[str] = None,
        seed: int = 0,
        gpu_memory_utilization: float = 0.9,
        swap_space: int = 4,
        enforce_eager: bool = True,
        max_context_len_to_capture: Optional[int] = None,
        max_seq_len_to_capture: int = 8192,
        disable_custom_all_reduce: bool = False,
        enable_prefix_caching: bool = False,
        **kwargs,
    ) -> None:
        if "disable_log_stats" not in kwargs:
            kwargs["disable_log_stats"] = True
        engine_args = EngineArgs(
            model=model,
            tokenizer=tokenizer,
            tokenizer_mode=tokenizer_mode,
            trust_remote_code=trust_remote_code,
            tensor_parallel_size=tensor_parallel_size,
            dtype=dtype,
            quantization=quantization,
            revision=revision,
            seed=seed,
            gpu_memory_utilization=gpu_memory_utilization,
            swap_space=swap_space,
            enforce_eager=enforce_eager,
            max_context_len_to_capture=max_context_len_to_capture,
            max_seq_len_to_capture=max_seq_len_to_capture,
            disable_custom_all_reduce=disable_custom_all_reduce,
            enable_prefix_caching=enable_prefix_caching,
            **kwargs,
        )
        self.llm_engine = AphroditeEngine.from_engine_args(engine_args)
        self.request_counter = Counter()

    def get_tokenizer(
            self) -> Union[PreTrainedTokenizer, PreTrainedTokenizerFast]:
        """Returns the tokenizer used by the underlying engine."""
        return self.llm_engine.tokenizer

    def set_tokenizer(
        self,
        tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],
    ) -> None:
        """Replaces the tokenizer used by the underlying engine."""
        self.llm_engine.tokenizer = tokenizer

    def generate(
        self,
        prompts: Optional[Union[str, List[str]]] = None,
        sampling_params: Optional[Union[SamplingParams,
                                        List[SamplingParams]]] = None,
        prompt_token_ids: Optional[List[List[int]]] = None,
        use_tqdm: bool = True,
        lora_request: Optional[LoRARequest] = None,
        multi_modal_data: Optional[MultiModalData] = None,
    ) -> List[RequestOutput]:
        """Generates the completions for the input prompts.

        NOTE: This class automatically batches the given prompts, considering
        the memory constraint. For the best performance, put all of your
        prompts into a single list and pass it to this method.

        Args:
            prompts: A list of prompts to generate completions for.
            sampling_params: The sampling parameters for text generation. If
                None, we use the default sampling parameters. When it's a
                single value, it's applied to every prompt. When it's a list,
                the list must have the same length as the prompts and it's
                paired one-to-one with the prompts.
            prompt_token_ids: A list of token IDs for the prompts. If None, we
                use the tokenizer to convert the prompts to token IDs.
            use_tqdm: Whether to use tqdm to display the progress bar.
            lora_request: LoRA request to use for generation, if any.
            multi_modal_data: Multi-modal data to use for generation, if any.

        Returns:
            A list of `RequestOutput` objects containing the generated
            completions in the same order as the input prompts.
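
        Example:
            A short sketch of per-prompt sampling parameters, given an
            `llm = LLM(...)` instance (the prompts and parameter values are
            illustrative only):

                prompts = ["Write a haiku about GPUs.",
                           "Summarize the theory of relativity."]
                params = [SamplingParams(temperature=1.0, max_tokens=32),
                          SamplingParams(temperature=0.2, max_tokens=128)]
                # One SamplingParams per prompt, paired one-to-one.
                outputs = llm.generate(prompts, params)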
  146. """
        if sampling_params is None:
            # Use default sampling params.
            sampling_params = SamplingParams()

        requests_data = self._validate_and_prepare_requests(
            prompts,
            sampling_params,
            prompt_token_ids,
            lora_request,
            multi_modal_data,
        )

        # Add requests to the engine and run the engine.
        for request_data in requests_data:
            self._add_request(**request_data)

        return self._run_engine(use_tqdm)

    def encode(
        self,
        prompts: Optional[Union[str, List[str]]] = None,
        pooling_params: Optional[Union[PoolingParams,
                                       List[PoolingParams]]] = None,
        prompt_token_ids: Optional[List[List[int]]] = None,
        use_tqdm: bool = True,
        lora_request: Optional[LoRARequest] = None,
        multi_modal_data: Optional[MultiModalData] = None,
    ) -> List[EmbeddingRequestOutput]:
        """Generates the embeddings for the input prompts.

        NOTE: This class automatically batches the given prompts, considering
        the memory constraint. For the best performance, put all of your
        prompts into a single list and pass it to this method.

        Args:
            prompts: A list of prompts to generate embeddings for.
            pooling_params: The pooling parameters for pooling. If None, we
                use the default pooling parameters.
            prompt_token_ids: A list of token IDs for the prompts. If None, we
                use the tokenizer to convert the prompts to token IDs.
            use_tqdm: Whether to use tqdm to display the progress bar.
            lora_request: LoRA request to use for generation, if any.
            multi_modal_data: Multi-modal data to use for generation, if any.

        Returns:
            A list of `EmbeddingRequestOutput` objects containing the
            generated embeddings in the same order as the input prompts.
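
        Example:
            A minimal sketch. The model name below is only a placeholder for
            an embedding model, and the `.outputs.embedding` field is assumed
            to hold the pooled vector:

                llm = LLM(model="intfloat/e5-mistral-7b-instruct")
                outputs = llm.encode(["What is the capital of France?"])
                vector = outputs[0].outputs.embedding  # list of floats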
  187. """
        if pooling_params is None:
            # Use default pooling params.
            pooling_params = PoolingParams()

        requests_data = self._validate_and_prepare_requests(
            prompts,
            pooling_params,
            prompt_token_ids,
            lora_request,
            multi_modal_data,
        )

        # Add requests to the engine and run the engine.
        for request_data in requests_data:
            self._add_request(**request_data)

        return self._run_engine(use_tqdm)

    def _validate_and_prepare_requests(
        self,
        prompts: Optional[Union[str, List[str]]],
        params: Union[Union[SamplingParams, PoolingParams],
                      List[Union[SamplingParams,
                                 PoolingParams]]],  # Unified parameter
        prompt_token_ids: Optional[List[List[int]]] = None,
        lora_request: Optional[LoRARequest] = None,
        multi_modal_data: Optional[MultiModalData] = None,
    ) -> List[dict]:
        """Validates and prepares request data for adding to the engine.

        Ensures prompts and token IDs are consistent, and returns a list of
        dictionaries with request data for further processing.
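
        Each returned dictionary holds the keyword arguments for
        `_add_request`, e.g. (values illustrative only):

            {
                "prompt": "Hello",
                "params": SamplingParams(),
                "prompt_token_ids": None,
                "lora_request": None,
                "multi_modal_data": None,
            }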
  215. """
        if prompts is None and prompt_token_ids is None:
            raise ValueError("Either prompts or prompt_token_ids must be "
                             "provided.")
        if isinstance(prompts, str):
            # Convert a single prompt to a list.
            prompts = [prompts]
        if (prompts is not None and prompt_token_ids is not None
                and len(prompts) != len(prompt_token_ids)):
            raise ValueError("The lengths of prompts and prompt_token_ids "
                             "must be the same.")

        if prompts is not None:
            num_requests = len(prompts)
        else:
            assert prompt_token_ids is not None
            num_requests = len(prompt_token_ids)
        if isinstance(params, list) and len(params) != num_requests:
            raise ValueError("The lengths of prompts and params must "
                             "be the same.")
        if multi_modal_data:
            multi_modal_data.data = multi_modal_data.data.to(torch.float16)

        # Prepare the per-request data to be added to the engine.
        requests_data = []
        for i in range(num_requests):
            prompt = prompts[i] if prompts is not None else None
            token_ids = (None if prompt_token_ids is None else
                         prompt_token_ids[i])
            multi_modal_item = MultiModalData(
                type=multi_modal_data.type,
                data=multi_modal_data.data[i].unsqueeze(0),
            ) if multi_modal_data else None
            requests_data.append({
                "prompt": prompt,
                "params": params[i] if isinstance(params, list) else params,
                "prompt_token_ids": token_ids,
                "lora_request": lora_request,
                "multi_modal_data": multi_modal_item,
            })
        return requests_data

    def _add_request(
        self,
        prompt: Optional[str],
        params: Union[SamplingParams, PoolingParams],
        prompt_token_ids: Optional[List[int]],
        lora_request: Optional[LoRARequest] = None,
        multi_modal_data: Optional[MultiModalData] = None,
    ) -> None:
        request_id = str(next(self.request_counter))
        self.llm_engine.add_request(request_id,
                                    prompt,
                                    params,
                                    prompt_token_ids,
                                    lora_request=lora_request,
                                    multi_modal_data=multi_modal_data)

    def _run_engine(
        self, use_tqdm: bool
    ) -> List[Union[RequestOutput, EmbeddingRequestOutput]]:
        # Initialize tqdm.
        if use_tqdm:
            num_requests = self.llm_engine.get_num_unfinished_requests()
            pbar = tqdm(
                total=num_requests,
                desc="Processed prompts",
                dynamic_ncols=True,
                postfix=f"Generation Speed: {0:.2f} toks/s",
            )

        # Run the engine.
        outputs: List[Union[RequestOutput, EmbeddingRequestOutput]] = []
        total_toks = 0
        while self.llm_engine.has_unfinished_requests():
            step_outputs = self.llm_engine.step()
            for output in step_outputs:
                if output.finished:
                    outputs.append(output)
                    if use_tqdm:
                        if isinstance(output, RequestOutput):
                            # Calculate tokens only for RequestOutput.
                            total_toks += sum(
                                len(stp.token_ids) for stp in output.outputs)
                            spd = total_toks / pbar.format_dict["elapsed"]
                            pbar.postfix = f"Generation Speed: {spd:.2f} toks/s"
                        pbar.update(1)
        if use_tqdm:
            pbar.close()
        # Sort the outputs by request ID. This is necessary because some
        # requests may finish earlier than their preceding requests.
        outputs = sorted(outputs, key=lambda x: int(x.request_id))
        return outputs