serving_engine.py

import json
import pathlib
from dataclasses import dataclass
from http import HTTPStatus
from typing import Iterable, Iterator, List, Optional, Tuple, TypedDict, Union

from loguru import logger
from pydantic import Field
from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast
from typing_extensions import Annotated

from aphrodite.common.config import ModelConfig
from aphrodite.common.pooling_params import PoolingParams
from aphrodite.common.sampling_params import (LogitsProcessorFunc,
                                              SamplingParams)
from aphrodite.common.sequence import Logprob
from aphrodite.endpoints.logger import RequestLogger
# yapf conflicts with isort here
# yapf: disable
from aphrodite.endpoints.openai.protocol import (ChatCompletionRequest,
                                                 CompletionRequest,
                                                 DetokenizeRequest,
                                                 EmbeddingRequest,
                                                 ErrorResponse, ModelCard,
                                                 ModelList, ModelPermission,
                                                 TokenizeChatRequest,
                                                 TokenizeCompletionRequest,
                                                 TokenizeRequest)
# yapf: enable
from aphrodite.engine.protocol import EngineClient
from aphrodite.inputs.parse import parse_and_batch_prompt
from aphrodite.lora.request import LoRARequest
from aphrodite.modeling.guided_decoding import (
    get_guided_decoding_logits_processor)
from aphrodite.prompt_adapter.request import PromptAdapterRequest


@dataclass
class BaseModelPath:
    name: str
    model_path: str


@dataclass
class PromptAdapterPath:
    name: str
    local_path: str


@dataclass
class LoRAModulePath:
    name: str
    path: str
    base_model_name: Optional[str] = None


AnyRequest = Union[ChatCompletionRequest, CompletionRequest, DetokenizeRequest,
                   EmbeddingRequest, TokenizeRequest]

AnyTokenizer = Union[PreTrainedTokenizer, PreTrainedTokenizerFast]


class TextTokensPrompt(TypedDict):
    prompt: str
    prompt_token_ids: List[int]
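
# Illustrative sketch (not from the original source): a TextTokensPrompt
# simply pairs the raw prompt text with the token ids it maps to. The ids
# below are made-up placeholder values, not the output of any real tokenizer.
#
#   example_prompt: TextTokensPrompt = {
#       "prompt": "Hello, world!",
#       "prompt_token_ids": [101, 7592, 1010, 2088, 999, 102],
#   }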


class OpenAIServing:

    def __init__(
        self,
        engine_client: EngineClient,
        model_config: ModelConfig,
        base_model_paths: List[BaseModelPath],
        *,
        lora_modules: Optional[List[LoRAModulePath]],
        prompt_adapters: Optional[List[PromptAdapterPath]],
        request_logger: Optional[RequestLogger],
        return_tokens_as_token_ids: bool = False,
    ):
        super().__init__()

        self.engine_client = engine_client
        self.model_config = model_config
        self.max_model_len = model_config.max_model_len
        self.base_model_paths = base_model_paths

        self.lora_requests = []
        if lora_modules is not None:
            self.lora_requests = [
                LoRARequest(lora_name=lora.name,
                            lora_int_id=i,
                            lora_path=lora.path,
                            base_model_name=lora.base_model_name
                            if lora.base_model_name
                            and self._is_model_supported(lora.base_model_name)
                            else self.base_model_paths[0].name)
                for i, lora in enumerate(lora_modules, start=1)
            ]

        self.prompt_adapter_requests = []
        if prompt_adapters is not None:
            for i, prompt_adapter in enumerate(prompt_adapters, start=1):
                with pathlib.Path(prompt_adapter.local_path,
                                  "adapter_config.json").open() as f:
                    adapter_config = json.load(f)
                    num_virtual_tokens = adapter_config["num_virtual_tokens"]
                self.prompt_adapter_requests.append(
                    PromptAdapterRequest(
                        prompt_adapter_name=prompt_adapter.name,
                        prompt_adapter_id=i,
                        prompt_adapter_local_path=prompt_adapter.local_path,
                        prompt_adapter_num_virtual_tokens=num_virtual_tokens))

        self.request_logger = request_logger
        self.return_tokens_as_token_ids = return_tokens_as_token_ids
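
    # The prompt-adapter loop above reads only the "num_virtual_tokens" field
    # from each adapter's adapter_config.json. A minimal sketch of such a
    # file; the "peft_type" key is shown purely as an illustrative assumption:
    #
    #   {
    #       "peft_type": "PROMPT_TUNING",
    #       "num_virtual_tokens": 8
    #   }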

    async def show_available_models(self) -> ModelList:
        """Show available models: the base model(s) plus any registered
        LoRA modules and prompt adapters."""
        model_cards = [
            ModelCard(id=base_model.name,
                      max_model_len=self.max_model_len,
                      root=base_model.model_path,
                      permission=[ModelPermission()])
            for base_model in self.base_model_paths
        ]
        lora_cards = [
            ModelCard(id=lora.lora_name,
                      root=lora.local_path,
                      parent=lora.base_model_name if lora.base_model_name else
                      self.base_model_paths[0].name,
                      permission=[ModelPermission()])
            for lora in self.lora_requests
        ]
        prompt_adapter_cards = [
            ModelCard(id=prompt_adapter.prompt_adapter_name,
                      root=self.base_model_paths[0].name,
                      permission=[ModelPermission()])
            for prompt_adapter in self.prompt_adapter_requests
        ]
        model_cards.extend(lora_cards)
        model_cards.extend(prompt_adapter_cards)
        return ModelList(data=model_cards)
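
    # Rough shape of the /v1/models payload this produces (values are
    # illustrative only; the exact fields come from ModelCard/ModelList in
    # the protocol module):
    #
    #   {"object": "list",
    #    "data": [{"id": "<base model name>", "root": "<model path>",
    #              "max_model_len": 4096, "permission": [...]},
    #             {"id": "<lora name>", "parent": "<base model name>", ...}]}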

    def create_error_response(
            self,
            message: str,
            err_type: str = "BadRequestError",
            status_code: HTTPStatus = HTTPStatus.BAD_REQUEST) -> ErrorResponse:
        return ErrorResponse(message=message,
                             type=err_type,
                             code=status_code.value)

    def create_streaming_error_response(
            self,
            message: str,
            err_type: str = "BadRequestError",
            status_code: HTTPStatus = HTTPStatus.BAD_REQUEST) -> str:
        json_str = json.dumps({
            "error":
            self.create_error_response(message=message,
                                       err_type=err_type,
                                       status_code=status_code).model_dump()
        })
        return json_str
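
    # The streaming variant returns a JSON string intended for an SSE error
    # event; its payload mirrors create_error_response, roughly (illustrative):
    #
    #   {"error": {"message": "...", "type": "BadRequestError", "code": 400}}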

    async def _guided_decode_logits_processor(
            self, request: Union[ChatCompletionRequest, CompletionRequest],
            tokenizer: AnyTokenizer) -> Optional[LogitsProcessorFunc]:
        decoding_config = await self.engine_client.get_decoding_config()
        guided_decoding_backend = request.guided_decoding_backend \
            or decoding_config.guided_decoding_backend
        return await get_guided_decoding_logits_processor(
            guided_decoding_backend, request, tokenizer)

    async def _check_model(
        self,
        request: AnyRequest,
    ) -> Optional[ErrorResponse]:
        # Only check these if it's not a Tokenize/Detokenize request
        if not isinstance(request, (TokenizeRequest, DetokenizeRequest)):
            if self._is_model_supported(request.model):
                return None
            if request.model in [
                    lora.lora_name for lora in self.lora_requests
            ]:
                return None
            if request.model in [
                    prompt_adapter.prompt_adapter_name
                    for prompt_adapter in self.prompt_adapter_requests
            ]:
                return None
            return self.create_error_response(
                message=f"The model `{request.model}` does not exist.",
                err_type="NotFoundError",
                status_code=HTTPStatus.NOT_FOUND)

    def _maybe_get_adapters(
        self, request: AnyRequest
    ) -> Union[Tuple[None, None], Tuple[LoRARequest, None], Tuple[
            None, PromptAdapterRequest]]:
        if self._is_model_supported(request.model):
            return None, None
        for lora in self.lora_requests:
            if request.model == lora.lora_name:
                return lora, None
        for prompt_adapter in self.prompt_adapter_requests:
            if request.model == prompt_adapter.prompt_adapter_name:
                return None, prompt_adapter
        # if _check_model has been called earlier, this will be unreachable
        raise ValueError(f"The model `{request.model}` does not exist.")

    def add_lora(self, lora: LoRAModulePath):
        if lora.name in [
                existing.lora_name for existing in self.lora_requests
        ]:
            logger.error(f"LoRA module {lora.name} already exists.")
            return

        self.lora_requests.append(
            LoRARequest(
                lora_name=lora.name,
                lora_int_id=len(self.lora_requests) + 1,
                lora_path=lora.path,
            ))

    def remove_lora(self, lora_name: str):
        self.lora_requests = [
            lora for lora in self.lora_requests if lora.lora_name != lora_name
        ]

    def add_prompt_adapter(self, prompt_adapter: PromptAdapterPath):
        if prompt_adapter.name in [
                existing.prompt_adapter_name
                for existing in self.prompt_adapter_requests
        ]:
            logger.error(
                f"Prompt adapter {prompt_adapter.name} already exists.")
            return

        with pathlib.Path(prompt_adapter.local_path,
                          "adapter_config.json").open() as f:
            adapter_config = json.load(f)
            num_virtual_tokens = adapter_config["num_virtual_tokens"]

        self.prompt_adapter_requests.append(
            PromptAdapterRequest(
                prompt_adapter_name=prompt_adapter.name,
                prompt_adapter_id=len(self.prompt_adapter_requests) + 1,
                prompt_adapter_local_path=prompt_adapter.local_path,
                prompt_adapter_num_virtual_tokens=num_virtual_tokens))

    def remove_prompt_adapter(self, prompt_adapter_name: str):
        self.prompt_adapter_requests = [
            prompt_adapter for prompt_adapter in self.prompt_adapter_requests
            if prompt_adapter.prompt_adapter_name != prompt_adapter_name
        ]

    def _normalize_prompt_text_to_input(
        self,
        request: AnyRequest,
        tokenizer: AnyTokenizer,
        prompt: str,
        truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]],
        add_special_tokens: bool,
    ) -> TextTokensPrompt:
        if truncate_prompt_tokens is None:
            encoded = tokenizer(prompt, add_special_tokens=add_special_tokens)
        else:
            original_tokens = tokenizer(prompt,
                                        add_special_tokens=add_special_tokens)
            encoded = tokenizer(prompt,
                                add_special_tokens=add_special_tokens,
                                truncation=True,
                                max_length=truncate_prompt_tokens)
            if len(original_tokens.input_ids) > truncate_prompt_tokens:
                tokens_removed = len(
                    original_tokens.input_ids) - truncate_prompt_tokens
                logger.warning(
                    f"Prompt truncated: Removed {tokens_removed} tokens "
                    f"({len(original_tokens.input_ids)} -> "
                    f"{truncate_prompt_tokens})")

        input_ids = encoded.input_ids
        input_text = prompt

        return self._validate_input(request, input_ids, input_text)
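
    # Sketch of the truncation path above, with made-up numbers: for
    # truncate_prompt_tokens=4 and a prompt that tokenizes to 6 ids, the text
    # is re-encoded with truncation=True / max_length=4 and a warning like
    # "Prompt truncated: Removed 2 tokens (6 -> 4)" is logged.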

    def _normalize_prompt_tokens_to_input(
        self,
        request: AnyRequest,
        tokenizer: AnyTokenizer,
        prompt_ids: List[int],
        truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]],
    ) -> TextTokensPrompt:
        if truncate_prompt_tokens is None:
            input_ids = prompt_ids
        else:
            if len(prompt_ids) > truncate_prompt_tokens:
                tokens_removed = len(prompt_ids) - truncate_prompt_tokens
                logger.warning(
                    f"Prompt truncated: Removed {tokens_removed} tokens "
                    f"({len(prompt_ids)} -> {truncate_prompt_tokens})")
            # Keep only the last `truncate_prompt_tokens` ids
            input_ids = prompt_ids[-truncate_prompt_tokens:]

        input_text = tokenizer.decode(input_ids)

        return self._validate_input(request, input_ids, input_text)

    def _validate_input(
        self,
        request: AnyRequest,
        input_ids: List[int],
        input_text: str,
    ) -> TextTokensPrompt:
        token_num = len(input_ids)

        # Note: EmbeddingRequest doesn't have max_tokens
        if isinstance(request, EmbeddingRequest):
            if token_num > self.max_model_len:
                raise ValueError(
                    f"This model's maximum context length is "
                    f"{self.max_model_len} tokens. However, you requested "
                    f"{token_num} tokens in the input for embedding "
                    f"generation. Please reduce the length of the input.")
            return TextTokensPrompt(prompt=input_text,
                                    prompt_token_ids=input_ids)

        # Note: TokenizeRequest and DetokenizeRequest don't have max_tokens
        # and do not require model context length validation
        if isinstance(request, (TokenizeCompletionRequest, TokenizeChatRequest,
                                DetokenizeRequest)):
            return TextTokensPrompt(prompt=input_text,
                                    prompt_token_ids=input_ids)

        if request.max_tokens is None:
            if token_num >= self.max_model_len:
                raise ValueError(
                    f"This model's maximum context length is "
                    f"{self.max_model_len} tokens. However, you requested "
                    f"{token_num} tokens in the messages. "
                    f"Please reduce the length of the messages.")
        elif token_num + request.max_tokens > self.max_model_len:
            raise ValueError(
                f"This model's maximum context length is "
                f"{self.max_model_len} tokens. However, you requested "
                f"{request.max_tokens + token_num} tokens "
                f"({token_num} in the messages, "
                f"{request.max_tokens} in the completion). "
                f"Please reduce the length of the messages or completion.")

        return TextTokensPrompt(prompt=input_text, prompt_token_ids=input_ids)
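
    # Context-budget arithmetic used above, with made-up numbers: for
    # max_model_len=4096, a request with 4000 prompt tokens and max_tokens=200
    # is rejected (4000 + 200 > 4096), while max_tokens=96 (or leaving
    # max_tokens unset) would be accepted.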

    def _tokenize_prompt_input(
        self,
        request: AnyRequest,
        tokenizer: AnyTokenizer,
        prompt_input: Union[str, List[int]],
        truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None,
        add_special_tokens: bool = True,
    ) -> TextTokensPrompt:
        """
        A simpler implementation of :meth:`_tokenize_prompt_input_or_inputs`
        that assumes a single input.
        """
        return next(
            self._tokenize_prompt_inputs(
                request,
                tokenizer,
                [prompt_input],
                truncate_prompt_tokens=truncate_prompt_tokens,
                add_special_tokens=add_special_tokens,
            ))

    def _tokenize_prompt_inputs(
        self,
        request: AnyRequest,
        tokenizer: AnyTokenizer,
        prompt_inputs: Iterable[Union[str, List[int]]],
        truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None,
        add_special_tokens: bool = True,
    ) -> Iterator[TextTokensPrompt]:
        """
        A simpler implementation of :meth:`_tokenize_prompt_input_or_inputs`
        that assumes multiple inputs.
        """
        for text in prompt_inputs:
            if isinstance(text, str):
                yield self._normalize_prompt_text_to_input(
                    request,
                    tokenizer,
                    prompt=text,
                    truncate_prompt_tokens=truncate_prompt_tokens,
                    add_special_tokens=add_special_tokens,
                )
            else:
                yield self._normalize_prompt_tokens_to_input(
                    request,
                    tokenizer,
                    prompt_ids=text,
                    truncate_prompt_tokens=truncate_prompt_tokens,
                )

    def _tokenize_prompt_input_or_inputs(
        self,
        request: AnyRequest,
        tokenizer: AnyTokenizer,
        input_or_inputs: Union[str, List[str], List[int], List[List[int]]],
        truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None,
        add_special_tokens: bool = True,
    ) -> Iterator[TextTokensPrompt]:
        """
        Tokenize/detokenize depending on the input format.

        According to the `OpenAI API <https://platform.openai.com/docs/api-reference/embeddings/create>`_,
        each input can be a string or an array of tokens. Note that each
        request can pass one or more inputs.
        """
        for prompt_input in parse_and_batch_prompt(input_or_inputs):
            # Although our type checking is based on mypy,
            # VSCode Pyright extension should still work properly
            # "is True" is required for Pyright to perform type narrowing
            # See: https://github.com/microsoft/pyright/issues/7672
            if prompt_input["is_tokens"] is False:
                yield self._normalize_prompt_text_to_input(
                    request,
                    tokenizer,
                    prompt=prompt_input["content"],
                    truncate_prompt_tokens=truncate_prompt_tokens,
                    add_special_tokens=add_special_tokens,
                )
            else:
                yield self._normalize_prompt_tokens_to_input(
                    request,
                    tokenizer,
                    prompt_ids=prompt_input["content"],
                    truncate_prompt_tokens=truncate_prompt_tokens,
                )
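
    # Per the signature above, input_or_inputs may take any of these shapes
    # (token ids are placeholders):
    #
    #   "a single prompt string"
    #   ["several", "prompt", "strings"]
    #   [1, 2, 3]                       # one pre-tokenized prompt
    #   [[1, 2, 3], [4, 5, 6]]          # several pre-tokenized prompts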

    def _log_inputs(
        self,
        request_id: str,
        inputs: Union[str, List[int], TextTokensPrompt],
        params: Optional[Union[SamplingParams, PoolingParams]],
        lora_request: Optional[LoRARequest],
        prompt_adapter_request: Optional[PromptAdapterRequest],
    ) -> None:
        if self.request_logger is None:
            return

        if isinstance(inputs, str):
            prompt = inputs
            prompt_token_ids = None
        elif isinstance(inputs, list):
            prompt = None
            prompt_token_ids = inputs
        else:
            prompt = inputs["prompt"]
            prompt_token_ids = inputs["prompt_token_ids"]

        self.request_logger.log_inputs(
            request_id,
            prompt,
            prompt_token_ids,
            params=params,
            lora_request=lora_request,
            prompt_adapter_request=prompt_adapter_request,
        )

    @staticmethod
    def _get_decoded_token(logprob: Logprob,
                           token_id: int,
                           tokenizer: AnyTokenizer,
                           return_as_token_id: bool = False) -> str:
        if return_as_token_id:
            return f"token_id:{token_id}"

        if logprob.decoded_token is not None:
            return logprob.decoded_token
        return tokenizer.decode(token_id)
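
    # For example (hypothetical values): with return_as_token_id=True this
    # returns "token_id:42" for token id 42; otherwise it prefers the
    # already-decoded text on the Logprob and falls back to
    # tokenizer.decode(token_id) only when none was recorded.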

    def _is_model_supported(self, model_name: str) -> bool:
        return any(model.name == model_name for model in self.base_model_paths)