# serving_chat.py

import time
import codecs
from fastapi import Request
from typing import AsyncGenerator, AsyncIterator, Optional, List, Union
from loguru import logger

from aphrodite.common.utils import random_uuid
from aphrodite.engine.async_aphrodite import AsyncAphrodite
from aphrodite.endpoints.openai.protocol import (
    ChatCompletionRequest, ChatCompletionResponse,
    ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice,
    ChatCompletionStreamResponse, ChatMessage, DeltaMessage, ErrorResponse,
    UsageInfo)
from aphrodite.common.outputs import RequestOutput
from aphrodite.endpoints.openai.serving_engine import OpenAIServing, LoRA
from aphrodite.modeling.outlines_decoding import (
    get_guided_decoding_logits_processor)


class OpenAIServingChat(OpenAIServing):

    def __init__(self,
                 engine: AsyncAphrodite,
                 served_model: str,
                 response_role: str,
                 lora_modules: Optional[List[LoRA]] = None,
                 chat_template=None):
        super().__init__(engine=engine,
                         served_model=served_model,
                         lora_modules=lora_modules)
        self.response_role = response_role
        self._load_chat_template(chat_template)

    async def create_chat_completion(
            self, request: ChatCompletionRequest, raw_request: Request
    ) -> Union[ErrorResponse, AsyncGenerator[str, None],
               ChatCompletionResponse]:
        """Completion API similar to OpenAI's API.

        See https://platform.openai.com/docs/api-reference/chat/create
        for the API specification. This API mimics the OpenAI ChatCompletion
        API.

        NOTE: Currently we do not support the following feature:
            - function_call (users should implement this themselves)
        """
        error_check_ret = await self._check_model(request)
        if error_check_ret is not None:
            return error_check_ret
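
        # Render the OpenAI-style message list into a single prompt string
        # using the tokenizer's chat template (see _load_chat_template).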
        try:
            prompt = self.tokenizer.apply_chat_template(
                conversation=request.messages,
                tokenize=False,
                add_generation_prompt=request.add_generation_prompt)
        except Exception as e:
            logger.error(
                f"Error in applying chat template from request: {str(e)}")
            return self.create_error_response(str(e))

        request_id = f"cmpl-{random_uuid()}"
        try:
            token_ids = self._validate_prompt_and_tokenize(request,
                                                           prompt=prompt)
            sampling_params = request.to_sampling_params()
            lora_request = self._maybe_get_lora(request)
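
            # Guided decoding (e.g. JSON-schema or regex constraints) is
            # applied as a logits processor that masks disallowed tokens
            # at every sampling step.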
            guided_decode_logits_processor = (
                await get_guided_decoding_logits_processor(
                    request, self.engine.get_tokenizer()))
            if guided_decode_logits_processor:
                if sampling_params.logits_processors is None:
                    sampling_params.logits_processors = []
                sampling_params.logits_processors.append(
                    guided_decode_logits_processor)
        except ValueError as e:
            return self.create_error_response(str(e))
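
        # engine.generate returns an async iterator that yields incremental
        # RequestOutput snapshots until generation completes.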
        result_generator = self.engine.generate(prompt, sampling_params,
                                                request_id, token_ids,
                                                lora_request)
        # Streaming response
        if request.stream:
            return self.chat_completion_stream_generator(
                request, result_generator, request_id)
        else:
            try:
                return await self.chat_completion_full_generator(
                    request, raw_request, result_generator, request_id)
            except ValueError as e:
                # TODO: Use an aphrodite-specific Validation Error
                return self.create_error_response(str(e))

    def get_chat_request_role(self, request: ChatCompletionRequest) -> str:
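        # With a generation prompt, the model starts a fresh message as the
        # configured response role; otherwise it continues the last message,
        # so the response echoes that message's role.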
        if request.add_generation_prompt:
            return self.response_role
        else:
            return request.messages[-1]["role"]

    async def chat_completion_stream_generator(
            self, request: ChatCompletionRequest,
            result_generator: AsyncIterator[RequestOutput], request_id: str
    ) -> Union[ErrorResponse, AsyncGenerator[str, None]]:
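        # Yields Server-Sent Events: each chunk goes out as
        # "data: {json}\n\n" and the stream is terminated with a final
        # "data: [DONE]\n\n" sentinel (OpenAI streaming wire format).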
        model_name = request.model
        created_time = int(time.time())
        chunk_object_type = "chat.completion.chunk"
        first_iteration = True

        # Per-choice streaming state, one slot per request.n choice (index).
        previous_texts = [""] * request.n
        previous_num_tokens = [0] * request.n
        finish_reason_sent = [False] * request.n

        try:
            async for res in result_generator:
                res: RequestOutput
                # The role chunk has to be sent here: if result_generator
                # raises, the error must still be the FIRST response, sent
                # by the except clause below.
                if first_iteration:
                    # Send the first response for each request.n (index)
                    # with the role.
                    role = self.get_chat_request_role(request)
                    for i in range(request.n):
                        choice_data = ChatCompletionResponseStreamChoice(
                            index=i,
                            delta=DeltaMessage(role=role),
                            logprobs=None,
                            finish_reason=None)
                        chunk = ChatCompletionStreamResponse(
                            id=request_id,
                            object=chunk_object_type,
                            created=created_time,
                            choices=[choice_data],
                            model=model_name)
                        data = chunk.model_dump_json(exclude_unset=True)
                        yield f"data: {data}\n\n"

                    # Send a response that echoes the input portion of the
                    # last message, if requested.
                    if request.echo:
                        last_msg_content = ""
                        if request.messages and isinstance(
                                request.messages,
                                list) and request.messages[-1].get(
                                    "content") and request.messages[-1].get(
                                        "role") == role:
                            last_msg_content = request.messages[-1]["content"]

                        if last_msg_content:
                            for i in range(request.n):
                                choice_data = ChatCompletionResponseStreamChoice(  # noqa
                                    index=i,
                                    delta=DeltaMessage(
                                        content=last_msg_content),
                                    logprobs=None,
                                    finish_reason=None)
                                chunk = ChatCompletionStreamResponse(
                                    id=request_id,
                                    object=chunk_object_type,
                                    created=created_time,
                                    choices=[choice_data],
                                    model=model_name)
                                data = chunk.model_dump_json(
                                    exclude_unset=True)
                                yield f"data: {data}\n\n"
                    first_iteration = False

                for output in res.outputs:
                    i = output.index
                    if finish_reason_sent[i]:
                        continue
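
                    # RequestOutput snapshots are cumulative, so slice off
                    # the part already sent to get this iteration's delta.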
                    delta_token_ids = output.token_ids[
                        previous_num_tokens[i]:]
                    top_logprobs = output.logprobs[
                        previous_num_tokens[i]:] if output.logprobs else None

                    if request.logprobs:
                        logprobs = self._create_logprobs(
                            token_ids=delta_token_ids,
                            top_logprobs=top_logprobs,
                            num_output_top_logprobs=request.logprobs,
                            initial_text_offset=len(previous_texts[i]),
                        )
                    else:
                        logprobs = None

                    delta_text = output.text[len(previous_texts[i]):]
                    previous_texts[i] = output.text
                    previous_num_tokens[i] = len(output.token_ids)

                    if output.finish_reason is None:
                        # Send a token-by-token response for each request.n.
                        choice_data = ChatCompletionResponseStreamChoice(
                            index=i,
                            delta=DeltaMessage(content=delta_text),
                            logprobs=logprobs,
                            finish_reason=None)
                        chunk = ChatCompletionStreamResponse(
                            id=request_id,
                            object=chunk_object_type,
                            created=created_time,
                            choices=[choice_data],
                            model=model_name)
                        data = chunk.model_dump_json(exclude_unset=True)
                        yield f"data: {data}\n\n"
                    else:
                        # Send the finish response for each request.n only
                        # once.
                        prompt_tokens = len(res.prompt_token_ids)
                        final_usage = UsageInfo(
                            prompt_tokens=prompt_tokens,
                            completion_tokens=previous_num_tokens[i],
                            total_tokens=prompt_tokens +
                            previous_num_tokens[i],
                        )
                        choice_data = ChatCompletionResponseStreamChoice(
                            index=i,
                            delta=DeltaMessage(content=delta_text),
                            logprobs=logprobs,
                            finish_reason=output.finish_reason)
                        chunk = ChatCompletionStreamResponse(
                            id=request_id,
                            object=chunk_object_type,
                            created=created_time,
                            choices=[choice_data],
                            model=model_name)
                        if final_usage is not None:
                            chunk.usage = final_usage
                        data = chunk.model_dump_json(exclude_unset=True,
                                                     exclude_none=True)
                        yield f"data: {data}\n\n"
                        finish_reason_sent[i] = True
        except ValueError as e:
            # TODO: Use an aphrodite-specific Validation Error
            data = self.create_streaming_error_response(str(e))
            yield f"data: {data}\n\n"
        # Send the final done message after all request.n choices finish.
        yield "data: [DONE]\n\n"
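
    # Example of the resulting stream for n=1 (illustrative values only):
    #   data: {"id": "cmpl-...", "object": "chat.completion.chunk", ...,
    #          "choices": [{"index": 0, "delta": {"role": "assistant"}, ...}]}
    #   data: {..., "choices": [{"index": 0, "delta": {"content": "Hi"}, ...}]}
    #   data: {..., "choices": [{"index": 0, ..., "finish_reason": "stop"}],
    #          "usage": {...}}
    #   data: [DONE]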

    async def chat_completion_full_generator(
            self, request: ChatCompletionRequest, raw_request: Request,
            result_generator: AsyncIterator[RequestOutput],
            request_id: str) -> Union[ErrorResponse, ChatCompletionResponse]:
        model_name = request.model
        created_time = int(time.time())
        final_res: Optional[RequestOutput] = None
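
        # Only the last snapshot matters (outputs are cumulative); drain the
        # generator, aborting early if the client disconnects.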
        async for res in result_generator:
            if await raw_request.is_disconnected():
                # Abort the request if the client disconnects.
                await self.engine.abort(request_id)
                return self.create_error_response("Client disconnected")
            final_res = res
        assert final_res is not None

        choices = []
        role = self.get_chat_request_role(request)
        for output in final_res.outputs:
            token_ids = output.token_ids
            top_logprobs = output.logprobs

            if request.logprobs:
                logprobs = self._create_logprobs(
                    token_ids=token_ids,
                    top_logprobs=top_logprobs,
                    num_output_top_logprobs=request.logprobs,
                )
            else:
                logprobs = None

            choice_data = ChatCompletionResponseChoice(
                index=output.index,
                message=ChatMessage(role=role, content=output.text),
                logprobs=logprobs,
                finish_reason=output.finish_reason,
            )
            choices.append(choice_data)

        if request.echo:
            last_msg_content = ""
            if request.messages and isinstance(
                    request.messages, list) and request.messages[-1].get(
                        "content") and request.messages[-1].get(
                            "role") == role:
                last_msg_content = request.messages[-1]["content"]

            for choice in choices:
                full_message = last_msg_content + choice.message.content
                choice.message.content = full_message

        num_prompt_tokens = len(final_res.prompt_token_ids)
        num_generated_tokens = sum(
            len(output.token_ids) for output in final_res.outputs)
        usage = UsageInfo(
            prompt_tokens=num_prompt_tokens,
            completion_tokens=num_generated_tokens,
            total_tokens=num_prompt_tokens + num_generated_tokens,
        )
        response = ChatCompletionResponse(
            id=request_id,
            created=created_time,
            model=model_name,
            choices=choices,
            usage=usage,
        )

        return response

    def _load_chat_template(self, chat_template):
        if chat_template is not None:
            try:
                with open(chat_template, "r") as f:
                    self.tokenizer.chat_template = f.read()
            except OSError:
                # If opening the file fails, assume chat_template holds the
                # template string itself and decode it so escape sequences
                # (e.g. "\n") are interpreted correctly.
                self.tokenizer.chat_template = codecs.decode(
                    chat_template, "unicode_escape")

            logger.info("Using the supplied chat template.")
        elif self.tokenizer.chat_template is not None:
            logger.info("Using the default chat template.")
        else:
            logger.warning(
                "No chat template provided. Chat API will not work.")