serving_embedding.py
import asyncio
import base64
import time
from typing import (AsyncGenerator, AsyncIterator, List, Optional, Tuple,
                    Union, cast)

import numpy as np
from fastapi import Request
from loguru import logger

from aphrodite.common.config import ModelConfig
from aphrodite.common.outputs import EmbeddingRequestOutput
from aphrodite.common.utils import merge_async_iterators, random_uuid
from aphrodite.endpoints.logger import RequestLogger
from aphrodite.endpoints.openai.protocol import (EmbeddingRequest,
                                                 EmbeddingResponse,
                                                 EmbeddingResponseData,
                                                 ErrorResponse, UsageInfo)
from aphrodite.endpoints.openai.serving_engine import OpenAIServing
from aphrodite.engine.protocol import AsyncEngineClient

TypeTokenIDs = List[int]

def request_output_to_embedding_response(
        final_res_batch: List[EmbeddingRequestOutput], request_id: str,
        created_time: int, model_name: str,
        encoding_format: str) -> EmbeddingResponse:
    data: List[EmbeddingResponseData] = []
    num_prompt_tokens = 0
    for idx, final_res in enumerate(final_res_batch):
        prompt_token_ids = final_res.prompt_token_ids
        embedding = final_res.outputs.embedding
        if encoding_format == "base64":
            # Pack the floats into raw bytes, then base64-encode them
            # (a client-side decode sketch follows this function).
            embedding_bytes = np.array(embedding).tobytes()
            embedding = base64.b64encode(embedding_bytes).decode("utf-8")
        embedding_data = EmbeddingResponseData(index=idx, embedding=embedding)
        data.append(embedding_data)
        num_prompt_tokens += len(prompt_token_ids)

    # Embedding requests consume no completion tokens, so the total equals
    # the prompt count.
    usage = UsageInfo(
        prompt_tokens=num_prompt_tokens,
        total_tokens=num_prompt_tokens,
    )

    return EmbeddingResponse(
        id=request_id,
        created=created_time,
        model=model_name,
        data=data,
        usage=usage,
    )
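
# Client-side sketch (illustrative, not part of Aphrodite): reversing the
# base64 path above. The server serializes with `np.array(embedding).tobytes()`,
# and `np.array` on a list of Python floats yields float64, so that dtype is
# assumed here; the helper name is hypothetical.
def _decode_base64_embedding_example(data: str) -> List[float]:
    raw = base64.b64decode(data)
    return np.frombuffer(raw, dtype=np.float64).tolist()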

class OpenAIServingEmbedding(OpenAIServing):

    def __init__(
        self,
        async_engine_client: AsyncEngineClient,
        model_config: ModelConfig,
        served_model_names: List[str],
        *,
        request_logger: Optional[RequestLogger],
    ):
        super().__init__(async_engine_client=async_engine_client,
                         model_config=model_config,
                         served_model_names=served_model_names,
                         lora_modules=None,
                         prompt_adapters=None,
                         request_logger=request_logger)
        self._enabled = self._check_embedding_mode(model_config.embedding_mode)

    async def create_embedding(
        self,
        request: EmbeddingRequest,
        raw_request: Optional[Request] = None
    ) -> Union[ErrorResponse, EmbeddingResponse]:
        """Embedding API similar to OpenAI's API.

        See https://platform.openai.com/docs/api-reference/embeddings/create
        for the API specification. This API mimics the OpenAI Embedding API.
        A client-side usage sketch appears at the end of this file.
        """
        if not self._enabled:
            return self.create_error_response("Embedding API disabled")

        error_check_ret = await self._check_model(request)
        if error_check_ret is not None:
            return error_check_ret

        encoding_format = (request.encoding_format
                           if request.encoding_format else "float")
        if request.dimensions is not None:
            return self.create_error_response(
                "dimensions is currently not supported")

        model_name = request.model
        request_id = f"embd-{random_uuid()}"
        # `created` must be a Unix timestamp; time.time() provides that,
        # whereas time.monotonic() counts from an arbitrary epoch.
        created_time = int(time.time())

        # Schedule the request and get the result generator.
        generators: List[AsyncGenerator[EmbeddingRequestOutput, None]] = []
        try:
            (
                lora_request,
                prompt_adapter_request,
            ) = self._maybe_get_adapters(request)

            tokenizer = await self.async_engine_client.get_tokenizer(
                lora_request)

            pooling_params = request.to_pooling_params()

            prompts = list(
                self._tokenize_prompt_input_or_inputs(
                    request,
                    tokenizer,
                    request.input,
                ))
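
            # `request.input` may be a single string, a list of strings, or
            # pre-tokenized ID lists; each tokenized prompt below becomes its
            # own engine request.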
            for i, prompt_inputs in enumerate(prompts):
                request_id_item = f"{request_id}-{i}"

                self._log_inputs(request_id_item,
                                 prompt_inputs,
                                 params=pooling_params,
                                 lora_request=lora_request,
                                 prompt_adapter_request=prompt_adapter_request)

                if prompt_adapter_request is not None:
                    raise NotImplementedError(
                        "Prompt adapter is not supported "
                        "for embedding models")

                generator = self.async_engine_client.encode(
                    {"prompt_token_ids": prompt_inputs["prompt_token_ids"]},
                    pooling_params,
                    request_id_item,
                    lora_request=lora_request,
                )

                generators.append(generator)
        except ValueError as e:
            # TODO: Use an aphrodite-specific Validation Error
            return self.create_error_response(str(e))
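
        # merge_async_iterators fans the per-prompt generators back into one
        # stream of (index, output) pairs; results may arrive out of order, so
        # the index is used below to slot each into its original position, and
        # `is_cancelled` lets the merge abort if the HTTP client disconnects.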
        result_generator: AsyncIterator[Tuple[
            int, EmbeddingRequestOutput]] = merge_async_iterators(
                *generators,
                is_cancelled=raw_request.is_disconnected
                if raw_request else None)

        # Non-streaming response
        final_res_batch: List[Optional[EmbeddingRequestOutput]]
        final_res_batch = [None] * len(prompts)
        try:
            async for i, res in result_generator:
                final_res_batch[i] = res

            for final_res in final_res_batch:
                assert final_res is not None

            final_res_batch_checked = cast(List[EmbeddingRequestOutput],
                                           final_res_batch)

            response = request_output_to_embedding_response(
                final_res_batch_checked, request_id, created_time, model_name,
                encoding_format)
        except asyncio.CancelledError:
            return self.create_error_response("Client disconnected")
        except ValueError as e:
            # TODO: Use an aphrodite-specific Validation Error
            return self.create_error_response(str(e))

        return response

    def _check_embedding_mode(self, embedding_mode: bool):
        if not embedding_mode:
            logger.warning(
                "embedding_mode is False. Embedding API will not work.")
        else:
            logger.info("Activating the server engine with embedding enabled.")
        return embedding_mode
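
# Usage sketch (illustrative, not part of this module): exercising the endpoint
# with the official `openai` client. The base URL assumes Aphrodite's default
# port of 2242, and the model name is a placeholder for whatever embedding
# model the server was launched with.
def _example_client_usage():
    from openai import OpenAI  # requires `pip install openai`

    client = OpenAI(base_url="http://localhost:2242/v1", api_key="EMPTY")
    resp = client.embeddings.create(
        model="intfloat/e5-mistral-7b-instruct",  # placeholder model name
        input=["Hello, world!"],
        encoding_format="float",
    )
    print(len(resp.data[0].embedding))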