serving_embedding.py

import asyncio
import base64
import time
from typing import AsyncGenerator, AsyncIterator, List, Optional, Tuple, cast

import numpy as np
from fastapi import Request
from loguru import logger

from aphrodite.common.config import ModelConfig
from aphrodite.common.outputs import EmbeddingRequestOutput
from aphrodite.common.utils import merge_async_iterators, random_uuid
from aphrodite.endpoints.logger import RequestLogger
from aphrodite.endpoints.openai.protocol import (EmbeddingRequest,
                                                 EmbeddingResponse,
                                                 EmbeddingResponseData,
                                                 UsageInfo)
from aphrodite.endpoints.openai.serving_engine import OpenAIServing
from aphrodite.engine.protocol import AsyncEngineClient

TypeTokenIDs = List[int]


def request_output_to_embedding_response(
        final_res_batch: List[EmbeddingRequestOutput], request_id: str,
        created_time: int, model_name: str,
        encoding_format: str) -> EmbeddingResponse:
    """Convert a batch of engine outputs into an OpenAI-style
    EmbeddingResponse, encoding each embedding as requested."""
    data: List[EmbeddingResponseData] = []
    num_prompt_tokens = 0
    for idx, final_res in enumerate(final_res_batch):
        prompt_token_ids = final_res.prompt_token_ids
        embedding = final_res.outputs.embedding
        if encoding_format == "base64":
            # Serialize the raw float buffer and ship it as a base64 string.
            embedding_bytes = np.array(embedding).tobytes()
            embedding = base64.b64encode(embedding_bytes).decode("utf-8")

        embedding_data = EmbeddingResponseData(index=idx, embedding=embedding)
        data.append(embedding_data)

        num_prompt_tokens += len(prompt_token_ids)

    usage = UsageInfo(
        prompt_tokens=num_prompt_tokens,
        total_tokens=num_prompt_tokens,
    )

    return EmbeddingResponse(
        id=request_id,
        created=created_time,
        model=model_name,
        data=data,
        usage=usage,
    )

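
# A minimal sketch of the client-side inverse of the "base64" branch above.
# Illustrative only, not part of this module: np.array() infers the dtype on
# the server (float64 for a list of Python floats), so the decoder's dtype
# must match whatever the server actually produced.
def _decode_base64_embedding_example(data: str) -> List[float]:
    # Reverses base64.b64encode(np.array(embedding).tobytes()) from above.
    return np.frombuffer(base64.b64decode(data), dtype=np.float64).tolist()
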

class OpenAIServingEmbedding(OpenAIServing):

    def __init__(
        self,
        async_engine_client: AsyncEngineClient,
        model_config: ModelConfig,
        served_model_names: List[str],
        *,
        request_logger: Optional[RequestLogger],
    ):
        super().__init__(async_engine_client=async_engine_client,
                         model_config=model_config,
                         served_model_names=served_model_names,
                         lora_modules=None,
                         prompt_adapters=None,
                         request_logger=request_logger)
        self._check_embedding_mode(model_config.embedding_mode)

    async def create_embedding(self, request: EmbeddingRequest,
                               raw_request: Request):
        """Embedding API similar to OpenAI's API.

        See https://platform.openai.com/docs/api-reference/embeddings/create
        for the API specification. This API mimics the OpenAI Embedding API.
        """
        error_check_ret = await self._check_model(request)
        if error_check_ret is not None:
            return error_check_ret

        encoding_format = (request.encoding_format
                           if request.encoding_format else "float")
        if request.dimensions is not None:
            return self.create_error_response(
                "dimensions is currently not supported")

        model_name = request.model
        request_id = f"embd-{random_uuid()}"
        # The OpenAI `created` field is a Unix timestamp; time.monotonic()
        # measures from an arbitrary epoch and would yield a bogus value.
        created_time = int(time.time())

        # Schedule the request and get the result generator.
        generators: List[AsyncGenerator[EmbeddingRequestOutput, None]] = []
        try:
            (
                lora_request,
                prompt_adapter_request,
            ) = self._maybe_get_adapters(request)

            tokenizer = await self.async_engine_client.get_tokenizer(
                lora_request)

            pooling_params = request.to_pooling_params()

            prompts = list(
                self._tokenize_prompt_input_or_inputs(
                    request,
                    tokenizer,
                    request.input,
                ))

            for i, prompt_inputs in enumerate(prompts):
                request_id_item = f"{request_id}-{i}"

                self._log_inputs(request_id_item,
                                 prompt_inputs,
                                 params=pooling_params,
                                 lora_request=lora_request,
                                 prompt_adapter_request=prompt_adapter_request)

                if prompt_adapter_request is not None:
                    raise NotImplementedError(
                        "Prompt adapter is not supported "
                        "for embedding models")

                generator = self.async_engine_client.encode(
                    {"prompt_token_ids": prompt_inputs["prompt_token_ids"]},
                    pooling_params,
                    request_id_item,
                    lora_request=lora_request,
                )

                generators.append(generator)
        except ValueError as e:
            # TODO: Use an aphrodite-specific Validation Error
            return self.create_error_response(str(e))

        result_generator: AsyncIterator[Tuple[
            int, EmbeddingRequestOutput]] = merge_async_iterators(
                *generators, is_cancelled=raw_request.is_disconnected)
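        # merge_async_iterators interleaves the per-prompt generators and
        # yields (index, output) pairs as results complete, so each output
        # below can be written back to its original slot regardless of the
        # order in which prompts finish.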

        # Non-streaming response
        final_res_batch: List[Optional[EmbeddingRequestOutput]]
        final_res_batch = [None] * len(prompts)
        try:
            async for i, res in result_generator:
                final_res_batch[i] = res

            for final_res in final_res_batch:
                assert final_res is not None

            final_res_batch_checked = cast(List[EmbeddingRequestOutput],
                                           final_res_batch)

            response = request_output_to_embedding_response(
                final_res_batch_checked, request_id, created_time, model_name,
                encoding_format)
        except asyncio.CancelledError:
            return self.create_error_response("Client disconnected")
        except ValueError as e:
            # TODO: Use an aphrodite-specific Validation Error
            return self.create_error_response(str(e))

        return response

    def _check_embedding_mode(self, embedding_mode: bool):
        if not embedding_mode:
            logger.warning(
                "embedding_mode is False. Embedding API will not work.")
        else:
            logger.info(
                "Activating the server engine with embedding enabled.")
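
# Illustrative client usage (not part of this module). The base URL, port,
# and model name below are placeholders; adjust them to your deployment:
#
#     from openai import OpenAI
#     client = OpenAI(base_url="http://localhost:2242/v1", api_key="EMPTY")
#     resp = client.embeddings.create(model="my-embedding-model",
#                                     input="Hello world")
#     print(resp.data[0].embedding[:4])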