# tpu_executor.py
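"""A single-device TPU executor for Aphrodite.

This executor drives one local TPUWorker: it loads the model, sizes and
initializes the KV cache, and forwards execute_model requests to the worker.
Chunked prefill, speculative decoding, and LoRA are not supported on the TPU
backend.
"""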

from typing import Any, Dict, List, Optional, Set, Tuple

import torch
from loguru import logger

from aphrodite.common.sequence import ExecuteModelRequest, SamplerOutput
from aphrodite.common.utils import (get_distributed_init_method, get_ip,
                                    get_open_port, make_async)
from aphrodite.executor.executor_base import ExecutorAsyncBase, ExecutorBase
from aphrodite.lora.request import LoRARequest


class TPUExecutor(ExecutorBase):

    def _init_executor(self) -> None:
        assert not self.scheduler_config.chunked_prefill_enabled, (
            "Chunked prefill is not yet supported for TPU backend")
        assert not self.speculative_config, (
            "Speculative decoding is not yet supported for TPU backend")
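        # NOTE: TPU matrix units operate natively in bfloat16, so half- and
        # single-precision models are coerced to bfloat16 below.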
        if self.model_config.dtype in (torch.float16, torch.float32):
            logger.warning("The TPU backend currently does not support "
                           f"{self.model_config.dtype}. "
                           "Using bfloat16 instead.")
            self.model_config.dtype = torch.bfloat16

        # Instantiate the worker and load the model to the device.
        self.driver_worker = self._create_worker()
        self.driver_worker.init_device()
        self.driver_worker.load_model()

    def _get_worker_kwargs(
        self,
        local_rank: int = 0,
        rank: int = 0,
        distributed_init_method: Optional[str] = None,
    ) -> Dict[str, Any]:
        """Return worker init args for a given rank."""
        if distributed_init_method is None:
            distributed_init_method = get_distributed_init_method(
                get_ip(), get_open_port())
        return dict(
            model_config=self.model_config,
            parallel_config=self.parallel_config,
            scheduler_config=self.scheduler_config,
            device_config=self.device_config,
            cache_config=self.cache_config,
            load_config=self.load_config,
            local_rank=local_rank,
            rank=rank,
            distributed_init_method=distributed_init_method,
            vision_language_config=self.vision_language_config,
            is_driver_worker=rank == 0,
        )

    def _create_worker(
        self,
        local_rank: int = 0,
        rank: int = 0,
        distributed_init_method: Optional[str] = None,
    ):
        from aphrodite.task_handler.tpu_worker import TPUWorker

        worker = TPUWorker(**self._get_worker_kwargs(local_rank, rank,
                                                     distributed_init_method))
        return worker

    def initialize_cache(
        self,
        num_gpu_blocks: int,
        num_cpu_blocks: int,
    ) -> None:
        """Initialize the KV cache by invoking the underlying worker."""
        # NOTE: This is logged in the executor because there can be more than
        # one worker with other executors. We could log at the engine level,
        # but work remains to abstract away the device for non-GPU
        # configurations.
        logger.info(f"# TPU blocks: {num_gpu_blocks}, "
                    f"# CPU blocks: {num_cpu_blocks}")
        logger.info(
            f"Minimum concurrency: {num_gpu_blocks * self.cache_config.block_size / self.scheduler_config.max_model_len:.2f}x"  # noqa: E501
        )
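        # Example with hypothetical numbers: 2048 TPU blocks of 16 tokens each
        # against a max_model_len of 8192 gives 2048 * 16 / 8192 = 4.00x,
        # i.e. at least four maximum-length sequences can hold their KV cache
        # on the device at once.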
        self.driver_worker.initialize_cache(num_gpu_blocks, num_cpu_blocks)

    def determine_num_available_blocks(self) -> Tuple[int, int]:
        """Determine the number of available KV blocks by invoking the
        underlying worker.
        """
        return self.driver_worker.determine_num_available_blocks()

    def execute_model(
        self,
        execute_model_req: ExecuteModelRequest,
    ) -> List[SamplerOutput]:
        output = self.driver_worker.execute_model(execute_model_req)
        return output

    def add_lora(self, lora_request: LoRARequest) -> bool:
        raise NotImplementedError("LoRA is not implemented for TPU backend.")

    def remove_lora(self, lora_id: int) -> bool:
        raise NotImplementedError("LoRA is not implemented for TPU backend.")

    def pin_lora(self, lora_id: int) -> bool:
        raise NotImplementedError("LoRA is not implemented for TPU backend.")

    def list_loras(self) -> Set[int]:
        raise NotImplementedError("LoRA is not implemented for TPU backend.")

    def check_health(self) -> None:
        # TPUExecutor will always be healthy as long as it's running.
        return


class TPUExecutorAsync(TPUExecutor, ExecutorAsyncBase):

    async def execute_model_async(
        self,
        execute_model_req: ExecuteModelRequest,
    ) -> List[SamplerOutput]:
        output = await make_async(self.driver_worker.execute_model
                                  )(execute_model_req)
        return output
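
# Typical lifecycle (a sketch; in practice the engine constructs the executor
# with its config objects and drives these calls):
#
#     executor = TPUExecutor(...)  # config objects supplied by the engine
#     num_tpu_blocks, num_cpu_blocks = executor.determine_num_available_blocks()
#     executor.initialize_cache(num_tpu_blocks, num_cpu_blocks)
#     outputs = executor.execute_model(execute_model_req)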