gpu_executor.py

from typing import Any, Dict, List, Optional, Set, Tuple, Union

from loguru import logger

from aphrodite.common.sequence import (ExecuteModelRequest, PoolerOutput,
                                       SamplerOutput)
from aphrodite.common.utils import (get_distributed_init_method, get_ip,
                                    get_open_port, make_async)
from aphrodite.executor.executor_base import ExecutorAsyncBase, ExecutorBase
from aphrodite.lora.request import LoRARequest
from aphrodite.task_handler.worker_base import WorkerWrapperBase


class GPUExecutor(ExecutorBase):

    def _init_executor(self) -> None:
        """Initialize the worker and load the model."""
        assert self.parallel_config.world_size == 1, (
            "GPUExecutor only supports single GPU.")

        self.driver_worker = self._create_worker()
        self.driver_worker.init_device()
        self.driver_worker.load_model()

    def _get_worker_kwargs(
            self,
            local_rank: int = 0,
            rank: int = 0,
            distributed_init_method: Optional[str] = None) -> Dict[str, Any]:
        """Return worker init args for a given rank."""
        if distributed_init_method is None:
            distributed_init_method = get_distributed_init_method(
                get_ip(), get_open_port())
        return dict(
            model_config=self.model_config,
            parallel_config=self.parallel_config,
            scheduler_config=self.scheduler_config,
            device_config=self.device_config,
            cache_config=self.cache_config,
            load_config=self.load_config,
            local_rank=local_rank,
            rank=rank,
            distributed_init_method=distributed_init_method,
            lora_config=self.lora_config,
            vision_language_config=self.vision_language_config,
            speculative_config=self.speculative_config,
            is_driver_worker=(not self.parallel_config)
            or (rank % self.parallel_config.tensor_parallel_size == 0),
        )

    def _create_worker(self,
                       local_rank: int = 0,
                       rank: int = 0,
                       distributed_init_method: Optional[str] = None):
        # Use the speculative-decoding worker when a speculative config is
        # set; otherwise use the regular GPU worker.
        if self.speculative_config is None:
            worker_module_name = "aphrodite.task_handler.worker"
            worker_class_name = "Worker"
        else:
            worker_module_name = "aphrodite.spec_decode.spec_decode_worker"
            worker_class_name = "create_spec_worker"

        wrapper = WorkerWrapperBase(
            worker_module_name=worker_module_name,
            worker_class_name=worker_class_name,
        )
        wrapper.init_worker(**self._get_worker_kwargs(local_rank, rank,
                                                      distributed_init_method))
        return wrapper.worker

    def determine_num_available_blocks(self) -> Tuple[int, int]:
        """Determine the number of available KV blocks by invoking the
        underlying worker.
        """
        return self.driver_worker.determine_num_available_blocks()

    def initialize_cache(self, num_gpu_blocks: int,
                         num_cpu_blocks: int) -> None:
        """Initialize the KV cache by invoking the underlying worker."""
        # NOTE: This is logged in the executor because there can be >1 worker
        # with other executors. We could log at the engine level, but work
        # remains to abstract away the device for non-GPU configurations.
        logger.info(f"# GPU blocks: {num_gpu_blocks}, "
                    f"# CPU blocks: {num_cpu_blocks}")
        # Minimum concurrency: how many sequences of max_model_len tokens the
        # GPU KV cache can hold at the same time.
        logger.info(
            f"Minimum concurrency: {num_gpu_blocks * self.cache_config.block_size / self.scheduler_config.max_model_len:.2f}x"  # noqa: E501
        )
        self.driver_worker.initialize_cache(num_gpu_blocks, num_cpu_blocks)

    def execute_model(
        self, execute_model_req: ExecuteModelRequest
    ) -> Optional[List[Union[SamplerOutput, PoolerOutput]]]:
        output = self.driver_worker.execute_model(execute_model_req)
        return output

    def add_lora(self, lora_request: LoRARequest) -> bool:
        assert lora_request.lora_int_id > 0, "lora_id must be greater than 0."
        return self.driver_worker.add_lora(lora_request)

    def remove_lora(self, lora_id: int) -> bool:
        assert lora_id > 0, "lora_id must be greater than 0."
        return self.driver_worker.remove_lora(lora_id)

    def list_loras(self) -> Set[int]:
        return self.driver_worker.list_loras()

    def pin_lora(self, lora_id: int) -> bool:
        assert lora_id > 0, "lora_id must be greater than 0."
        return self.driver_worker.pin_lora(lora_id)

    def check_health(self) -> None:
        # GPUExecutor will always be healthy as long as it's running.
        return


class GPUExecutorAsync(GPUExecutor, ExecutorAsyncBase):

    async def execute_model_async(
        self,
        execute_model_req: ExecuteModelRequest,
    ) -> List[Union[SamplerOutput, PoolerOutput]]:
        # Wrap the blocking worker call with make_async so it does not block
        # the event loop.
        output = await make_async(self.driver_worker.execute_model
                                  )(execute_model_req=execute_model_req)
        return output
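
For orientation, the lines below sketch how an engine typically drives this executor: profile the available KV cache, allocate it, then submit batched execution. This is a minimal sketch, not part of gpu_executor.py; it assumes an already-constructed GPUExecutor instance (`executor`) and a prepared `execute_model_req`, both of which are normally built by the engine from its config objects and are placeholders here.

# Illustrative usage sketch; `executor` and `execute_model_req` are assumed
# to be provided by the surrounding engine code.
num_gpu_blocks, num_cpu_blocks = executor.determine_num_available_blocks()
executor.initialize_cache(num_gpu_blocks, num_cpu_blocks)
outputs = executor.execute_model(execute_model_req)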