gpu_executor.py

from typing import Any, Dict, List, Optional, Set, Tuple, Union

from loguru import logger

from aphrodite.common.sequence import (ExecuteModelRequest, PoolerOutput,
                                       SamplerOutput)
from aphrodite.common.utils import (get_distributed_init_method, get_ip,
                                    get_open_port, make_async)
from aphrodite.control_vectors.request import ControlVectorRequest
from aphrodite.executor.executor_base import ExecutorAsyncBase, ExecutorBase
from aphrodite.lora.request import LoRARequest
from aphrodite.prompt_adapter.request import PromptAdapterRequest
from aphrodite.task_handler.worker_base import WorkerWrapperBase
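

# Standalone helper (not a method) so other executors can reuse it.
# WorkerWrapperBase resolves the worker class from its module/class name,
# deferring the import until init_worker() runs in the target process.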
def create_worker(worker_module_name, worker_class_name, **kwargs):
    wrapper = WorkerWrapperBase(
        worker_module_name=worker_module_name,
        worker_class_name=worker_class_name,
    )
    wrapper.init_worker(**kwargs)
    return wrapper.worker
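

# Single-GPU executor: the engine drives one in-process worker directly;
# no Ray or multiprocessing layer is involved.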
class GPUExecutor(ExecutorBase):

    uses_ray: bool = False

    def _init_executor(self) -> None:
        """Initialize the worker and load the model."""
        assert self.parallel_config.world_size == 1, (
            "GPUExecutor only supports single GPU.")

        self.driver_worker = self._create_worker()
        self.driver_worker.init_device()
        self.driver_worker.load_model()

    def _get_worker_kwargs(
            self,
            local_rank: int = 0,
            rank: int = 0,
            distributed_init_method: Optional[str] = None) -> Dict[str, Any]:
        """Return worker init args for a given rank."""
        if distributed_init_method is None:
            distributed_init_method = get_distributed_init_method(
                get_ip(), get_open_port())
        return dict(
            model_config=self.model_config,
            parallel_config=self.parallel_config,
            scheduler_config=self.scheduler_config,
            device_config=self.device_config,
            cache_config=self.cache_config,
            load_config=self.load_config,
            local_rank=local_rank,
            rank=rank,
            distributed_init_method=distributed_init_method,
            lora_config=self.lora_config,
            multimodal_config=self.multimodal_config,
            speculative_config=self.speculative_config,
            prompt_adapter_config=self.prompt_adapter_config,
            control_vector_config=self.control_vector_config,
            is_driver_worker=(not self.parallel_config)
            or (rank % self.parallel_config.tensor_parallel_size == 0),
        )
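
    # Worker selection happens by name: the plain Worker for normal decoding,
    # or the speculative-decoding factory when a speculative config is set.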
    def _get_create_worker_kwargs(
            self,
            local_rank: int = 0,
            rank: int = 0,
            distributed_init_method: Optional[str] = None) -> Dict:
        worker_kwargs = self._get_worker_kwargs(local_rank, rank,
                                                distributed_init_method)
        if self.speculative_config is None:
            worker_kwargs.update(
                worker_module_name="aphrodite.task_handler.worker",
                worker_class_name="Worker")
        else:
            worker_kwargs.update(
                worker_module_name="aphrodite.spec_decode.spec_decode_worker",
                worker_class_name="create_spec_worker")
        return worker_kwargs

    def _create_worker(self,
                       local_rank: int = 0,
                       rank: int = 0,
                       distributed_init_method: Optional[str] = None):
        return create_worker(**self._get_create_worker_kwargs(
            local_rank=local_rank,
            rank=rank,
            distributed_init_method=distributed_init_method))
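
    # Cache sizing is two-phase: the worker first profiles memory usage to
    # report how many KV blocks fit, then initialize_cache() allocates them.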
    def determine_num_available_blocks(self) -> Tuple[int, int]:
        """Determine the number of available KV blocks by invoking the
        underlying worker.
        """
        return self.driver_worker.determine_num_available_blocks()

    def initialize_cache(self, num_gpu_blocks: int,
                         num_cpu_blocks: int) -> None:
        """Initialize the KV cache by invoking the underlying worker."""
        # NOTE: This is logged in the executor because there can be >1 worker
        # with other executors. We could log at the engine level instead, but
        # work remains to abstract away the device for non-GPU configurations.
        logger.info(f"# GPU blocks: {num_gpu_blocks}, "
                    f"# CPU blocks: {num_cpu_blocks}")
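        # Total KV capacity in tokens (num_gpu_blocks * block_size) divided
        # by max_model_len is how many maximum-length sequences fit at once.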
        logger.info(
            f"Minimum concurrency: {num_gpu_blocks * self.cache_config.block_size / self.scheduler_config.max_model_len:.2f}x"  # noqa: E501
        )
        self.driver_worker.initialize_cache(num_gpu_blocks, num_cpu_blocks)

    def execute_model(
        self, execute_model_req: ExecuteModelRequest
    ) -> Optional[List[Union[SamplerOutput, PoolerOutput]]]:
        output = self.driver_worker.execute_model(execute_model_req)
        return output
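
    # LoRA management: validate the adapter ID here, then delegate to the
    # driver worker.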
    def add_lora(self, lora_request: LoRARequest) -> bool:
        assert lora_request.lora_int_id > 0, "lora_id must be greater than 0."
        return self.driver_worker.add_lora(lora_request)

    def remove_lora(self, lora_id: int) -> bool:
        assert lora_id > 0, "lora_id must be greater than 0."
        return self.driver_worker.remove_lora(lora_id)

    def list_loras(self) -> Set[int]:
        return self.driver_worker.list_loras()

    def pin_lora(self, lora_id: int) -> bool:
        assert lora_id > 0, "lora_id must be greater than 0."
        return self.driver_worker.pin_lora(lora_id)
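
    # Prompt adapters follow the same validate-then-delegate pattern.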
    def add_prompt_adapter(
            self, prompt_adapter_request: PromptAdapterRequest) -> bool:
        assert prompt_adapter_request.prompt_adapter_id > 0, \
            "prompt_adapter_id must be greater than 0."
        return self.driver_worker.add_prompt_adapter(prompt_adapter_request)

    def remove_prompt_adapter(self, prompt_adapter_id: int) -> bool:
        assert prompt_adapter_id > 0, \
            "prompt_adapter_id must be greater than 0."
        return self.driver_worker.remove_prompt_adapter(prompt_adapter_id)

    def pin_prompt_adapter(self, prompt_adapter_id: int) -> bool:
        assert prompt_adapter_id > 0, \
            "prompt_adapter_id must be greater than 0."
        return self.driver_worker.pin_prompt_adapter(prompt_adapter_id)

    def list_prompt_adapters(self) -> Set[int]:
        return self.driver_worker.list_prompt_adapters()
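
    # Control vectors: same delegation pattern as the adapters above.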
    def add_control_vector(
            self, control_vector_request: ControlVectorRequest) -> bool:
        assert control_vector_request.adapter_id > 0, \
            "adapter_id must be greater than 0."
        return self.driver_worker.add_control_vector(control_vector_request)

    def remove_control_vector(self, cv_id: int) -> bool:
        return self.driver_worker.remove_control_vector(cv_id)

    def check_health(self) -> None:
        # GPUExecutor will always be healthy as long as it's running.
        return
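

# make_async wraps the blocking execute_model call in a coroutine that runs
# it off the event loop, so other tasks can proceed during a GPU step.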
class GPUExecutorAsync(GPUExecutor, ExecutorAsyncBase):

    async def execute_model_async(
        self,
        execute_model_req: ExecuteModelRequest,
    ) -> List[Union[SamplerOutput, PoolerOutput]]:
        output = await make_async(self.driver_worker.execute_model
                                  )(execute_model_req=execute_model_req)
        return output
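

# Minimal usage sketch (hedged): executors are normally constructed by the
# engine with the config objects ExecutorBase expects; the exact keyword
# names below are assumptions for illustration, not a confirmed API.
#
#     executor = GPUExecutor(model_config=model_config,
#                            cache_config=cache_config,
#                            parallel_config=parallel_config,
#                            scheduler_config=scheduler_config,
#                            device_config=device_config,
#                            load_config=load_config,
#                            lora_config=None,
#                            multimodal_config=None,
#                            speculative_config=None,
#                            prompt_adapter_config=None,
#                            control_vector_config=None)
#     num_gpu, num_cpu = executor.determine_num_available_blocks()
#     executor.initialize_cache(num_gpu, num_cpu)
#     outputs = executor.execute_model(execute_model_req)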