# gpu_executor.py

from typing import Any, Dict, List, Optional, Set, Tuple

from loguru import logger

from aphrodite.common.sequence import SamplerOutput, SequenceGroupMetadata
from aphrodite.common.utils import (get_distributed_init_method, get_ip,
                                    get_open_port, make_async)
from aphrodite.task_handler.worker_base import WorkerWrapperBase
from aphrodite.executor.executor_base import ExecutorAsyncBase, ExecutorBase
from aphrodite.lora.request import LoRARequest


class GPUExecutor(ExecutorBase):

    def _init_executor(self) -> None:
        """Initialize the worker and load the model.

        If speculative decoding is enabled, we instead create the speculative
        worker.
        """
        if self.speculative_config is None:
            self._init_non_spec_worker()
        else:
            self._init_spec_worker()

    def _get_worker_kwargs(
            self,
            local_rank: int = 0,
            rank: int = 0,
            distributed_init_method: Optional[str] = None) -> Dict[str, Any]:
        """Return worker init args for a given rank."""
        if distributed_init_method is None:
            distributed_init_method = get_distributed_init_method(
                get_ip(), get_open_port())
        return dict(
            model_config=self.model_config,
            parallel_config=self.parallel_config,
            scheduler_config=self.scheduler_config,
            device_config=self.device_config,
            cache_config=self.cache_config,
            load_config=self.load_config,
            local_rank=local_rank,
            rank=rank,
            distributed_init_method=distributed_init_method,
            lora_config=self.lora_config,
            vision_language_config=self.vision_language_config,
            is_driver_worker=rank == 0,
        )

    def _create_worker(self,
                       local_rank: int = 0,
                       rank: int = 0,
                       distributed_init_method: Optional[str] = None):
        wrapper = WorkerWrapperBase(
            worker_module_name="aphrodite.task_handler.worker",
            worker_class_name="Worker",
        )
        wrapper.init_worker(**self._get_worker_kwargs(local_rank, rank,
                                                      distributed_init_method))
        return wrapper.worker
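
    # Rough equivalent of the indirection above (a sketch, assuming the
    # wrapper simply instantiates the named worker class):
    #
    #     from aphrodite.task_handler.worker import Worker
    #     worker = Worker(**self._get_worker_kwargs(local_rank, rank,
    #                                                distributed_init_method))
    #
    # Going through WorkerWrapperBase defers that import until init_worker()
    # is called, presumably so environment setup can happen before the worker
    # module (and anything CUDA-related it pulls in) is loaded.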

    def _init_non_spec_worker(self):
        """Create the single GPU worker, initialize its device, and load the
        model weights."""
        assert self.parallel_config.world_size == 1, (
            "GPUExecutor only supports single GPU.")

        self.driver_worker = self._create_worker()
        self.driver_worker.init_device()
        self.driver_worker.load_model()

    def _init_spec_worker(self):
        """Initialize a SpecDecodeWorker, using a draft model for proposals.
        """
        assert self.speculative_config is not None

        from aphrodite.spec_decode.multi_step_worker import MultiStepWorker
        from aphrodite.spec_decode.spec_decode_worker import SpecDecodeWorker

        target_worker = self._create_worker()

        draft_worker_kwargs = self._get_worker_kwargs()
        # Override draft-model specific worker args.
        draft_worker_kwargs.update(
            model_config=self.speculative_config.draft_model_config,
            parallel_config=self.speculative_config.draft_parallel_config,
            # TODO allow draft-model specific load config.
            # load_config=self.load_config,
        )
        draft_worker = MultiStepWorker(**draft_worker_kwargs)

        spec_decode_worker = SpecDecodeWorker.from_workers(
            proposer_worker=draft_worker, scorer_worker=target_worker)

        assert self.parallel_config.world_size == 1, (
            "GPUExecutor only supports single GPU.")

        self.driver_worker = spec_decode_worker

        # Load model handled in spec decode worker.
        self.driver_worker.init_device()
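
    # High-level sketch of how the two workers cooperate (the details live in
    # SpecDecodeWorker, not here): each step, the MultiStepWorker proposes a
    # short run of draft tokens with the smaller draft model, and the target
    # worker scores them so that accepted tokens still follow the target
    # model's distribution.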

    def determine_num_available_blocks(self) -> Tuple[int, int]:
        """Determine the number of available KV blocks by invoking the
        underlying worker.
        """
        return self.driver_worker.determine_num_available_blocks()

    def initialize_cache(self, num_gpu_blocks: int,
                         num_cpu_blocks: int) -> None:
        """Initialize the KV cache by invoking the underlying worker.
        """
        # NOTE: This is logged in the executor because there can be >1 worker
        # with other executors. We could log at the engine level, but work
        # remains to abstract away the device for non-GPU configurations.
        logger.info(f"# GPU blocks: {num_gpu_blocks}, "
                    f"# CPU blocks: {num_cpu_blocks}")
        logger.info(
            f"Minimum concurrency: {num_gpu_blocks * self.cache_config.block_size / self.scheduler_config.max_model_len:.2f}x"  # noqa: E501
        )
        self.driver_worker.initialize_cache(num_gpu_blocks, num_cpu_blocks)
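
    # Worked example for the "minimum concurrency" figure above (illustrative
    # numbers, not defaults of this codebase): with block_size=16,
    # num_gpu_blocks=2048 and max_model_len=4096, the log reads
    # 2048 * 16 / 4096 = 8.00x, i.e. roughly eight maximum-length sequences
    # can keep their full KV cache resident on the GPU at once.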

    def execute_model(
        self,
        seq_group_metadata_list: List[SequenceGroupMetadata],
        blocks_to_swap_in: Dict[int, int],
        blocks_to_swap_out: Dict[int, int],
        blocks_to_copy: Dict[int, List[int]],
        num_lookahead_slots: int,
    ) -> List[SamplerOutput]:
        output = self.driver_worker.execute_model(
            seq_group_metadata_list=seq_group_metadata_list,
            blocks_to_swap_in=blocks_to_swap_in,
            blocks_to_swap_out=blocks_to_swap_out,
            blocks_to_copy=blocks_to_copy,
            num_lookahead_slots=num_lookahead_slots,
        )
        return output
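
    # Illustrative call shape (hypothetical values; in practice the engine
    # passes the scheduler's output straight through rather than building
    # these dicts by hand):
    #
    #     executor.execute_model(
    #         seq_group_metadata_list=metadata,
    #         blocks_to_swap_in={},    # CPU block -> GPU block to swap in
    #         blocks_to_swap_out={},   # GPU block -> CPU block to swap out
    #         blocks_to_copy={},       # src block -> list of dst blocks
    #         num_lookahead_slots=0,   # nonzero mainly with spec decoding
    #     )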

    def add_lora(self, lora_request: LoRARequest) -> bool:
        assert lora_request.lora_int_id > 0, "lora_id must be greater than 0."
        return self.driver_worker.add_lora(lora_request)

    def remove_lora(self, lora_id: int) -> bool:
        assert lora_id > 0, "lora_id must be greater than 0."
        return self.driver_worker.remove_lora(lora_id)

    def list_loras(self) -> Set[int]:
        return self.driver_worker.list_loras()

    def check_health(self) -> None:
        # GPUExecutor will always be healthy as long as
        # it's running.
        return


class GPUExecutorAsync(GPUExecutor, ExecutorAsyncBase):

    async def execute_model_async(
        self,
        seq_group_metadata_list: List[SequenceGroupMetadata],
        blocks_to_swap_in: Dict[int, int],
        blocks_to_swap_out: Dict[int, int],
        blocks_to_copy: Dict[int, List[int]],
        num_lookahead_slots: int,
    ) -> List[SamplerOutput]:
        output = await make_async(self.driver_worker.execute_model)(
            seq_group_metadata_list=seq_group_metadata_list,
            blocks_to_swap_in=blocks_to_swap_in,
            blocks_to_swap_out=blocks_to_swap_out,
            blocks_to_copy=blocks_to_copy,
            num_lookahead_slots=num_lookahead_slots)
        return output
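
# Usage sketch for the async path (assumes an already-constructed
# GPUExecutorAsync bound to `executor` and scheduler output bound to
# `metadata`; both names are placeholders):
#
#     output = await executor.execute_model_async(
#         seq_group_metadata_list=metadata,
#         blocks_to_swap_in={},
#         blocks_to_swap_out={},
#         blocks_to_copy={},
#         num_lookahead_slots=0,
#     )
#
# make_async wraps the blocking driver-worker call so it runs off the event
# loop (in a worker thread), keeping the engine loop responsive during the
# GPU step.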