from abc import ABC, abstractmethod
from typing import List, Optional, Set, Tuple

from aphrodite.common.config import (CacheConfig, DeviceConfig, LoadConfig,
                                     LoRAConfig, ModelConfig, ParallelConfig,
                                     PromptAdapterConfig, SchedulerConfig,
                                     SpeculativeConfig)
from aphrodite.common.sequence import ExecuteModelRequest, SamplerOutput
from aphrodite.lora.request import LoRARequest
from aphrodite.prompt_adapter.request import PromptAdapterRequest
  10. class ExecutorBase(ABC):
  11. """Base class for all executors.
  12. An executor is responsible for executing the model on a specific device
  13. type (e.g., CPU, GPU, Neuron, etc.). Or it can be a distributed executor
  14. that can execute the model on multiple devices.
  15. """
  16. uses_ray: bool # whether the executor uses Ray for orchestration.
  17. def __init__(
  18. self,
  19. model_config: ModelConfig,
  20. cache_config: CacheConfig,
  21. parallel_config: ParallelConfig,
  22. scheduler_config: SchedulerConfig,
  23. device_config: DeviceConfig,
  24. load_config: LoadConfig,
  25. lora_config: Optional[LoRAConfig],
  26. speculative_config: Optional[SpeculativeConfig],
  27. prompt_adapter_config: Optional[PromptAdapterConfig],
  28. ) -> None:
  29. self.model_config = model_config
  30. self.cache_config = cache_config
  31. self.lora_config = lora_config
  32. self.load_config = load_config
  33. self.parallel_config = parallel_config
  34. self.scheduler_config = scheduler_config
  35. self.device_config = device_config
  36. self.speculative_config = speculative_config
  37. self.prompt_adapter_config = prompt_adapter_config
  38. self._init_executor()
  39. @abstractmethod
  40. def _init_executor(self) -> None:
  41. pass
  42. @abstractmethod
  43. def determine_num_available_blocks(self) -> Tuple[int, int]:
  44. """Determine the number of available blocks for the GPU KV cache and
  45. swappable CPU KV cache.
  46. Normally, this should simply delegate to the underlying Worker. Some
  47. ExecutorBase may require modification of the result, e.g. to ensure the
  48. selected cache sizes are compatible with all workers.
  49. Returns a Tuple[num_gpu_blocks, num_cpu_blocks], where num_gpu_blocks
  50. are blocks that are "active" on the device and can be appended to.
  51. num_cpu_blocks refers to "swapped" blocks in CPU memory and cannot be
  52. appended to.
  53. """
  54. raise NotImplementedError
  55. @abstractmethod
  56. def initialize_cache(self, num_gpu_blocks: int,
  57. num_cpu_blocks: int) -> None:
  58. """Initialize the KV cache with the given size in blocks.
  59. """
  60. raise NotImplementedError
  61. @abstractmethod
  62. def execute_model(
  63. self, execute_model_req: ExecuteModelRequest
  64. ) -> Optional[List[SamplerOutput]]:
  65. """Executes at least one model step on the given sequences."""
  66. raise NotImplementedError
  67. def stop_remote_worker_execution_loop(self) -> None:
  68. """Releases parallel workers from model loop."""
  69. return
  70. @abstractmethod
  71. def add_lora(self, lora_request: LoRARequest) -> bool:
  72. raise NotImplementedError
  73. @abstractmethod
  74. def remove_lora(self, lora_id: int) -> bool:
  75. raise NotImplementedError
  76. @abstractmethod
  77. def list_loras(self) -> Set[int]:
  78. raise NotImplementedError
  79. @abstractmethod
  80. def pin_lora(self, lora_id: int) -> bool:
  81. raise NotImplementedError
  82. @abstractmethod
  83. def add_prompt_adapter(
  84. self, prompt_adapter_request: PromptAdapterRequest) -> bool:
  85. raise NotImplementedError
  86. @abstractmethod
  87. def remove_prompt_adapter(self, prompt_adapter_id: int) -> bool:
  88. raise NotImplementedError
  89. @abstractmethod
  90. def pin_prompt_adapter(self, prompt_adapter_id: int) -> bool:
  91. raise NotImplementedError # type: ignore
  92. @abstractmethod
  93. def list_prompt_adapters(self) -> Set[int]:
  94. raise NotImplementedError
  95. @abstractmethod
  96. def check_health(self) -> None:
  97. """Checks if the executor is healthy. If not, it should raise an
  98. exception."""
  99. raise NotImplementedError
  100. def shutdown(self) -> None:
  101. """Shutdown the executor."""
  102. return
  103. def __del__(self):
  104. self.shutdown()
  105. class ExecutorAsyncBase(ExecutorBase):
  106. @abstractmethod
  107. async def execute_model_async(
  108. self,
  109. execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]:
  110. """Executes one model step on the given sequences."""
  111. raise NotImplementedError
  112. async def stop_remote_worker_execution_loop_async(self) -> None:
  113. """Releases parallel workers from model loop."""
  114. return
  115. async def check_health_async(self) -> None:
  116. """Checks if the executor is healthy. If not, it should raise an
  117. exception."""
  118. self.check_health()