@@ -1,17 +1,28 @@
+import asyncio
 from abc import abstractmethod
-from typing import Any, Dict, List, Optional, Set, Tuple
+from typing import Any, Awaitable, Dict, List, Optional, Set, Tuple, Union

 from loguru import logger

+from aphrodite.common.sequence import ExecuteModelRequest, SamplerOutput
 from aphrodite.executor.executor_base import ExecutorAsyncBase
 from aphrodite.executor.gpu_executor import GPUExecutor
 from aphrodite.lora.request import LoRARequest
-from aphrodite.common.sequence import SamplerOutput


 class DistributedGPUExecutor(GPUExecutor):
     """Abstract superclass of multi-GPU executor implementations."""

+    def __init__(self, *args, **kwargs):
+        # This is non-None when the execute model loop is running
+        # in the parallel workers. It's a coroutine in the AsyncLLMEngine case.
+        self.parallel_worker_tasks: Optional[Union[Any, Awaitable[Any]]] = None
+        # Updated by implementations that require additional args to be passed
+        # to the _run_workers execute_model call
+        self.extra_execute_model_run_workers_kwargs: Dict[str, Any] = {}
+
+        super().__init__(*args, **kwargs)
+
     def determine_num_available_blocks(self) -> Tuple[int, int]:
         """Determine the number of available KV blocks.
         This invokes `determine_num_available_blocks` on each worker and takes
@@ -52,13 +63,28 @@ class DistributedGPUExecutor(GPUExecutor):
                           num_gpu_blocks=num_gpu_blocks,
                           num_cpu_blocks=num_cpu_blocks)

-    def execute_model(self, *args, **kwargs) -> List[SamplerOutput]:
-        all_outputs = self._run_workers("execute_model",
-                                        driver_args=args,
-                                        driver_kwargs=kwargs)
+    def execute_model(
+            self,
+            execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]:
+        if self.parallel_worker_tasks is None:
+            self.parallel_worker_tasks = self._run_workers(
+                "start_worker_execution_loop",
+                async_run_remote_workers_only=True,
+                **self.extra_execute_model_run_workers_kwargs)

         # Only the driver worker returns the sampling results.
-        return all_outputs[0]
+        return self._driver_execute_model(execute_model_req)
+
+    def stop_remote_worker_execution_loop(self) -> None:
+        if self.parallel_worker_tasks is None:
+            return
+
+        self._driver_execute_model()
+        parallel_worker_tasks = self.parallel_worker_tasks
+        self.parallel_worker_tasks = None
+        # Ensure that workers exit model loop cleanly
+        # (this will raise otherwise)
+        self._wait_for_tasks_completion(parallel_worker_tasks)

     def add_lora(self, lora_request: LoRARequest) -> bool:
         assert lora_request.lora_int_id > 0, "lora_id must be greater than 0."
@@ -88,39 +114,81 @@ class DistributedGPUExecutor(GPUExecutor):
                           pattern=pattern,
                           max_size=max_size)

+    @abstractmethod
+    def _driver_execute_model(
+        self,
+        execute_model_req: Optional[ExecuteModelRequest] = None
+    ) -> List[SamplerOutput]:
+        """Run execute_model in the driver worker.
+        Passing None will cause the driver to stop the model execution
+        loop running in each of the remote workers.
+        """
+        raise NotImplementedError
+
     @abstractmethod
     def _run_workers(
         self,
         method: str,
         *args,
-        driver_args: Optional[Tuple[Any, ...]] = None,
-        driver_kwargs: Optional[Dict[str, Any]] = None,
+        async_run_remote_workers_only: bool = False,
         max_concurrent_workers: Optional[int] = None,
         **kwargs,
     ) -> Any:
-        """Runs the given method on all workers."""
+        """Runs the given method on all workers.
+        Args:
+            async_run_remote_workers_only: If True the method will be run only
+                in the remote workers, not the driver worker. It will also be
+                run asynchronously and return a list of futures rather than
+                blocking on the results.
+        """
+        raise NotImplementedError
+
+    @abstractmethod
+    def _wait_for_tasks_completion(self, parallel_worker_tasks: Any) -> None:
+        """Wait for futures returned from _run_workers() with
+        async_run_remote_workers_only to complete."""
         raise NotImplementedError


 class DistributedGPUExecutorAsync(DistributedGPUExecutor, ExecutorAsyncBase):

+    async def execute_model_async(
+            self,
+            execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]:
+        if self.parallel_worker_tasks is None:
+            # Start model execution loop running in the parallel workers
+            self.parallel_worker_tasks = asyncio.create_task(
+                self._start_worker_execution_loop())
+
+        # Only the driver worker returns the sampling results.
+        return await self._driver_execute_model_async(execute_model_req)
+
+    async def stop_remote_worker_execution_loop_async(self) -> None:
+        if self.parallel_worker_tasks is None:
+            return
+
+        await self._driver_execute_model_async()
+        parallel_worker_tasks = self.parallel_worker_tasks
+        self.parallel_worker_tasks = None
+        # Ensure that workers exit model loop cleanly
+        # (this will raise otherwise)
+        await parallel_worker_tasks
+
     @abstractmethod
-    async def _run_workers_async(
+    async def _driver_execute_model_async(
         self,
-        method: str,
-        *args,
-        driver_args: Optional[Tuple[Any, ...]] = None,
-        driver_kwargs: Optional[Dict[str, Any]] = None,
-        **kwargs,
-    ) -> Any:
-        """Runs the given method on all workers."""
+        execute_model_req: Optional[ExecuteModelRequest] = None
+    ) -> List[SamplerOutput]:
+        """Execute the model asynchronously in the driver worker.
+        Passing None will cause the driver to stop the model execution
+        loop running in each of the remote workers.
+        """
         raise NotImplementedError

-    async def execute_model_async(self, *args,
-                                  **kwargs) -> List[SamplerOutput]:
-        all_outputs = await self._run_workers_async("execute_model",
-                                                    driver_args=args,
-                                                    driver_kwargs=kwargs)
-
-        # Only the driver worker returns the sampling results.
-        return all_outputs[0]
+    @abstractmethod
+    async def _start_worker_execution_loop(self):
+ """Run execution loop on all workers. It guarantees all workers run
|
|
|
+ the loop or None of them is running the loop. Loop can be stopped by
|
|
|
+ `stop_remote_worker_execution_loop`.
|
|
|
+ The API is idempotent (guarantee only 1 loop run at any moment)."""
|
|
|
+ raise NotImplementedError
|
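
The control flow above replaces per-step broadcasts with a long-lived busy
loop: the first execute_model call parks every remote worker in
start_worker_execution_loop, after which only the driver is invoked per step,
and passing None through _driver_execute_model is the stop signal. The
following self-contained sketch imitates that choreography with threads and
queues; WorkerSketch and ExecutorSketch are hypothetical stand-ins for this
illustration only, not aphrodite classes (the real subclasses dispatch to
Ray or multiprocessing workers instead).

    import queue
    from concurrent.futures import Future, ThreadPoolExecutor
    from typing import List, Optional

    class WorkerSketch:
        """Stands in for one remote worker process."""

        def __init__(self) -> None:
            self._inbox: "queue.Queue[Optional[dict]]" = queue.Queue()

        def submit(self, req: Optional[dict]) -> None:
            # Called by the driver; None is the stop signal.
            self._inbox.put(req)

        def start_worker_execution_loop(self) -> None:
            # Stay in the loop until the driver broadcasts None.
            while (req := self._inbox.get()) is not None:
                pass  # ... run one model step for `req` here ...

    class ExecutorSketch:
        """Mirrors DistributedGPUExecutor's start/stop choreography."""

        def __init__(self, num_workers: int = 2) -> None:
            self.workers = [WorkerSketch() for _ in range(num_workers)]
            self.pool = ThreadPoolExecutor(max_workers=num_workers)
            self.parallel_worker_tasks: Optional[List[Future]] = None

        def _driver_execute_model(self, req: Optional[dict] = None) -> list:
            # Broadcast to the workers; the real driver also runs its own
            # model step and returns the sampler outputs.
            for worker in self.workers:
                worker.submit(req)
            return []

        def execute_model(self, req: dict) -> list:
            if self.parallel_worker_tasks is None:
                # First call only: park every remote worker in its loop.
                self.parallel_worker_tasks = [
                    self.pool.submit(w.start_worker_execution_loop)
                    for w in self.workers
                ]
            return self._driver_execute_model(req)

        def stop_remote_worker_execution_loop(self) -> None:
            if self.parallel_worker_tasks is None:
                return
            self._driver_execute_model()  # None makes the loops exit
            tasks, self.parallel_worker_tasks = self.parallel_worker_tasks, None
            for task in tasks:
                task.result()  # re-raises if a worker loop died

    executor = ExecutorSketch()
    executor.execute_model({"step": 1})  # starts the loops, runs step 1
    executor.execute_model({"step": 2})  # driver-only call path
    executor.stop_remote_worker_execution_loop()
    executor.pool.shutdown()

Holding the futures and joining them on stop serves the same purpose as
_wait_for_tasks_completion in the diff: worker failures surface in the
driver rather than being silently dropped.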
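The async subclass follows the same protocol but holds the loop as a single
awaitable task. A companion sketch under the same caveats
(AsyncExecutorSketch is hypothetical), condensed to the start/stop path:

    import asyncio
    from typing import Optional

    class AsyncExecutorSketch:
        def __init__(self, num_workers: int = 2) -> None:
            self.inboxes = [asyncio.Queue() for _ in range(num_workers)]
            self.parallel_worker_tasks: Optional[asyncio.Task] = None

        async def _start_worker_execution_loop(self) -> None:
            # One coroutine per worker; all exit on the None broadcast.
            async def loop(inbox: asyncio.Queue) -> None:
                while (req := await inbox.get()) is not None:
                    pass  # ... run one model step for `req` here ...

            await asyncio.gather(*(loop(q) for q in self.inboxes))

        async def _driver_execute_model_async(self, req=None) -> list:
            for inbox in self.inboxes:
                inbox.put_nowait(req)  # None is the stop signal
            return []

        async def execute_model_async(self, req: dict) -> list:
            if self.parallel_worker_tasks is None:
                # Idempotent: only the first call spawns the loop task.
                self.parallel_worker_tasks = asyncio.create_task(
                    self._start_worker_execution_loop())
            return await self._driver_execute_model_async(req)

        async def stop_remote_worker_execution_loop_async(self) -> None:
            if self.parallel_worker_tasks is None:
                return
            await self._driver_execute_model_async()  # broadcast stop
            task, self.parallel_worker_tasks = self.parallel_worker_tasks, None
            await task  # surfaces any worker error, as in the diff

    async def main() -> None:
        ex = AsyncExecutorSketch()
        await ex.execute_model_async({"step": 1})
        await ex.stop_remote_worker_execution_loop_async()

    asyncio.run(main())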