worker_base.py

import dataclasses
import importlib
import os
from abc import ABC, abstractmethod
from typing import (Any, Callable, Dict, List, Optional, Set, Tuple, Type,
                    Union)

import torch
from loguru import logger

from aphrodite.common.sequence import ExecuteModelRequest, IntermediateTensors
from aphrodite.common.utils import (enable_trace_function_call_for_thread,
                                    update_environment_variables)
from aphrodite.distributed import (broadcast_tensor_dict, get_pp_group,
                                   get_tp_group)
from aphrodite.lora.request import LoRARequest
from aphrodite.modeling.layers.sampler import SamplerOutput
from aphrodite.platforms import current_platform
from aphrodite.worker.model_runner_base import (BroadcastableModelInput,
                                                ModelRunnerBase,
                                                ModelRunnerInputBase)

class WorkerBase(ABC):
    """Worker interface that allows Aphrodite to cleanly separate
    implementations for different hardware. Also abstracts control plane
    communication, e.g., to communicate request metadata to other workers.
    """

    @abstractmethod
    def init_device(self) -> None:
        """Initialize device state, such as loading the model or other
        on-device memory allocations.
        """
        raise NotImplementedError

    @abstractmethod
    def determine_num_available_blocks(self) -> Tuple[int, int]:
        """Determine the number of available blocks for the GPU KV cache and
        swappable CPU KV cache.

        The implementation may run profiling or other heuristics to determine
        the size of caches.

        Returns a Tuple[num_gpu_blocks, num_cpu_blocks], where num_gpu_blocks
        are blocks that are "active" on the device and can be appended to.
        num_cpu_blocks refers to "swapped" blocks in CPU memory and cannot be
        appended to.
        """
        raise NotImplementedError

    @abstractmethod
    def initialize_cache(self, num_gpu_blocks: int,
                         num_cpu_blocks: int) -> None:
        """Initialize the KV cache with the given size in blocks."""
        raise NotImplementedError

    @current_platform.inference_mode()
    def start_worker_execution_loop(self) -> None:
        """Execute model loop in parallel worker.

        You can stop the loop by executing a driver worker with an empty
        output. See `stop_remote_worker_execution_loop` for more details.
        """
        while True:
            output = self.execute_model(execute_model_req=None)
            if output is None:
                return None

    @abstractmethod
    def execute_model(
        self,
        execute_model_req: Optional[ExecuteModelRequest] = None
    ) -> Optional[List[SamplerOutput]]:
        raise NotImplementedError

    @abstractmethod
    def get_cache_block_size_bytes(self) -> int:
        """Return the size of a single cache block, in bytes. Used in
        speculative decoding.
        """
        raise NotImplementedError

    @abstractmethod
    def add_lora(self, lora_request: LoRARequest) -> bool:
        raise NotImplementedError

    @abstractmethod
    def remove_lora(self, lora_id: int) -> bool:
        raise NotImplementedError

    @abstractmethod
    def pin_lora(self, lora_id: int) -> bool:
        raise NotImplementedError

    @abstractmethod
    def list_loras(self) -> Set[int]:
        raise NotImplementedError

class LoraNotSupportedWorkerBase(WorkerBase):
    """Partial implementation of WorkerBase that raises exceptions when LoRA
    methods are invoked.
    """

    def add_lora(self, lora_request: LoRARequest) -> bool:
        raise ValueError(f"{type(self)} does not support LoRA")

    def remove_lora(self, lora_id: int) -> bool:
        raise ValueError(f"{type(self)} does not support LoRA")

    def pin_lora(self, lora_id: int) -> bool:
        # Raise (not return) the error, matching the other LoRA methods.
        raise ValueError(f"{type(self)} does not support LoRA")

    def list_loras(self) -> Set[int]:
        raise ValueError(f"{type(self)} does not support LoRA")

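# Illustrative sketch only (not part of this module's API): a hypothetical
# minimal worker showing the surface a LoraNotSupportedWorkerBase subclass
# must cover. The constant block counts and no-op bodies are placeholder
# assumptions for demonstration, not a real device backend.
class _NullWorker(LoraNotSupportedWorkerBase):

    def init_device(self) -> None:
        pass  # a real worker would set up the device and load the model

    def determine_num_available_blocks(self) -> Tuple[int, int]:
        return 128, 0  # (num_gpu_blocks, num_cpu_blocks); placeholder values

    def initialize_cache(self, num_gpu_blocks: int,
                         num_cpu_blocks: int) -> None:
        pass  # a real worker would allocate the KV cache here

    def execute_model(
        self,
        execute_model_req: Optional[ExecuteModelRequest] = None
    ) -> Optional[List[SamplerOutput]]:
        return []  # a real worker would run the model and sample

    def get_cache_block_size_bytes(self) -> int:
        return 16 * 1024  # placeholder block size in bytes
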
@dataclasses.dataclass(frozen=True)
class WorkerInput:
    """Local inputs to each worker. May contain device-specific data. These
    fields should be broadcastable to other workers.
    """

    num_seq_groups: Optional[int] = None
    blocks_to_swap_in: Optional[torch.Tensor] = None
    blocks_to_swap_out: Optional[torch.Tensor] = None
    blocks_to_copy: Optional[torch.Tensor] = None
    virtual_engine: int = 0
    num_steps: int = 1

    @classmethod
    def from_broadcasted_tensor_dict(
        cls: Type["WorkerInput"],
        tensor_dict: Dict[str, Any],
    ) -> "WorkerInput":
        """
        Pop fields from the given tensor_dict and populate a new instance of
        WorkerInput.
        """
        return cls(
            num_seq_groups=tensor_dict.pop("num_seq_groups"),
            blocks_to_swap_in=tensor_dict.pop("blocks_to_swap_in"),
            blocks_to_swap_out=tensor_dict.pop("blocks_to_swap_out"),
            blocks_to_copy=tensor_dict.pop("blocks_to_copy"),
            virtual_engine=tensor_dict["virtual_engine"],
            num_steps=tensor_dict.pop("num_steps"),
        )

    def as_broadcastable_tensor_dict(
            self) -> Dict[str, Union[int, torch.Tensor]]:
        """
        Extract broadcastable fields.
        """
        tensor_dict = {
            "num_seq_groups": self.num_seq_groups,
            "blocks_to_swap_in": self.blocks_to_swap_in,
            "blocks_to_swap_out": self.blocks_to_swap_out,
            "blocks_to_copy": self.blocks_to_copy,
            "virtual_engine": self.virtual_engine,
            "num_steps": self.num_steps,
        }
        return tensor_dict

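# Illustrative sketch only: WorkerInput round-trips through its broadcastable
# dict form. The driver serializes with as_broadcastable_tensor_dict() and
# followers rebuild the instance with from_broadcasted_tensor_dict(). The
# helper name and field values below are hypothetical.
def _worker_input_roundtrip_example() -> WorkerInput:
    original = WorkerInput(num_seq_groups=2, virtual_engine=0, num_steps=1)
    tensor_dict = dict(original.as_broadcastable_tensor_dict())
    # from_broadcasted_tensor_dict pops fields, so pass a copy if the dict is
    # still needed afterwards (as in _get_worker_input_from_broadcast below,
    # where the remaining entries feed the model input).
    return WorkerInput.from_broadcasted_tensor_dict(tensor_dict)
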
class LocalOrDistributedWorkerBase(WorkerBase):
    """
    Partial implementation of WorkerBase that has a default `execute_model`
    definition to perform metadata transfer between workers when in distributed
    mode. Subclasses of this interface should use model runners that inherit
    from ModelRunnerBase, and should only need to implement worker-local logic.
    If custom control plane logic is needed to transfer metadata, or if the
    model runner cannot inherit from ModelRunnerBase, use WorkerBase instead.
    """
    is_driver_worker: bool
    model_runner: ModelRunnerBase

    @property
    @abstractmethod
    def do_metadata_broadcast(self) -> bool:
        """
        Used by the default `execute_model` to check whether broadcast is
        needed to transfer request inputs from the driver worker to other
        workers in the TP group. If a WorkerBase subclass only supports
        single-worker execution, then this method should return False.
        """
        raise NotImplementedError

    @property
    @abstractmethod
    def kv_cache(self) -> Optional[List[List[torch.Tensor]]]:
        """
        Gets the list of kv caches to pass to the worker's model runner. Each
        element in the list is a kv cache corresponding to a particular virtual
        engine (PP stream). Used by the default `execute_model`. If the
        worker's model runner does not follow the ModelRunnerBase interface,
        then inherit from WorkerBase instead.
        """
        raise NotImplementedError

    @abstractmethod
    def prepare_worker_input(
            self, execute_model_req: ExecuteModelRequest) -> WorkerInput:
        """
        Prepare the inputs to WorkerBase.execute_worker from an execution
        request. This method may move data to the worker's local device. It is
        not allowed to communicate with other workers or devices.
        """
        raise NotImplementedError

    @abstractmethod
    def execute_worker(self, worker_input: WorkerInput) -> None:
        """
        Process an execution request.
        """
        raise NotImplementedError

    def _get_worker_input_from_broadcast(
        self
    ) -> Optional[Tuple[BroadcastableModelInput, WorkerInput, Dict[
            str, torch.Tensor]]]:
        """ Get the worker input from the broadcasted tensor dict. """
        assert self.do_metadata_broadcast
        assert not self.is_driver_worker
        broadcast_data = broadcast_tensor_dict(src=0)
        if not broadcast_data:
            return None

        worker_input = WorkerInput.from_broadcasted_tensor_dict(broadcast_data)
        model_input = (
            self.model_runner.make_model_input_from_broadcasted_tensor_dict(
                broadcast_data))

        kwargs = extract_previous_hidden_states(broadcast_data)

        return model_input, worker_input, kwargs

    def _get_driver_input_and_broadcast(
        self, execute_model_req: ExecuteModelRequest
    ) -> Tuple[BroadcastableModelInput, WorkerInput, Dict[str, torch.Tensor]]:
        """ Get the driver input and broadcast it to other workers. """
        assert self.is_driver_worker

        worker_input: WorkerInput = self.prepare_worker_input(
            execute_model_req=execute_model_req)
        model_input: ModelRunnerInputBase = (
            self.model_runner.prepare_model_input(
                execute_model_req.seq_group_metadata_list,
                execute_model_req.virtual_engine,
                execute_model_req.finished_requests_ids))

        kwargs = extract_previous_hidden_states(execute_model_req)

        if self.do_metadata_broadcast:
            broadcast_data = worker_input.as_broadcastable_tensor_dict()
            broadcast_data.update(model_input.as_broadcastable_tensor_dict())
            broadcast_data.update(kwargs)
            broadcast_tensor_dict(broadcast_data, src=0)

        if execute_model_req.async_callback:
            model_input = dataclasses.replace(  # type: ignore
                model_input,
                async_callback=execute_model_req.async_callback)

        return model_input, worker_input, kwargs

    def prepare_input(
        self,
        execute_model_req: Optional[ExecuteModelRequest] = None
    ) -> Optional[Tuple[BroadcastableModelInput, WorkerInput, Dict[
            str, torch.Tensor]]]:
        """
        Prepare the inputs to ModelRunner and workers.
        """
        if self.is_driver_worker:
            if execute_model_req is None:
                if self.do_metadata_broadcast:
                    # This signals that there are no more requests to process
                    # for now. All workers are running an infinite loop with
                    # broadcast_tensor_dict, and the loop stops when the
                    # driver broadcasts an empty input. Send an empty input to
                    # notify all other workers to stop their execution loop.
                    broadcast_tensor_dict({}, src=0)
                return None
            return self._get_driver_input_and_broadcast(execute_model_req)
        else:
            return self._get_worker_input_from_broadcast()

    def execute_model(
        self,
        execute_model_req: Optional[ExecuteModelRequest] = None
    ) -> Optional[List[SamplerOutput]]:
        """Executes at least one model step on the given sequences, unless no
        sequences are provided."""
        inputs = self.prepare_input(execute_model_req)
        if inputs is None:
            return None

        model_input, worker_input, kwargs = inputs
        num_steps = worker_input.num_steps

        self.execute_worker(worker_input)

        # If there is no input, we don't need to execute the model.
        if worker_input.num_seq_groups == 0:
            return []

        intermediate_tensors = None
        if not get_pp_group().is_first_rank:
            intermediate_tensors = IntermediateTensors(
                get_pp_group().recv_tensor_dict(
                    all_gather_group=get_tp_group()))

        output = self.model_runner.execute_model(
            model_input=model_input,
            kv_caches=self.kv_cache[worker_input.virtual_engine]
            if self.kv_cache is not None else None,
            intermediate_tensors=intermediate_tensors,
            num_steps=num_steps,
            **kwargs,
        )

        if not get_pp_group().is_last_rank:
            # output is IntermediateTensors
            get_pp_group().send_tensor_dict(output.tensors,
                                            all_gather_group=get_tp_group())
            return [None]

        # output is List[SamplerOutput]
        return output

    def _execute_model_spmd(
        self,
        execute_model_req: ExecuteModelRequest,
        intermediate_tensors: Optional[IntermediateTensors] = None
    ) -> Optional[List[SamplerOutput]]:
        """
        Execute model in Single Program Multiple Data (SPMD) fashion.
        All workers take the same request, prepare the input and
        execute the model.
        """
        assert execute_model_req is not None, (
            "_execute_model_spmd() requires each worker to take in an "
            "ExecuteModelRequest")
        worker_input: WorkerInput = self.prepare_worker_input(
            execute_model_req=execute_model_req)
        model_input: ModelRunnerInputBase = (
            self.model_runner.prepare_model_input(
                execute_model_req.seq_group_metadata_list))

        self.execute_worker(worker_input)

        # If there is no input, we don't need to execute the model.
        if worker_input.num_seq_groups == 0:
            return []

        kwargs = extract_previous_hidden_states(execute_model_req)

        return self.model_runner.execute_model(
            model_input=model_input,
            kv_caches=self.kv_cache[worker_input.virtual_engine]
            if self.kv_cache is not None else None,
            intermediate_tensors=intermediate_tensors,
            **kwargs,
        )

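# Illustrative sketch only: how the driver and follower ranks cooperate in
# the default control plane above. The function and its arguments are
# hypothetical placeholders; in practice the executor wires this up.
def _control_plane_example(worker: LocalOrDistributedWorkerBase,
                           req: ExecuteModelRequest) -> None:
    if worker.is_driver_worker:
        # Each driver call broadcasts the metadata that unblocks the
        # followers for one step...
        worker.execute_model(execute_model_req=req)
        # ...and a call with no request broadcasts an empty dict, which makes
        # every follower's loop return.
        worker.execute_model(execute_model_req=None)
    else:
        # Followers block in prepare_input() on broadcast_tensor_dict(src=0)
        # until the driver broadcasts, and exit when the broadcast is empty.
        worker.start_worker_execution_loop()
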
class WorkerWrapperBase:
    """
    The whole point of this class is to lazily initialize the worker.
    We first instantiate the WorkerWrapper, which remembers the worker module
    and class name. Then, after `update_environment_variables` is called, the
    real initialization happens in `init_worker`.

    If worker_class_fn is specified, it will be executed to get the worker
    class. Otherwise, the worker class will be obtained by dynamically
    importing it using worker_module_name and worker_class_name.
    """

    def __init__(
        self,
        worker_module_name: str,
        worker_class_name: str,
        trust_remote_code: bool = False,
        worker_class_fn: Optional[Callable[[],
                                           Type[WorkerBase]]] = None) -> None:
        self.worker_module_name = worker_module_name
        self.worker_class_name = worker_class_name
        self.worker_class_fn = worker_class_fn
        self.worker: Optional[WorkerBase] = None
        if trust_remote_code:
            # note: lazy import to avoid importing torch before initializing
            from aphrodite.common.utils import init_cached_hf_modules
            init_cached_hf_modules()

    @staticmethod
    def update_environment_variables(envs: Dict[str, str]) -> None:
        key = 'CUDA_VISIBLE_DEVICES'
        if key in envs and key in os.environ:
            # overwriting CUDA_VISIBLE_DEVICES is desired behavior
            # suppress the warning in `update_environment_variables`
            del os.environ[key]
        update_environment_variables(envs)

    def init_worker(self, *args, **kwargs):
        """
        Here we inject some common logic before initializing the worker.
        Arguments are passed to the worker class constructor.
        """
        enable_trace_function_call_for_thread()

        # see https://github.com/NVIDIA/nccl/issues/1234
        os.environ['NCCL_CUMEM_ENABLE'] = '0'

        from aphrodite.plugins import load_general_plugins
        load_general_plugins()

        if self.worker_class_fn:
            worker_class = self.worker_class_fn()
        else:
            mod = importlib.import_module(self.worker_module_name)
            worker_class = getattr(mod, self.worker_class_name)

        self.worker = worker_class(*args, **kwargs)
        assert self.worker is not None

    def execute_method(self, method, *args, **kwargs):
        try:
            target = self if self.worker is None else self.worker
            executor = getattr(target, method)
            return executor(*args, **kwargs)
        except Exception as e:
            # If the driver worker also executes methods, an exception in a
            # remote worker may cause a deadlock in RPC frameworks like Ray.
            # Print the error and inform the user how to resolve it.
            msg = (f"Error executing method {method}. "
                   "This might cause deadlock in distributed execution.")
            logger.exception(msg)
            raise e

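# Illustrative sketch only: the lazy-initialization flow WorkerWrapperBase is
# designed for. The module/class names and environment value below are
# assumptions for demonstration; a real executor passes the names of an
# actual worker implementation and its constructor arguments.
def _worker_wrapper_example() -> None:
    wrapper = WorkerWrapperBase(
        worker_module_name="aphrodite.worker.worker",  # assumed module path
        worker_class_name="Worker")
    # The environment is prepared first, before the worker (and any torch
    # device state) exists...
    wrapper.update_environment_variables({"CUDA_VISIBLE_DEVICES": "0"})
    # ...then the worker class is imported and constructed lazily. Real
    # constructor arguments would be forwarded via *args/**kwargs here.
    wrapper.init_worker()
    # Methods are dispatched by name; before init_worker, the lookup would
    # fall back to the wrapper itself.
    wrapper.execute_method("init_device")
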
def extract_previous_hidden_states(
        data: Union[ExecuteModelRequest, Dict[str, torch.Tensor]]) -> \
            Dict[str, torch.Tensor]:
    """If data contains previous_hidden_states, extract it. This returns a dict
    which can be used directly as additional kwargs in any following
    execute_model calls. This is used in draft models like EAGLE."""
    output = {}

    # When called from a non-driver worker, data is a dict; when called from
    # the driver worker, it is an ExecuteModelRequest.
    if isinstance(data, dict):
        if "previous_hidden_states" in data:
            output["previous_hidden_states"] = data["previous_hidden_states"]
    elif data.previous_hidden_states is not None:
        output["previous_hidden_states"] = data.previous_hidden_states\
            .hidden_states

    return output

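# Illustrative sketch only: the same helper serves both call sites. The tensor
# shape below is an arbitrary placeholder.
def _extract_hidden_states_example() -> None:
    # Non-driver path: the broadcast dict may carry the hidden states directly.
    broadcast_data = {"previous_hidden_states": torch.zeros(1, 4096)}
    kwargs = extract_previous_hidden_states(broadcast_data)
    assert "previous_hidden_states" in kwargs
    # Driver path: an ExecuteModelRequest whose previous_hidden_states is None
    # yields an empty dict, so **kwargs is a no-op in execute_model.
    # (Constructing a full ExecuteModelRequest is omitted here.)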