cpu_executor.py

import os
from functools import partial
from typing import Any, Awaitable, List, Optional, Set, Tuple, Union

import torch
from loguru import logger

import aphrodite.common.envs as envs
from aphrodite.common.config import CacheConfig, ModelConfig, SchedulerConfig
from aphrodite.common.sequence import ExecuteModelRequest
from aphrodite.common.utils import (GiB_bytes, get_aphrodite_instance_id,
                                    get_distributed_init_method,
                                    get_open_port, make_async)
from aphrodite.executor.executor_base import ExecutorAsyncBase, ExecutorBase
from aphrodite.executor.multiproc_worker_utils import (ProcessWorkerWrapper,
                                                       ResultHandler,
                                                       WorkerMonitor)
from aphrodite.lora.request import LoRARequest
from aphrodite.modeling.layers.sampler import SamplerOutput
from aphrodite.prompt_adapter.request import PromptAdapterRequest
from aphrodite.task_handler.worker_base import WorkerWrapperBase


class CPUExecutor(ExecutorBase):

    uses_ray: bool = False

    def _init_executor(self) -> None:
        assert self.device_config.device_type == "cpu"
        assert self.lora_config is None, "cpu backend doesn't support LoRA"

        #
        # Environment variables for CPU executor
        #

        # Ensure that APHRODITE_INSTANCE_ID is set, to be inherited by workers
        os.environ["APHRODITE_INSTANCE_ID"] = get_aphrodite_instance_id()

        # Disable torch async compiling, which doesn't work with daemonic
        # processes
        os.environ["TORCHINDUCTOR_COMPILE_THREADS"] = "1"

        # Intel OpenMP settings
        ld_preload_str = os.getenv("LD_PRELOAD", "")
        if "libiomp5.so" in ld_preload_str:
            # The time (in milliseconds) that a thread should wait after
            # completing the execution of a parallel region, before sleeping.
            os.environ['KMP_BLOCKTIME'] = "1"
            # Prevents the CPU from dropping into a low-performance state
            os.environ['KMP_TPAUSE'] = "0"
            # Provides fine-grained parallelism
            os.environ['KMP_FORKJOIN_BARRIER_PATTERN'] = "dist,dist"
            os.environ['KMP_PLAIN_BARRIER_PATTERN'] = "dist,dist"
            os.environ['KMP_REDUCTION_BARRIER_PATTERN'] = "dist,dist"

        # Hint IPEX to use shared-memory-based AllReduce
        os.environ["LOCAL_WORLD_SIZE"] = str(
            self.parallel_config.tensor_parallel_size)

        self.model_config = _verify_and_get_model_config(self.model_config)
        self.cache_config = _verify_and_get_cache_config(self.cache_config)
        self.scheduler_config = _verify_and_get_scheduler_config(
            self.scheduler_config)

        # The multiprocessing-based executor does not support multi-node
        # settings. Since it only works on a single node, we can use the
        # loopback address 127.0.0.1 for communication.
        ip = "127.0.0.1"
        port = get_open_port()
        self.distributed_init_method = get_distributed_init_method(ip, port)
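
        # Process topology: in async mode, every rank, including the driver,
        # runs in its own worker subprocess (driver calls go through
        # _async_driver_method_invoker); in sync mode, rank 0 runs in this
        # process and only the remaining ranks are spawned.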
        is_async = isinstance(self, CPUExecutorAsync)

        world_size = self.parallel_config.tensor_parallel_size
        result_handler = ResultHandler()
        self.parallel_worker_tasks: Optional[Union[Any, Awaitable[Any]]] = None
        self.workers = []

        if is_async:
            self.workers = [
                ProcessWorkerWrapper(
                    result_handler,
                    partial(
                        self._create_worker,
                        rank=rank,
                        local_rank=rank,
                    )) for rank in range(0, world_size)
            ]
            self.driver_worker = self.workers[0]
            self.workers = self.workers[1:]
            self.driver_method_invoker = _async_driver_method_invoker
        else:
            self.driver_worker = self._create_worker()
            self.driver_method_invoker = _driver_method_invoker

            if world_size != 1:
                self.workers = [
                    ProcessWorkerWrapper(
                        result_handler,
                        partial(
                            self._create_worker,
                            rank=rank,
                            local_rank=rank,
                        )) for rank in range(1, world_size)
                ]
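
        # The WorkerMonitor watches the spawned worker processes so that
        # check_health() can detect when one of them has died.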
        if world_size != 1 or is_async:
            if is_async:
                async_worker_list = self.workers + [self.driver_worker]
            else:
                async_worker_list = self.workers
            self.worker_monitor = WorkerMonitor(async_worker_list,
                                                result_handler)
            result_handler.start()
            self.worker_monitor.start()

        self._run_workers("init_device")
        self._run_workers("load_model")

    def _create_worker(
        self,
        local_rank: int = 0,
        rank: int = 0,
    ):
        worker_module_name = "aphrodite.task_handler.cpu_worker"
        worker_class_name = "CPUWorker"
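
        # The wrapper defers importing and constructing the CPUWorker until
        # init_worker() is called below.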
        wrapper = WorkerWrapperBase(
            worker_module_name=worker_module_name,
            worker_class_name=worker_class_name,
        )

        assert self.distributed_init_method is not None

        kwargs = dict(
            model_config=self.model_config,
            parallel_config=self.parallel_config,
            scheduler_config=self.scheduler_config,
            device_config=self.device_config,
            cache_config=self.cache_config,
            load_config=self.load_config,
            local_rank=local_rank,
            rank=rank,
            distributed_init_method=self.distributed_init_method,
            lora_config=self.lora_config,
            kv_cache_dtype=self.cache_config.cache_dtype,
            prompt_adapter_config=self.prompt_adapter_config,
            is_driver_worker=rank == 0,
        )
        wrapper.init_worker(**kwargs)

        return wrapper.worker

    def _run_workers(
        self,
        method: str,
        *args,
        async_run_remote_workers_only: bool = False,
        max_concurrent_workers: Optional[int] = None,
        **kwargs,
    ) -> Any:
        """Runs the given method on all workers.

        Args:
            async_run_remote_workers_only: If True, the method will be run
                only on the remote workers, not the driver worker. It will
                also be run asynchronously and return a list of futures
                rather than blocking on the results.
        """
        if max_concurrent_workers:
            raise NotImplementedError(
                "max_concurrent_workers is not supported yet.")

        # Start the remote workers first.
        worker_outputs = [
            worker.execute_method(method, *args, **kwargs)
            for worker in self.workers
        ]

        if async_run_remote_workers_only:
            # Just return the futures.
            return worker_outputs

        driver_worker_output = self.driver_method_invoker(
            self.driver_worker, method, *args, **kwargs)

        # Get the results of the workers.
        return [driver_worker_output
                ] + [output.get() for output in worker_outputs]

    def determine_num_available_blocks(self) -> Tuple[int, int]:
        """Determine the number of available KV blocks by invoking the
        underlying worker.
        """
        return self.driver_method_invoker(self.driver_worker,
                                          "determine_num_available_blocks")

    def initialize_cache(self, num_gpu_blocks: int,
                         num_cpu_blocks: int) -> None:
        """Initialize the KV cache by invoking the underlying worker."""
        # NOTE: We log here to avoid multiple logs when the number of
        # workers is greater than one. We could log in the engine, but not
        # all executors have GPUs.
        # NOTE: A `cpu block` for the CPU backend is located in CPU memory,
        # but is referred to as a `gpu block` so that the existing block
        # management procedure can be reused.
        logger.info("# CPU blocks: {}", num_gpu_blocks)
        self._run_workers("initialize_cache",
                          num_gpu_blocks=num_gpu_blocks,
                          num_cpu_blocks=num_cpu_blocks)

    def execute_model(
        self,
        execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]:
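        # On the first call with tensor parallelism, kick the remote workers
        # into a busy execution loop; afterwards only the driver is invoked
        # directly, and it coordinates the looping workers until
        # stop_remote_worker_execution_loop() is called.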
        if (self.parallel_config.tensor_parallel_size > 1
                and self.parallel_worker_tasks is None):
            self.parallel_worker_tasks = self._run_workers(
                "start_worker_execution_loop",
                async_run_remote_workers_only=True,
            )
        output = self.driver_method_invoker(self.driver_worker,
                                            "execute_model",
                                            execute_model_req)
        return output

    def stop_remote_worker_execution_loop(self) -> None:
        if self.parallel_worker_tasks is None:
            return

        # Passing None will cause the driver to stop the model execution
        # loop running in each of the remote workers.
        self.driver_method_invoker(self.driver_worker, "execute_model", None)
        parallel_worker_tasks = self.parallel_worker_tasks
        self.parallel_worker_tasks = None
        # Ensure that the workers exit the model loop cleanly
        # (this will raise otherwise).
        self._wait_for_tasks_completion(parallel_worker_tasks)

    def add_lora(self, lora_request: LoRARequest) -> bool:
        return all(self._run_workers("add_lora", lora_request))

    def remove_lora(self, lora_id: int) -> bool:
        return all(self._run_workers("remove_lora", lora_id))

    def pin_lora(self, lora_id: int) -> bool:
        assert lora_id > 0, "lora_id must be greater than 0."
        return all(self._run_workers(
            "pin_lora",
            lora_id=lora_id,
        ))

    def list_loras(self) -> Set[int]:
        return self.driver_method_invoker(self.driver_worker, "list_loras")

    def add_prompt_adapter(
            self, prompt_adapter_request: PromptAdapterRequest) -> bool:
        return all(
            self._run_workers(
                "add_prompt_adapter",
                prompt_adapter_request,
            ))

    def remove_prompt_adapter(self, prompt_adapter_id: int) -> bool:
        return all(
            self._run_workers(
                "remove_prompt_adapter",
                prompt_adapter_id,
            ))

    def list_prompt_adapters(self) -> Set[int]:
        return self.driver_method_invoker(self.driver_worker,
                                          "list_prompt_adapters")

    def pin_prompt_adapter(self, prompt_adapter_id: int) -> bool:
        return all(self._run_workers(
            "pin_prompt_adapter",
            prompt_adapter_id,
        ))

    def check_health(self) -> None:
        """Raises an error if the engine is unhealthy."""
        # worker_monitor only exists in multi-worker or async setups, so
        # look it up defensively (as shutdown() does).
        worker_monitor = getattr(self, "worker_monitor", None)
        if worker_monitor is not None and not worker_monitor.is_alive():
            raise RuntimeError("Worker processes are not running")

    def shutdown(self):
        if (worker_monitor := getattr(self, "worker_monitor",
                                      None)) is not None:
            worker_monitor.close()

    def _wait_for_tasks_completion(self, parallel_worker_tasks: Any) -> None:
        """Wait for futures returned from _run_workers() with
        async_run_remote_workers_only to complete."""
        for result in parallel_worker_tasks:
            result.get()


class CPUExecutorAsync(CPUExecutor, ExecutorAsyncBase):

    async def execute_model_async(
        self,
        execute_model_req: ExecuteModelRequest) -> List[SamplerOutput]:
        output = await make_async(self.execute_model)(
            execute_model_req=execute_model_req)
        return output

    async def check_health_async(self) -> None:
        self.check_health()


def _verify_and_get_model_config(config: ModelConfig) -> ModelConfig:
    if config.dtype == torch.float16:
        logger.warning(
            "float16 is not supported on CPU, casting to bfloat16.")
        config.dtype = torch.bfloat16
    if not config.enforce_eager:
        logger.warning(
            "CUDA graph is not supported on CPU, falling back to eager "
            "mode.")
        config.enforce_eager = True
    return config


def _verify_and_get_scheduler_config(
        config: SchedulerConfig) -> SchedulerConfig:
    if config.chunked_prefill_enabled:
        logger.warning(
            "Chunked prefill is not supported on CPU, disabling it.")
        config.chunked_prefill_enabled = False
    return config


def _verify_and_get_cache_config(config: CacheConfig) -> CacheConfig:
    if config.enable_prefix_caching:
        logger.warning(
            "Prefix caching is not supported on CPU, disabling it.")
        config.enable_prefix_caching = False
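
    # APHRODITE_CPU_KVCACHE_SPACE is interpreted in GiB; 0 (the unset
    # default) falls back to 4 GiB below.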
    kv_cache_space_str = envs.APHRODITE_CPU_KVCACHE_SPACE
    kv_cache_space = int(kv_cache_space_str)

    if kv_cache_space >= 0:
        if kv_cache_space == 0:
            config.cpu_kvcache_space_bytes = 4 * GiB_bytes  # type: ignore
            logger.warning(
                "Environment variable APHRODITE_CPU_KVCACHE_SPACE (GiB) "
                "for the CPU backend is not set, using 4 GiB by default.")
        else:
            config.cpu_kvcache_space_bytes = (kv_cache_space *
                                              GiB_bytes)  # type: ignore
    else:
        raise RuntimeError(
            "Invalid environment variable APHRODITE_CPU_KVCACHE_SPACE"
            f" {kv_cache_space}, expected a non-negative integer.")

    return config


def _driver_method_invoker(driver, method: str, *args, **kwargs):
    return getattr(driver, method)(*args, **kwargs)


def _async_driver_method_invoker(driver, method: str, *args, **kwargs):
    return driver.execute_method(method, *args, **kwargs).get()
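

# Example (illustrative sketch only; config construction and the
# ExecutorBase constructor signature are assumed here, not shown in this
# file):
#
#     executor = CPUExecutor(...)  # engine passes the verified configs
#     num_gpu_blocks, num_cpu_blocks = (
#         executor.determine_num_available_blocks())
#     executor.initialize_cache(num_gpu_blocks, num_cpu_blocks)
#     outputs = executor.execute_model(execute_model_req)
#     executor.shutdown()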