worker.py

  1. """A GPU worker class."""
  2. import gc
  3. import os
  4. from typing import Dict, List, Tuple, Set, Optional
  5. import torch
  6. import torch.distributed
  7. from aphrodite.common.config import (CacheConfig, ModelConfig, ParallelConfig,
  8. SchedulerConfig, LoRAConfig, DeviceConfig)
  9. from aphrodite.common.utils import in_wsl
  10. from aphrodite.modeling import set_random_seed
  11. from aphrodite.modeling.megatron import cupy_utils
  12. from aphrodite.modeling.megatron.communication_op import (broadcast_tensor_dict
  13. )
  14. from aphrodite.modeling.megatron.custom_all_reduce import init_custom_ar
  15. from aphrodite.modeling.megatron.parallel_state import (
  16. ensure_model_parallel_initialized)
  17. from aphrodite.common.sequence import SamplerOutput, SequenceGroupMetadata
  18. from aphrodite.task_handler.cache_engine import CacheEngine
  19. from aphrodite.task_handler.model_runner import ModelRunner
  20. from aphrodite.lora.request import LoRARequest
  21. from aphrodite.common.utils import is_hip


class Worker:
    """A worker class that executes (a partition of) the model on a GPU.

    Each worker is associated with a single GPU. The worker is responsible for
    maintaining the KV cache and executing the model on the GPU. In case of
    distributed inference, each worker is assigned a partition of the model.
    """

    def __init__(
        self,
        model_config: ModelConfig,
        parallel_config: ParallelConfig,
        scheduler_config: SchedulerConfig,
        device_config: DeviceConfig,
        local_rank: int,
        rank: int,
        distributed_init_method: str,
        lora_config: Optional[LoRAConfig] = None,
        kv_cache_dtype: Optional[str] = "auto",
        kv_quant_params_path: Optional[str] = None,
        is_driver_worker: bool = False,
    ) -> None:
        self.model_config = model_config
        self.parallel_config = parallel_config
        self.scheduler_config = scheduler_config
        self.device_config = device_config
        self.local_rank = local_rank
        self.rank = rank
        self.distributed_init_method = distributed_init_method
        self.lora_config = lora_config
        self.is_driver_worker = is_driver_worker
        if self.is_driver_worker:
            assert self.rank == 0, "The driver worker must have rank 0."

        self.model_runner = ModelRunner(
            model_config,
            parallel_config,
            scheduler_config,
            device_config,
            lora_config=self.lora_config,
            kv_cache_dtype=kv_cache_dtype,
            kv_quant_params_path=kv_quant_params_path,
            is_driver_worker=is_driver_worker)
        # Uninitialized cache engine. Will be initialized by
        # self.init_cache_engine().
        self.cache_config = None
        self.cache_engine = None
        self.cache_events = None
        self.gpu_cache = None

    def init_model(self, cupy_port: Optional[int] = None) -> None:
        if self.device_config.device.type == "cuda":
            # torch.distributed.all_reduce does not free the input tensor
            # until the synchronization point. This causes the memory usage
            # to grow as the number of all_reduce calls increases. This env
            # var disables this behavior.
            # Related issue:
            # https://discuss.pytorch.org/t/cuda-allocation-lifetime-for-inputs-to-distributed-all-reduce/191573
            os.environ["TORCH_NCCL_AVOID_RECORD_STREAMS"] = "1"

            # This env var set by Ray causes exceptions with graph building.
            os.environ.pop("NCCL_ASYNC_ERROR_HANDLING", None)
            # Patch for a torch.cuda.is_available() error in WSL: always call
            # torch.cuda.device_count() before initializing the device.
            if in_wsl():
                torch.cuda.device_count()
            self.device = torch.device(f"cuda:{self.local_rank}")
            torch.cuda.set_device(self.device)

            _check_if_gpu_supports_dtype(self.model_config.dtype)
            torch.cuda.empty_cache()
            self.init_gpu_memory = torch.cuda.mem_get_info()[0]
        else:
            raise RuntimeError(
                f"Unsupported device type: {self.device_config.device}")
        # Initialize the distributed environment.
        init_distributed_environment(self.parallel_config, self.rank,
                                     cupy_port, self.distributed_init_method)
        if not self.parallel_config.disable_custom_all_reduce:
            init_custom_ar()
        # Initialize the model.
        set_random_seed(self.model_config.seed)

    def load_model(self):
        self.model_runner.load_model()

    @torch.inference_mode()
    def profile_num_available_blocks(
        self,
        block_size: int,
        gpu_memory_utilization: float,
        cpu_swap_space: int,
        cache_dtype: str,
    ) -> Tuple[int, int]:
        """Profiles the peak memory usage of the model and returns the maximum
        number of GPU and CPU cache blocks that can be allocated.

        Args:
            block_size: The number of tokens stored per cache block.
            gpu_memory_utilization: The fraction of the total GPU memory to
                use.
            cpu_swap_space: The size of the CPU swap space in bytes.
            cache_dtype: The data type of the KV cache.
        """
        # Profile the memory usage of the model and get the maximum number of
        # cache blocks that can be allocated with the remaining free memory.
        torch.cuda.empty_cache()

        # Execute a forward pass with dummy inputs to profile the memory usage
        # of the model.
        self.model_runner.profile_run()

        # Calculate the number of blocks that can be allocated with the
        # profiled peak memory.
        torch.cuda.synchronize()
        free_gpu_memory, total_gpu_memory = torch.cuda.mem_get_info()
        # NOTE: Here we assume that the other processes using the same
        # GPU did not change their memory usage during the profiling.
        peak_memory = self.init_gpu_memory - free_gpu_memory

        cache_block_size = CacheEngine.get_cache_block_size(
            block_size, cache_dtype, self.model_config, self.parallel_config)
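        # Illustrative arithmetic (hypothetical numbers, not measured values):
        # with total_gpu_memory = 24 GiB, gpu_memory_utilization = 0.9,
        # peak_memory = 14 GiB, and cache_block_size = 2 MiB, the cache budget
        # is 24 * 0.9 - 14 = 7.6 GiB, so num_gpu_blocks = 7.6 GiB // 2 MiB
        # = 3891 blocks; a 4 GiB cpu_swap_space likewise yields 2048 CPU
        # blocks.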
        num_gpu_blocks = int(
            (total_gpu_memory * gpu_memory_utilization - peak_memory) //
            cache_block_size)
        num_cpu_blocks = int(cpu_swap_space // cache_block_size)
        num_gpu_blocks = max(num_gpu_blocks, 0)
        num_cpu_blocks = max(num_cpu_blocks, 0)
        if self.model_runner.lora_manager:
            self.model_runner.remove_all_loras()
        gc.collect()
        torch.cuda.empty_cache()
        return num_gpu_blocks, num_cpu_blocks

    def init_cache_engine(self, cache_config: CacheConfig) -> None:
        self.cache_config = cache_config
        self.cache_engine = CacheEngine(self.cache_config, self.model_config,
                                        self.parallel_config)
        self.cache_events = self.cache_engine.events
        self.gpu_cache = self.cache_engine.gpu_cache
        self.model_runner.set_block_size(self.cache_engine.block_size)

    def warm_up_model(self) -> None:
        if not self.model_config.enforce_eager:
            self.model_runner.capture_model(self.gpu_cache)
        # Reset the seed to ensure that the random state is not affected by
        # the model initialization and profiling.
        set_random_seed(self.model_config.seed)

    def cache_swap(
        self,
        blocks_to_swap_in: Dict[int, int],
        blocks_to_swap_out: Dict[int, int],
        blocks_to_copy: Dict[int, List[int]],
    ) -> None:
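        # NOTE: Assumed block-mapping semantics, not spelled out in this file:
        # blocks_to_swap_in maps a CPU block number to the GPU block that
        # receives it, blocks_to_swap_out maps a GPU block to its CPU
        # destination, and blocks_to_copy maps a source GPU block to the list
        # of GPU blocks it is copied into (e.g. for copy-on-write forks).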
        # Issue cache operations.
        issued_cache_op = False
        if blocks_to_swap_in:
            self.cache_engine.swap_in(blocks_to_swap_in)
            issued_cache_op = True
        if blocks_to_swap_out:
            self.cache_engine.swap_out(blocks_to_swap_out)
            issued_cache_op = True
        if blocks_to_copy:
            self.cache_engine.copy(blocks_to_copy)
            issued_cache_op = True

        cache_events = self.cache_events if issued_cache_op else None

        # Wait for cache operations to finish.
        # TODO: Profile swapping overhead and optimize if needed.
        if cache_events is not None:
            for event in cache_events:  # pylint: disable=not-an-iterable
                event.wait()

    @torch.inference_mode()
    def execute_model(
        self,
        seq_group_metadata_list: Optional[List[SequenceGroupMetadata]] = None,
        blocks_to_swap_in: Optional[Dict[int, int]] = None,
        blocks_to_swap_out: Optional[Dict[int, int]] = None,
        blocks_to_copy: Optional[Dict[int, List[int]]] = None,
    ) -> Optional[SamplerOutput]:
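        # The driver worker (rank 0) receives the scheduler output directly
        # and broadcasts the control metadata to the other ranks, so every
        # worker performs the same cache operations for the same step.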
        if self.is_driver_worker:
            assert seq_group_metadata_list is not None
            num_seq_groups = len(seq_group_metadata_list)
            assert blocks_to_swap_in is not None
            assert blocks_to_swap_out is not None
            assert blocks_to_copy is not None
            data = {
                "num_seq_groups": num_seq_groups,
                "blocks_to_swap_in": blocks_to_swap_in,
                "blocks_to_swap_out": blocks_to_swap_out,
                "blocks_to_copy": blocks_to_copy,
            }
            broadcast_tensor_dict(data, src=0)
        else:
            data = broadcast_tensor_dict(src=0)
            num_seq_groups = data["num_seq_groups"]
            blocks_to_swap_in = data["blocks_to_swap_in"]
            blocks_to_swap_out = data["blocks_to_swap_out"]
            blocks_to_copy = data["blocks_to_copy"]

        self.cache_swap(blocks_to_swap_in, blocks_to_swap_out, blocks_to_copy)

        # If there is no input, we don't need to execute the model.
        if num_seq_groups == 0:
            return {}

        output = self.model_runner.execute_model(seq_group_metadata_list,
                                                 self.gpu_cache)
        return output

    def add_lora(self, lora_request: LoRARequest) -> bool:
        return self.model_runner.add_lora(lora_request)

    def remove_lora(self, lora_id: int) -> bool:
        return self.model_runner.remove_lora(lora_id)

    def list_loras(self) -> Set[int]:
        return self.model_runner.list_loras()


def init_distributed_environment(
    parallel_config: ParallelConfig,
    rank: int,
    cupy_port: Optional[int],
    distributed_init_method: Optional[str] = None,
) -> None:
    """Initialize the distributed environment."""
    if torch.distributed.is_initialized():
        torch_world_size = torch.distributed.get_world_size()
        if torch_world_size != parallel_config.world_size:
            raise RuntimeError(
                "torch.distributed is already initialized but the torch world "
                "size does not match parallel_config.world_size "
                f"({torch_world_size} vs. {parallel_config.world_size}).")
    elif not distributed_init_method:
        raise ValueError(
            "distributed_init_method must be set if torch.distributed "
            "is not already initialized")
    else:
        torch.distributed.init_process_group(
            backend="nccl",
            world_size=parallel_config.world_size,
            rank=rank,
            init_method=distributed_init_method,
        )

    if cupy_utils.is_initialized():
        cupy_world_size = cupy_utils.get_world_size()
        if cupy_world_size != parallel_config.world_size:
            raise RuntimeError(
                "cupy.distributed is already initialized but the cupy world "
                "size does not match parallel_config.world_size "
                f"({cupy_world_size} vs. {parallel_config.world_size}).")
    elif (parallel_config.world_size > 1 and cupy_port is not None
          and not is_hip()):
        # NOTE: We don't initialize the CuPy process group when the world
        # size is 1.
        # TODO: Support multi-node connection.
        cupy_utils.init_process_group(
            world_size=parallel_config.world_size,
            rank=rank,
            host="localhost",
            port=cupy_port,
        )

    # A small all_reduce for warmup.
    torch.distributed.all_reduce(torch.zeros(1).cuda())
    if cupy_utils.is_initialized():
        cupy_utils.all_reduce(torch.zeros(1).cuda())
    ensure_model_parallel_initialized(parallel_config.tensor_parallel_size,
                                      parallel_config.pipeline_parallel_size)


def _check_if_gpu_supports_dtype(torch_dtype: torch.dtype):
    # Check if the GPU supports the dtype.
    if torch_dtype == torch.bfloat16:
        compute_capability = torch.cuda.get_device_capability()
        if compute_capability[0] < 8:
            gpu_name = torch.cuda.get_device_name()
            raise ValueError(
                "Bfloat16 is only supported on GPUs with compute capability "
                f"of at least 8.0. Your {gpu_name} GPU has compute capability "
                f"{compute_capability[0]}.{compute_capability[1]}. "
                "You can use float16 instead by explicitly setting the "
                "`dtype` flag in the CLI, for example: --dtype=half.")