parallel_state.py

# Copyright 2023 The PygmalionAI team.
# Copyright 2023 The vLLM team.
# Adapted from
# https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/core/parallel_state.py
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
"""Tensor and pipeline parallel groups."""
import os
from typing import List, Optional

import torch
from loguru import logger
from torch.distributed import ProcessGroup

_ENABLE_CUSTOM_ALL_REDUCE = True

# Tensor model parallel group that the current rank belongs to.
_TP_DEVICE_GROUP: Optional[ProcessGroup] = None
_TP_CPU_GROUP: Optional[ProcessGroup] = None
_TP_PYNCCL_COMMUNICATOR = None
_TP_CA_COMMUNICATOR = None

# Pipeline model parallel group that the current rank belongs to.
_PP_DEVICE_GROUP: Optional[ProcessGroup] = None

# When people blindly call `torch.distributed.all_reduce` etc.,
# it will use this group. It is initialized with the `backend`
# parameter of `init_distributed_environment` below.
# Essentially, this is `torch.distributed.group.WORLD`.
# We leave a line here to note that this is device-specific.
# Note that this variable is not safe to use, because when users
# call `init_distributed_environment` first, and then destroy
# the process group themselves, this variable will keep a reference to the
# destroyed process group, which is not useful.
_DEVICE_WORLD_GROUP = None

# During `init_distributed_environment`, we will also initialize a
# group with the `gloo` backend, to allow direct coordination between
# processes through the CPU.
_CPU_WORLD_GROUP = None

# In summary, after calling `init_distributed_environment`, we will always
# have two groups: one device-specific group (which is the default) and one
# CPU group. All processes are part of both groups.
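
# For example, a metadata broadcast that must not touch the GPU can be issued
# against the CPU group (illustrative only, not a call made by this module):
#   torch.distributed.broadcast_object_list(objs, src=0,
#                                           group=get_cpu_world_group())
# while tensor traffic keeps using the default device group.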

# A list of global ranks for each pipeline group to ease calculation of the
# source rank when broadcasting from the first or last pipeline stage.
_PP_GLOBAL_RANKS: Optional[List[int]] = None

_LOCAL_RANK = -1


def set_custom_all_reduce(enable: bool):
    global _ENABLE_CUSTOM_ALL_REDUCE
    _ENABLE_CUSTOM_ALL_REDUCE = enable


def get_tp_pynccl_communicator():
    global _TP_PYNCCL_COMMUNICATOR
    return _TP_PYNCCL_COMMUNICATOR


def get_tp_ca_communicator():
    global _TP_CA_COMMUNICATOR
    return _TP_CA_COMMUNICATOR


def get_local_rank():
    global _LOCAL_RANK
    return _LOCAL_RANK


def init_distributed_environment(
    world_size: int = -1,
    rank: int = -1,
    distributed_init_method: str = "env://",
    local_rank: int = -1,
    backend: str = "nccl",
):
    logger.debug(f"{world_size=} {rank=} {local_rank=} "
                 f"{distributed_init_method=} {backend=}")
    if not torch.distributed.is_initialized():
        assert distributed_init_method is not None, (
            "distributed_init_method must be provided when initializing "
            "distributed environment")
        # this backend is used for WORLD
        torch.distributed.init_process_group(
            backend=backend,
            init_method=distributed_init_method,
            world_size=world_size,
            rank=rank)
        global _DEVICE_WORLD_GROUP, _CPU_WORLD_GROUP
        _DEVICE_WORLD_GROUP = torch.distributed.group.WORLD
        ranks = list(range(torch.distributed.get_world_size()))
        _CPU_WORLD_GROUP = torch.distributed.new_group(ranks=ranks,
                                                       backend="gloo")
    if local_rank == -1:
        # local rank not set, this usually happens in a single-node
        # setting, where we can use the rank as the local rank
        if distributed_init_method == "env://":
            local_rank = int(os.environ.get("LOCAL_RANK", rank))
        else:
            local_rank = rank
    global _LOCAL_RANK
    _LOCAL_RANK = local_rank
    # A small all_reduce for warmup.
    data = torch.zeros(1)
    if torch.cuda.is_available():
        data = data.to(device=f"cuda:{local_rank}")
    torch.distributed.all_reduce(data)
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    del data
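

# A minimal usage sketch (illustrative only, not executed by this module): on a
# CPU-only machine the same entry point can be driven with the gloo backend and
# an explicit TCP rendezvous, e.g.
#   init_distributed_environment(world_size=1, rank=0,
#                                distributed_init_method="tcp://127.0.0.1:29500",
#                                backend="gloo")
# The address and port above are placeholder assumptions, not values this
# module prescribes.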


def initialize_model_parallel(
    tensor_model_parallel_size: int = 1,
    pipeline_model_parallel_size: int = 1,
    backend: Optional[str] = None,
) -> None:
    """
    Initialize model parallel groups.

    Arguments:
        tensor_model_parallel_size: number of GPUs used for tensor model
            parallelism.
        pipeline_model_parallel_size: number of GPUs used for pipeline model
            parallelism.

    Let's say we have a total of 8 GPUs denoted by g0 ... g7 and we
    use 2 GPUs to parallelize the model tensor, and 4 GPUs to parallelize
    the model pipeline. The present function will
    create 4 tensor model-parallel groups and 2 pipeline model-parallel groups:
        4 tensor model-parallel groups:
            [g0, g1], [g2, g3], [g4, g5], [g6, g7]
        2 pipeline model-parallel groups:
            [g0, g2, g4, g6], [g1, g3, g5, g7]
    Note that for efficiency, the caller should make sure adjacent ranks
    are on the same DGX box. For example, if we are using 2 DGX-1 boxes
    with a total of 16 GPUs, ranks 0 to 7 belong to the first box and
    ranks 8 to 15 belong to the second box.
    """
    # Get world size and rank. Ensure some consistencies.
    assert torch.distributed.is_initialized()
    world_size: int = torch.distributed.get_world_size()
    # get the backend of _DEVICE_WORLD_GROUP
    backend = backend or torch.distributed.get_backend()

    if (world_size !=
            tensor_model_parallel_size * pipeline_model_parallel_size):
        raise RuntimeError(
            f"world_size ({world_size}) is not equal to "
            f"tensor_model_parallel_size ({tensor_model_parallel_size}) x "
            f"pipeline_model_parallel_size ({pipeline_model_parallel_size})")

    num_tensor_model_parallel_groups: int = (world_size //
                                             tensor_model_parallel_size)
    num_pipeline_model_parallel_groups: int = (world_size //
                                               pipeline_model_parallel_size)
    rank = torch.distributed.get_rank()

    # Build the tensor model-parallel groups.
    global _TP_DEVICE_GROUP, _TP_CPU_GROUP
    global _TP_PYNCCL_COMMUNICATOR, _TP_CA_COMMUNICATOR
    assert _TP_DEVICE_GROUP is None, (
        "tensor model parallel group is already initialized")
    for i in range(num_tensor_model_parallel_groups):
        ranks = list(
            range(i * tensor_model_parallel_size,
                  (i + 1) * tensor_model_parallel_size))
        group = torch.distributed.new_group(ranks, backend=backend)
        cpu_group = torch.distributed.new_group(ranks, backend="gloo")
        if rank in ranks:
            _TP_DEVICE_GROUP = group
            _TP_CPU_GROUP = cpu_group

    from aphrodite.distributed.device_communicators.pynccl import \
        PyNcclCommunicator
    _TP_PYNCCL_COMMUNICATOR = PyNcclCommunicator(
        group=_TP_CPU_GROUP,
        device=_LOCAL_RANK,
    )

    # Initialize a custom fast all-reduce implementation.
    if _ENABLE_CUSTOM_ALL_REDUCE:
        from aphrodite.distributed.device_communicators.custom_all_reduce \
            import CustomAllreduce
        _TP_CA_COMMUNICATOR = CustomAllreduce(
            group=_TP_CPU_GROUP,
            device=_LOCAL_RANK,
        )

    # Build the pipeline model-parallel groups.
    global _PP_DEVICE_GROUP
    global _PP_GLOBAL_RANKS
    assert _PP_DEVICE_GROUP is None, (
        "pipeline model parallel group is already initialized")
    for i in range(num_pipeline_model_parallel_groups):
        ranks = list(range(i, world_size, num_pipeline_model_parallel_groups))
        group = torch.distributed.new_group(ranks, backend=backend)
        if rank in ranks:
            _PP_DEVICE_GROUP = group
            _PP_GLOBAL_RANKS = ranks
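

# A small illustrative sketch of the group layout described in the docstring
# of `initialize_model_parallel` above. The helper name and its defaults are
# assumptions made for illustration; it only reproduces the rank arithmetic
# and never creates process groups.
def _example_model_parallel_layout(world_size: int = 8,
                                   tensor_model_parallel_size: int = 2,
                                   pipeline_model_parallel_size: int = 4):
    """Return (tp_groups, pp_groups) as plain rank lists, for illustration.

    With the defaults this yields
        tp_groups = [[0, 1], [2, 3], [4, 5], [6, 7]]
        pp_groups = [[0, 2, 4, 6], [1, 3, 5, 7]]
    which matches the 8-GPU example in the docstring above.
    """
    num_tp_groups = world_size // tensor_model_parallel_size
    num_pp_groups = world_size // pipeline_model_parallel_size
    tp_groups = [
        list(range(i * tensor_model_parallel_size,
                   (i + 1) * tensor_model_parallel_size))
        for i in range(num_tp_groups)
    ]
    pp_groups = [
        list(range(i, world_size, num_pp_groups))
        for i in range(num_pp_groups)
    ]
    return tp_groups, pp_groups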


def ensure_model_parallel_initialized(
    tensor_model_parallel_size: int,
    pipeline_model_parallel_size: int,
    backend: Optional[str] = None,
) -> None:
    """Helper to initialize model parallel groups if they are not initialized,
    or to ensure the tensor-parallel and pipeline-parallel sizes match the
    expected values if the model parallel groups are already initialized.
    """
    # get the backend of _DEVICE_WORLD_GROUP
    backend = backend or torch.distributed.get_backend()
    if not model_parallel_is_initialized():
        initialize_model_parallel(tensor_model_parallel_size,
                                  pipeline_model_parallel_size, backend)
        return

    assert (
        get_tensor_model_parallel_world_size() == tensor_model_parallel_size
    ), ("tensor parallel group already initialized, but of unexpected size: "
        f"{get_tensor_model_parallel_world_size()=} vs. "
        f"{tensor_model_parallel_size=}")
    assert (get_pipeline_model_parallel_world_size(
    ) == pipeline_model_parallel_size), (
        "pipeline parallel group already initialized, but of unexpected size: "
        f"{get_pipeline_model_parallel_world_size()=} vs. "
        f"{pipeline_model_parallel_size=}")


def model_parallel_is_initialized():
    """Check if tensor and pipeline parallel groups are initialized."""
    return (_TP_DEVICE_GROUP is not None and _PP_DEVICE_GROUP is not None)


def get_cpu_world_group():
    """Get the CPU world group."""
    assert _CPU_WORLD_GROUP is not None, ("CPU world group is not initialized")
    return _CPU_WORLD_GROUP


def get_tensor_model_parallel_group():
    """Get the tensor model parallel group the caller rank belongs to."""
    assert _TP_DEVICE_GROUP is not None, (
        "tensor model parallel group is not initialized")
    return _TP_DEVICE_GROUP


def get_tensor_model_parallel_cpu_group():
    """Get the tensor model parallel cpu group the caller rank belongs to."""
    assert _TP_CPU_GROUP is not None, (
        "tensor model parallel cpu group is not initialized")
    return _TP_CPU_GROUP


def get_pipeline_model_parallel_group():
    """Get the pipeline model parallel group the caller rank belongs to."""
    assert _PP_DEVICE_GROUP is not None, (
        "pipeline model parallel group is not initialized")
    return _PP_DEVICE_GROUP


def get_tensor_model_parallel_world_size():
    """Return world size for the tensor model parallel group."""
    return torch.distributed.get_world_size(
        group=get_tensor_model_parallel_group())


def get_pipeline_model_parallel_world_size():
    """Return world size for the pipeline model parallel group."""
    return torch.distributed.get_world_size(
        group=get_pipeline_model_parallel_group())


def get_tensor_model_parallel_rank():
    """Return my rank for the tensor model parallel group."""
    return torch.distributed.get_rank(group=get_tensor_model_parallel_group())


def get_pipeline_model_parallel_rank():
    """Return my rank for the pipeline model parallel group."""
    return torch.distributed.get_rank(
        group=get_pipeline_model_parallel_group())


def get_tensor_model_parallel_src_rank():
    """Calculate the global rank corresponding to the first local rank
    in the tensor model parallel group."""
    global_rank = torch.distributed.get_rank()
    local_world_size = get_tensor_model_parallel_world_size()
    return (global_rank // local_world_size) * local_world_size
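

# Worked example for the arithmetic above: with tensor_model_parallel_size=2,
# global ranks 4 and 5 share a tensor-parallel group, so both map to the
# source rank (5 // 2) * 2 == (4 // 2) * 2 == 4.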


def get_pipeline_model_parallel_first_rank():
    """Return the global rank of the first process in the pipeline for the
    current tensor parallel group."""
    assert _PP_GLOBAL_RANKS is not None, (
        "Pipeline parallel group is not initialized")
    return _PP_GLOBAL_RANKS[0]


def get_pipeline_model_parallel_last_rank():
    """Return the global rank of the last process in the pipeline for the
    current tensor parallel group."""
    assert _PP_GLOBAL_RANKS is not None, (
        "Pipeline parallel group is not initialized")
    last_rank_local = get_pipeline_model_parallel_world_size() - 1
    return _PP_GLOBAL_RANKS[last_rank_local]


def get_pipeline_model_parallel_next_rank():
    """Return the global rank that follows the caller in the pipeline."""
    assert _PP_GLOBAL_RANKS is not None, (
        "Pipeline parallel group is not initialized")
    rank_in_pipeline = get_pipeline_model_parallel_rank()
    world_size = get_pipeline_model_parallel_world_size()
    return _PP_GLOBAL_RANKS[(rank_in_pipeline + 1) % world_size]


def get_pipeline_model_parallel_prev_rank():
    """Return the global rank that precedes the caller in the pipeline."""
    assert _PP_GLOBAL_RANKS is not None, (
        "Pipeline parallel group is not initialized")
    rank_in_pipeline = get_pipeline_model_parallel_rank()
    world_size = get_pipeline_model_parallel_world_size()
    return _PP_GLOBAL_RANKS[(rank_in_pipeline - 1) % world_size]
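

# Worked example for the wrap-around above: in the pipeline group
# [g1, g3, g5, g7] from the `initialize_model_parallel` docstring, the rank
# after g7 is g1 and the rank before g1 is g7, thanks to the modulo arithmetic.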


def destroy_model_parallel():
    """Set the groups to none and destroy them."""
    global _TP_DEVICE_GROUP
    if _TP_DEVICE_GROUP:
        torch.distributed.destroy_process_group(_TP_DEVICE_GROUP)
    _TP_DEVICE_GROUP = None
    global _TP_CPU_GROUP
    if _TP_CPU_GROUP:
        torch.distributed.destroy_process_group(_TP_CPU_GROUP)
    _TP_CPU_GROUP = None
    global _TP_PYNCCL_COMMUNICATOR
    _TP_PYNCCL_COMMUNICATOR = None
    global _PP_DEVICE_GROUP
    if _PP_DEVICE_GROUP:
        torch.distributed.destroy_process_group(_PP_DEVICE_GROUP)
    _PP_DEVICE_GROUP = None
    global _PP_GLOBAL_RANKS
    _PP_GLOBAL_RANKS = None
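

# A hedged end-to-end sketch, not called anywhere in this module: it assumes
# the process was launched by a tool such as `torchrun --nproc-per-node=1`
# (which populates the env:// variables) and that a CUDA device and NCCL are
# available. Real deployments are driven by the engine, not by this helper.
def _example_setup_and_teardown():
    # World group first: the default device group plus the gloo CPU group.
    init_distributed_environment(backend="nccl")
    # Then the model-parallel groups; the sizes must multiply to the world
    # size, here 1 x 1 for a single process.
    ensure_model_parallel_initialized(tensor_model_parallel_size=1,
                                      pipeline_model_parallel_size=1)
    assert model_parallel_is_initialized()
    # Tear down in reverse order.
    destroy_model_parallel()
    torch.distributed.destroy_process_group()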