from typing import List, Optional

import torch

from colossalai.context.singleton_meta import SingletonMeta
from colossalai.logging import get_dist_logger


class PyTorchProcessGroupDict(metaclass=SingletonMeta):

    def __init__(self):
        # distributed settings
        # use this dict to record all PyTorch ProcessGroups
        self.dict = {}
        # set a distributed logger
        self.logger = get_dist_logger('ProcessGroup')

    def log_pg_init(self, rank_list: List[int], backend: str):
        str_list = ["Pytorch ProcessGroup Init:"]
        str_list.append(f"backend: {backend}")
        str_list.append(f"ranks: {rank_list}")
        self.logger.info("\n\t".join(str_list), ranks=[0])

    def get(self, rank_list: List[int], backend: str = 'nccl'):
        """Reuse a PyTorch ProcessGroup if a group with the same backend and ranks
        has already been initialized.
        """
        # convert the passed list to a tuple, since a list is unhashable
        processgroup_key = (backend, tuple(rank_list))
        if processgroup_key not in self.dict:
            self.log_pg_init(rank_list=rank_list, backend=backend)
            self.dict[processgroup_key] = torch.distributed.new_group(ranks=rank_list, backend=backend)
        return self.dict[processgroup_key]


PYTORCHPGDICT_ = PyTorchProcessGroupDict()
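
# Minimal usage sketch (illustrative, not part of the original module): once
# torch.distributed.init_process_group() has been called on every rank,
# repeated lookups with the same backend and ranks return the cached group
# instead of creating a new one:
#
#     pg_a = PYTORCHPGDICT_.get([0, 1], 'nccl')
#     pg_b = PYTORCHPGDICT_.get([0, 1], 'nccl')
#     assert pg_a is pg_b    # the same cached ProcessGroup object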


class ProcessGroup:
    """ProcessGroup
    Process Group indicates how processes are organized in groups for parallel execution using Tensor Parallelism and Data Parallelism.

    NOTE, the ProcessGroup must be used after `torch.distributed.init_process_group()` is called.

    Args:
        rank: the global rank of the current process.
        ranks: List[int], a list of rank ids belonging to this process group.
        tp_degree: Optional[int], tensor parallelism degree. How many processes are inside a tp process group. default None means 1.
        dp_degree: Optional[int], data parallelism degree. How many processes are inside a dp process group. default None means len(ranks).
    """

    def __init__(self,
                 rank: Optional[int] = None,
                 ranks: Optional[List[int]] = None,
                 tp_degree: Optional[int] = None,
                 dp_degree: Optional[int] = None) -> None:
        if not torch.distributed.is_initialized():
            self.is_init = False
            return

        assert torch.distributed.is_initialized(), "ProcessGroup must be used after torch.distributed is initialized"

        self._rank = torch.distributed.get_rank()
        if rank is not None:
            assert self._rank == rank    # make sure that the global rank is correct

        if ranks is None:
            self._rank_list = list(range(torch.distributed.get_world_size()))
        else:
            self._rank_list = ranks
        self._rank_list.sort()    # ensure that the list is in order

        self._world_size = len(self._rank_list)

        if dp_degree is None and tp_degree is None:
            self._dp_degree = self._world_size
            self._tp_degree = 1
        elif dp_degree and not tp_degree:
            self._dp_degree = dp_degree
            assert self._world_size % self._dp_degree == 0, \
                f"The world size {self._world_size} should be divisible by the DP degree {dp_degree} when TP degree is None"
            self._tp_degree = self._world_size // dp_degree
        elif not dp_degree and tp_degree:
            self._tp_degree = tp_degree
            assert self._world_size % self._tp_degree == 0, \
                f"The world size {self._world_size} should be divisible by the TP degree {tp_degree} when DP degree is None"
            self._dp_degree = self._world_size // tp_degree
        else:
            self._dp_degree = dp_degree
            self._tp_degree = tp_degree
            assert self._dp_degree * self._tp_degree == self._world_size, \
                f"The world size {self._world_size} should equal the product of DP degree {self._dp_degree} " \
                f"and TP degree {self._tp_degree}"

        self._tp_rank_list = None
        self._dp_rank_list = None

        # ranks form a (dp_degree, tp_degree) grid: the i-th TP group holds
        # the tp_degree consecutive ranks starting at index i * tp_degree
        for i in range(self._dp_degree):
            i_tp_list = [self._rank_list[i * self._tp_degree + j] for j in range(self._tp_degree)]
            PYTORCHPGDICT_.get(i_tp_list, 'nccl')
            if self._rank in i_tp_list:
                self._tp_rank_list = i_tp_list

        # the j-th DP group holds column j of the grid, one rank from each TP group
        for j in range(self._tp_degree):
            j_dp_list = [self._rank_list[i * self._tp_degree + j] for i in range(self._dp_degree)]
            PYTORCHPGDICT_.get(j_dp_list, 'nccl')
            if self._rank in j_dp_list:
                self._dp_rank_list = j_dp_list
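
        # Illustrative layout (assumed 4 ranks, tp_degree=2, dp_degree=2):
        #   TP groups (rows):    [0, 1] and [2, 3]
        #   DP groups (columns): [0, 2] and [1, 3]
        # so global rank 3 belongs to TP group [2, 3] and DP group [1, 3].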

        self._has_cpu_groups = False
        self.is_init = True

    def set_cpu_groups(self):
        """set_cpu_groups
        Initialize PyTorch process groups for CPU communication.
        """
        if self.has_cpu_groups:
            return

        for i in range(self._dp_degree):
            i_tp_list = [self._rank_list[i * self._tp_degree + j] for j in range(self._tp_degree)]
            PYTORCHPGDICT_.get(i_tp_list, 'gloo')

        for j in range(self._tp_degree):
            j_dp_list = [self._rank_list[i * self._tp_degree + j] for i in range(self._dp_degree)]
            PYTORCHPGDICT_.get(j_dp_list, 'gloo')

        self._has_cpu_groups = True
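
    # Usage sketch (illustrative): CPU (gloo) groups are created lazily, so
    # call set_cpu_groups() once before requesting them:
    #
    #     pg = ProcessGroup(tp_degree=2)
    #     pg.set_cpu_groups()
    #     cpu_pg = pg.cpu_dp_process_group()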

    @property
    def has_cpu_groups(self) -> bool:
        """has_cpu_groups
        Whether the CPU process groups have been initialized.

        Returns:
            bool: cpu process groups have been initialized or not.
        """
        return self._has_cpu_groups

    def __repr__(self):
        if self.is_init:
            ranks_str = f"ProcessGroup(ranks={self._rank_list},\n"
            personal_str = f" rank={self._rank}, dp={self._dp_degree}, tp={self._tp_degree})"
            return ranks_str + personal_str
        else:
            return "ProcessGroup not initialized"

    def __eq__(self, obj: 'ProcessGroup') -> bool:
        if not isinstance(obj, ProcessGroup):
            return False
        if self._rank != obj._rank:
            return False
        if self._rank_list != obj._rank_list:
            return False
        if self._tp_rank_list != obj._tp_rank_list:
            return False
        if self._dp_rank_list != obj._dp_rank_list:
            return False
        if self._tp_degree != obj._tp_degree:
            return False
        if self._dp_degree != obj._dp_degree:
            return False
        return True

    def rank(self) -> int:
        """rank
        The current rank in the global process group.

        Returns:
            int: the rank number
        """
        return self._rank

    def ranks_in_group(self) -> List[int]:
        """ranks_in_group
        A list of rank numbers in the global process group.

        Returns:
            List[int]: a list of rank numbers.
        """
        return self._rank_list

    def world_size(self) -> int:
        """world_size
        The world size of the global process group.

        Returns:
            int: world size
        """
        return self._world_size

    def tp_rank_list(self) -> List[int]:
        """tp_rank_list
        The rank list of the TP process group containing the current rank.

        Returns:
            List[int]: the list of rank numbers.
        """
        return self._tp_rank_list

    def dp_rank_list(self) -> List[int]:
        """dp_rank_list
        The rank list of the DP process group containing the current rank.

        Returns:
            List[int]: the list of rank numbers.
        """
        return self._dp_rank_list

    def tp_local_rank(self) -> int:
        """tp_local_rank
        The local rank number in the current TP process group.

        Returns:
            int: tp rank number.
        """
        return self._rank % self._tp_degree

    def dp_local_rank(self) -> int:
        """dp_local_rank
        The local rank number in the current DP process group.

        Returns:
            int: dp rank number.
        """
        return self._rank // self._tp_degree
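
    # Worked example (illustrative): with tp_degree=2 and dp_degree=2, global
    # rank 3 has tp_local_rank = 3 % 2 = 1 and dp_local_rank = 3 // 2 = 1,
    # i.e. position (row 1, column 1) of the rank grid sketched above.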

    def dp_world_size(self) -> int:
        """dp_world_size
        The world size of the current DP process group.

        Returns:
            int: dp world size
        """
        return len(self._dp_rank_list)

    def tp_world_size(self) -> int:
        """tp_world_size
        The world size of the current TP process group.

        Returns:
            int: tp world size
        """
        return len(self._tp_rank_list)

    def dp_process_group(self):
        """dp_process_group
        The PyTorch DP process group containing the current rank.

        Returns:
            `torch._C._distributed_c10d.ProcessGroup`: the pytorch DP process group.
        """
        return PYTORCHPGDICT_.get(self._dp_rank_list, 'nccl')

    def tp_process_group(self):
        """tp_process_group
        The PyTorch TP process group containing the current rank.

        Returns:
            `torch._C._distributed_c10d.ProcessGroup`: the pytorch TP process group.
        """
        return PYTORCHPGDICT_.get(self._tp_rank_list, 'nccl')

    def cpu_dp_process_group(self):
        """cpu_dp_process_group
        The PyTorch CPU DP process group containing the current rank.

        Raises an AssertionError if the CPU process groups have not been initialized.

        Returns:
            `torch._C._distributed_c10d.ProcessGroup`: the pytorch DP process group.
        """
        assert self._has_cpu_groups
        return PYTORCHPGDICT_.get(self._dp_rank_list, 'gloo')

    def cpu_tp_process_group(self):
        """cpu_tp_process_group
        The PyTorch CPU TP process group containing the current rank.

        Raises an AssertionError if the CPU process groups have not been initialized.

        Returns:
            `torch._C._distributed_c10d.ProcessGroup`: the pytorch TP process group.
        """
        assert self._has_cpu_groups
        return PYTORCHPGDICT_.get(self._tp_rank_list, 'gloo')

    def get_ranks_in_dp(self) -> List[int]:
        """get_ranks_in_dp
        Ranks in the current DP process group.

        Returns:
            List[int]: a list of rank numbers.
        """
        return self._dp_rank_list

    def get_ranks_in_tp(self) -> List[int]:
        """get_ranks_in_tp
        Ranks in the current TP process group.

        Returns:
            List[int]: a list of rank numbers.
        """
        return self._tp_rank_list
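

# End-to-end usage sketch (illustrative; the launch setup is assumed, e.g. 4
# processes started with torchrun):
#
#     import torch.distributed as dist
#     dist.init_process_group(backend='nccl')
#     pg = ProcessGroup(tp_degree=2, dp_degree=2)
#     tp_group = pg.tp_process_group()    # NCCL group of the 2 TP peers
#     dp_group = pg.dp_process_group()    # NCCL group of the 2 DP peers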