#!/usr/bin/env python
# -*- encoding: utf-8 -*-

import torch

from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.utils import get_current_device, synchronize


def ring_forward(tensor_send_next: torch.Tensor, parallel_mode: ParallelMode) -> torch.Tensor:
    """Sends a tensor to the next member of the ring and receives a tensor from the previous member.

    This function returns the tensor received from the previous member.

    Args:
        tensor_send_next (:class:`torch.Tensor`): Tensor to be sent to the next member.
        parallel_mode (ParallelMode): Parallel group mode used in this communication.

    Returns:
        :class:`torch.Tensor`: The tensor received from the previous member.

    Note:
        The ``parallel_mode`` should be one of the modes defined in ``ParallelMode``. More details about
        ``ParallelMode`` can be found in
        `parallel_mode <https://github.com/hpcaitech/ColossalAI/blob/main/colossalai/context/parallel_mode.py>`_.
    """
    buffer_shape = tensor_send_next.size()

    ops = []
    current_rank = gpc.get_global_rank()

    # buffer for the tensor coming from the previous member of the ring
    tensor_recv_prev = torch.empty(buffer_shape,
                                   requires_grad=True,
                                   device=get_current_device(),
                                   dtype=tensor_send_next.dtype)

    # send to next rank
    send_next_op = torch.distributed.P2POp(torch.distributed.isend, tensor_send_next,
                                           gpc.get_next_global_rank(parallel_mode))
    ops.append(send_next_op)

    # receive from prev rank
    recv_prev_op = torch.distributed.P2POp(torch.distributed.irecv, tensor_recv_prev,
                                           gpc.get_prev_global_rank(parallel_mode))
    ops.append(recv_prev_op)

    # reverse the op order on even ranks so that neighbouring ranks post their
    # send/recv in opposite order, which helps avoid deadlocks in batch_isend_irecv()
    if current_rank % 2 == 0:
        ops = ops[::-1]

    reqs = torch.distributed.batch_isend_irecv(ops)
    for req in reqs:
        req.wait()

    # To protect against race condition when using batch_isend_irecv().
    synchronize()

    return tensor_recv_prev
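

# ---------------------------------------------------------------------------
# Usage sketch (not part of the original file): a minimal, hedged example of
# how ``ring_forward`` could be exercised inside a pipeline-parallel group.
# It assumes ColossalAI has already been launched (for example with
# ``colossalai.launch_from_torch``) so that ``gpc`` and the PIPELINE process
# group are initialised; the tensor shape below is purely illustrative.
if __name__ == "__main__":
    # Every rank sends its activation to the next pipeline stage and receives
    # the activation produced by the previous stage.
    activation = torch.randn(4, 1024, device=get_current_device())
    prev_activation = ring_forward(activation, ParallelMode.PIPELINE)
    print(f"rank {gpc.get_global_rank()} received a tensor of shape {tuple(prev_activation.shape)}")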